/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/file.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->task_list);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(monitor->op);

	object = container_of(monitor->op->op.object,
			      struct cachefiles_object, fscache);

	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &monitor->op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(monitor->op);
	return 0;
}

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = object->backer->d_inode->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	kenter("{ino=%lx},{%lx,%lx}",
	       object->backer->d_inode->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		kleave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		kleave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		kleave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		kdebug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	kleave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	kleave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	struct pagevec pagevec;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", object->backer->d_inode->i_ino);

	pagevec_init(&pagevec, 0);

	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);

			pagevec_add(&pagevec, monitor->netfs_page);
			fscache_mark_pages_cached(monitor->op, &pagevec);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		page_cache_release(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		page_cache_release(monitor->netfs_page);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage,
					    struct pagevec *pagevec)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	pagevec_reinit(pagevec);

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = object->backer->d_inode->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = page_cache_alloc_cold(bmapping);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache(newpage, bmapping,
					netpage->index, GFP_KERNEL);
		if (ret == 0)
			goto installed_new_backing_page;
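		/* -EEXIST means someone else installed a page at this index
		 * whilst we were allocating ours; loop and pick theirs up */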
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to add it
	 * to the LRU list and start it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

	page_cache_get(backpage);
	pagevec_add(pagevec, backpage);
	__pagevec_lru_add_file(pagevec);

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	page_cache_get(monitor->netfs_page);
	page_cache_get(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
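	/* ownership of the monitor passes to the page wait queue; the read
	 * copier will free it once the page has been dealt with */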
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		page_cache_release(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	pagevec_add(pagevec, netpage);
	fscache_mark_pages_cached(op, pagevec);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto out;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
	goto out;

nomem_page:
	page_cache_release(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		return -ENOBUFS;

	inode = object->backer->d_inode;
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	if (inode->i_sb->s_blocksize > PAGE_SIZE)
		return -ENOBUFS;

	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_FAST;
	op->op.processor = cachefiles_read_copier;

	pagevec_init(&pagevec, 0);

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment
	 */
	block0 = page->index;
	block0 <<= shift;

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page,
						       &pagevec);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		pagevec_add(&pagevec, page);
		fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	_leave(" = %d", ret);
	return ret;
}
460 | ||
461 | /* | |
462 | * read the corresponding pages to the given set from the backing file | |
463 | * - any uncertain pages are simply discarded, to be tried again another time | |
464 | */ | |
465 | static int cachefiles_read_backing_file(struct cachefiles_object *object, | |
466 | struct fscache_retrieval *op, | |
467 | struct list_head *list, | |
468 | struct pagevec *mark_pvec) | |
469 | { | |
470 | struct cachefiles_one_read *monitor = NULL; | |
471 | struct address_space *bmapping = object->backer->d_inode->i_mapping; | |
472 | struct pagevec lru_pvec; | |
473 | struct page *newpage = NULL, *netpage, *_n, *backpage = NULL; | |
474 | int ret = 0; | |
475 | ||
476 | _enter(""); | |
477 | ||
478 | pagevec_init(&lru_pvec, 0); | |
479 | ||
480 | list_for_each_entry_safe(netpage, _n, list, lru) { | |
481 | list_del(&netpage->lru); | |
482 | ||
483 | _debug("read back %p{%lu,%d}", | |
484 | netpage, netpage->index, page_count(netpage)); | |
485 | ||
486 | if (!monitor) { | |
487 | monitor = kzalloc(sizeof(*monitor), GFP_KERNEL); | |
488 | if (!monitor) | |
489 | goto nomem; | |
490 | ||
491 | monitor->op = fscache_get_retrieval(op); | |
492 | init_waitqueue_func_entry(&monitor->monitor, | |
493 | cachefiles_read_waiter); | |
494 | } | |
495 | ||
496 | for (;;) { | |
497 | backpage = find_get_page(bmapping, netpage->index); | |
498 | if (backpage) | |
499 | goto backing_page_already_present; | |
500 | ||
501 | if (!newpage) { | |
502 | newpage = page_cache_alloc_cold(bmapping); | |
503 | if (!newpage) | |
504 | goto nomem; | |
505 | } | |
506 | ||
507 | ret = add_to_page_cache(newpage, bmapping, | |
508 | netpage->index, GFP_KERNEL); | |
509 | if (ret == 0) | |
510 | goto installed_new_backing_page; | |
511 | if (ret != -EEXIST) | |
512 | goto nomem; | |
513 | } | |
514 | ||
515 | /* we've installed a new backing page, so now we need to add it | |
516 | * to the LRU list and start it reading */ | |
517 | installed_new_backing_page: | |
518 | _debug("- new %p", newpage); | |
519 | ||
520 | backpage = newpage; | |
521 | newpage = NULL; | |
522 | ||
523 | page_cache_get(backpage); | |
524 | if (!pagevec_add(&lru_pvec, backpage)) | |
525 | __pagevec_lru_add_file(&lru_pvec); | |
526 | ||
527 | reread_backing_page: | |
528 | ret = bmapping->a_ops->readpage(NULL, backpage); | |
529 | if (ret < 0) | |
530 | goto read_error; | |
531 | ||
532 | /* add the netfs page to the pagecache and LRU, and set the | |
533 | * monitor to transfer the data across */ | |
534 | monitor_backing_page: | |
535 | _debug("- monitor add"); | |
536 | ||
537 | ret = add_to_page_cache(netpage, op->mapping, netpage->index, | |
538 | GFP_KERNEL); | |
539 | if (ret < 0) { | |
540 | if (ret == -EEXIST) { | |
541 | page_cache_release(netpage); | |
542 | continue; | |
543 | } | |
544 | goto nomem; | |
545 | } | |
546 | ||
547 | page_cache_get(netpage); | |
548 | if (!pagevec_add(&lru_pvec, netpage)) | |
549 | __pagevec_lru_add_file(&lru_pvec); | |
550 | ||
551 | /* install a monitor */ | |
552 | page_cache_get(netpage); | |
553 | monitor->netfs_page = netpage; | |
554 | ||
555 | page_cache_get(backpage); | |
556 | monitor->back_page = backpage; | |
557 | monitor->monitor.private = backpage; | |
558 | add_page_wait_queue(backpage, &monitor->monitor); | |
559 | monitor = NULL; | |
560 | ||
561 | /* but the page may have been read before the monitor was | |
562 | * installed, so the monitor may miss the event - so we have to | |
563 | * ensure that we do get one in such a case */ | |
564 | if (trylock_page(backpage)) { | |
565 | _debug("2unlock %p {%lx}", backpage, backpage->flags); | |
566 | unlock_page(backpage); | |
567 | } | |
568 | ||
569 | page_cache_release(backpage); | |
570 | backpage = NULL; | |
571 | ||
572 | page_cache_release(netpage); | |
573 | netpage = NULL; | |
574 | continue; | |
575 | ||
576 | /* if the backing page is already present, it can be in one of | |
577 | * three states: read in progress, read failed or read okay */ | |
578 | backing_page_already_present: | |
579 | _debug("- present %p", backpage); | |
580 | ||
581 | if (PageError(backpage)) | |
582 | goto io_error; | |
583 | ||
584 | if (PageUptodate(backpage)) | |
585 | goto backing_page_already_uptodate; | |
586 | ||
587 | _debug("- not ready %p{%lx}", backpage, backpage->flags); | |
588 | ||
589 | if (!trylock_page(backpage)) | |
590 | goto monitor_backing_page; | |
591 | ||
592 | if (PageError(backpage)) { | |
593 | _debug("error %lx", backpage->flags); | |
594 | unlock_page(backpage); | |
595 | goto io_error; | |
596 | } | |
597 | ||
598 | if (PageUptodate(backpage)) | |
599 | goto backing_page_already_uptodate_unlock; | |
600 | ||
601 | /* we've locked a page that's neither up to date nor erroneous, | |
602 | * so we need to attempt to read it again */ | |
603 | goto reread_backing_page; | |
604 | ||
605 | /* the backing page is already up to date, attach the netfs | |
606 | * page to the pagecache and LRU and copy the data across */ | |
607 | backing_page_already_uptodate_unlock: | |
608 | _debug("uptodate %lx", backpage->flags); | |
609 | unlock_page(backpage); | |
610 | backing_page_already_uptodate: | |
611 | _debug("- uptodate"); | |
612 | ||
613 | ret = add_to_page_cache(netpage, op->mapping, netpage->index, | |
614 | GFP_KERNEL); | |
615 | if (ret < 0) { | |
616 | if (ret == -EEXIST) { | |
617 | page_cache_release(netpage); | |
618 | continue; | |
619 | } | |
620 | goto nomem; | |
621 | } | |
622 | ||
623 | copy_highpage(netpage, backpage); | |
624 | ||
625 | page_cache_release(backpage); | |
626 | backpage = NULL; | |
627 | ||
628 | if (!pagevec_add(mark_pvec, netpage)) | |
629 | fscache_mark_pages_cached(op, mark_pvec); | |
630 | ||
631 | page_cache_get(netpage); | |
632 | if (!pagevec_add(&lru_pvec, netpage)) | |
633 | __pagevec_lru_add_file(&lru_pvec); | |
634 | ||
635 | fscache_end_io(op, netpage, 0); | |
636 | page_cache_release(netpage); | |
637 | netpage = NULL; | |
638 | continue; | |
639 | } | |
640 | ||
641 | netpage = NULL; | |
642 | ||
643 | _debug("out"); | |
644 | ||
645 | out: | |
646 | /* tidy up */ | |
647 | pagevec_lru_add_file(&lru_pvec); | |
648 | ||
649 | if (newpage) | |
650 | page_cache_release(newpage); | |
651 | if (netpage) | |
652 | page_cache_release(netpage); | |
653 | if (backpage) | |
654 | page_cache_release(backpage); | |
655 | if (monitor) { | |
656 | fscache_put_retrieval(op); | |
657 | kfree(monitor); | |
658 | } | |
659 | ||
660 | list_for_each_entry_safe(netpage, _n, list, lru) { | |
661 | list_del(&netpage->lru); | |
662 | page_cache_release(netpage); | |
663 | } | |
664 | ||
665 | _leave(" = %d", ret); | |
666 | return ret; | |
667 | ||
668 | nomem: | |
669 | _debug("nomem"); | |
670 | ret = -ENOMEM; | |
671 | goto out; | |
672 | ||
673 | read_error: | |
674 | _debug("read error %d", ret); | |
675 | if (ret == -ENOMEM) | |
676 | goto out; | |
677 | io_error: | |
678 | cachefiles_io_error_obj(object, "Page read error on backing file"); | |
679 | ret = -ENOBUFS; | |
680 | goto out; | |
681 | } | |
682 | ||
683 | /* | |
684 | * read a list of pages from the cache or allocate blocks in which to store | |
685 | * them | |
686 | */ | |
687 | int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op, | |
688 | struct list_head *pages, | |
689 | unsigned *nr_pages, | |
690 | gfp_t gfp) | |
691 | { | |
692 | struct cachefiles_object *object; | |
693 | struct cachefiles_cache *cache; | |
694 | struct list_head backpages; | |
695 | struct pagevec pagevec; | |
696 | struct inode *inode; | |
697 | struct page *page, *_n; | |
698 | unsigned shift, nrbackpages; | |
699 | int ret, ret2, space; | |
700 | ||
701 | object = container_of(op->op.object, | |
702 | struct cachefiles_object, fscache); | |
703 | cache = container_of(object->fscache.cache, | |
704 | struct cachefiles_cache, cache); | |
705 | ||
706 | _enter("{OBJ%x,%d},,%d,,", | |
707 | object->fscache.debug_id, atomic_read(&op->op.usage), | |
708 | *nr_pages); | |
709 | ||
710 | if (!object->backer) | |
711 | return -ENOBUFS; | |
712 | ||
713 | space = 1; | |
714 | if (cachefiles_has_space(cache, 0, *nr_pages) < 0) | |
715 | space = 0; | |
716 | ||
717 | inode = object->backer->d_inode; | |
718 | ASSERT(S_ISREG(inode->i_mode)); | |
719 | ASSERT(inode->i_mapping->a_ops->bmap); | |
720 | ASSERT(inode->i_mapping->a_ops->readpages); | |
721 | ||
722 | /* calculate the shift required to use bmap */ | |
723 | if (inode->i_sb->s_blocksize > PAGE_SIZE) | |
724 | return -ENOBUFS; | |
725 | ||
726 | shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; | |
727 | ||
728 | pagevec_init(&pagevec, 0); | |
729 | ||
4fbf4291 DH |
730 | op->op.flags &= FSCACHE_OP_KEEP_FLAGS; |
731 | op->op.flags |= FSCACHE_OP_FAST; | |
9ae326a6 DH |
732 | op->op.processor = cachefiles_read_copier; |
733 | ||
734 | INIT_LIST_HEAD(&backpages); | |
735 | nrbackpages = 0; | |
736 | ||
	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			ret = -ENODATA;
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages,
						    &pagevec);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;
}
790 | ||
791 | /* | |
792 | * allocate a block in the cache in which to store a page | |
793 | * - cache withdrawal is prevented by the caller | |
794 | * - returns -EINTR if interrupted | |
795 | * - returns -ENOMEM if ran out of memory | |
796 | * - returns -ENOBUFS if no buffers can be made available | |
797 | * - returns -ENOBUFS if page is beyond EOF | |
798 | * - otherwise: | |
799 | * - the metadata will be retained | |
800 | * - 0 will be returned | |
801 | */ | |
802 | int cachefiles_allocate_page(struct fscache_retrieval *op, | |
803 | struct page *page, | |
804 | gfp_t gfp) | |
805 | { | |
806 | struct cachefiles_object *object; | |
807 | struct cachefiles_cache *cache; | |
808 | struct pagevec pagevec; | |
809 | int ret; | |
810 | ||
811 | object = container_of(op->op.object, | |
812 | struct cachefiles_object, fscache); | |
813 | cache = container_of(object->fscache.cache, | |
814 | struct cachefiles_cache, cache); | |
815 | ||
816 | _enter("%p,{%lx},", object, page->index); | |
817 | ||
818 | ret = cachefiles_has_space(cache, 0, 1); | |
819 | if (ret == 0) { | |
820 | pagevec_init(&pagevec, 0); | |
821 | pagevec_add(&pagevec, page); | |
822 | fscache_mark_pages_cached(op, &pagevec); | |
823 | } else { | |
824 | ret = -ENOBUFS; | |
825 | } | |
826 | ||
827 | _leave(" = %d", ret); | |
828 | return ret; | |
829 | } | |
830 | ||
831 | /* | |
832 | * allocate blocks in the cache in which to store a set of pages | |
833 | * - cache withdrawal is prevented by the caller | |
834 | * - returns -EINTR if interrupted | |
835 | * - returns -ENOMEM if ran out of memory | |
836 | * - returns -ENOBUFS if some buffers couldn't be made available | |
837 | * - returns -ENOBUFS if some pages are beyond EOF | |
838 | * - otherwise: | |
839 | * - -ENODATA will be returned | |
840 | * - metadata will be retained for any page marked | |
841 | */ | |
842 | int cachefiles_allocate_pages(struct fscache_retrieval *op, | |
843 | struct list_head *pages, | |
844 | unsigned *nr_pages, | |
845 | gfp_t gfp) | |
846 | { | |
847 | struct cachefiles_object *object; | |
848 | struct cachefiles_cache *cache; | |
849 | struct pagevec pagevec; | |
850 | struct page *page; | |
851 | int ret; | |
852 | ||
853 | object = container_of(op->op.object, | |
854 | struct cachefiles_object, fscache); | |
855 | cache = container_of(object->fscache.cache, | |
856 | struct cachefiles_cache, cache); | |
857 | ||
858 | _enter("%p,,,%d,", object, *nr_pages); | |
859 | ||
860 | ret = cachefiles_has_space(cache, 0, *nr_pages); | |
861 | if (ret == 0) { | |
862 | pagevec_init(&pagevec, 0); | |
863 | ||
864 | list_for_each_entry(page, pages, lru) { | |
865 | if (pagevec_add(&pagevec, page) == 0) | |
866 | fscache_mark_pages_cached(op, &pagevec); | |
867 | } | |
868 | ||
869 | if (pagevec_count(&pagevec) > 0) | |
870 | fscache_mark_pages_cached(op, &pagevec); | |
871 | ret = -ENODATA; | |
872 | } else { | |
873 | ret = -ENOBUFS; | |
874 | } | |
875 | ||
876 | _leave(" = %d", ret); | |
877 | return ret; | |
878 | } | |
879 | ||
880 | /* | |
881 | * request a page be stored in the cache | |
882 | * - cache withdrawal is prevented by the caller | |
883 | * - this request may be ignored if there's no cache block available, in which | |
884 | * case -ENOBUFS will be returned | |
885 | * - if the op is in progress, 0 will be returned | |
886 | */ | |
887 | int cachefiles_write_page(struct fscache_storage *op, struct page *page) | |
888 | { | |
889 | struct cachefiles_object *object; | |
890 | struct cachefiles_cache *cache; | |
891 | mm_segment_t old_fs; | |
892 | struct file *file; | |
a17754fb DH |
893 | loff_t pos, eof; |
894 | size_t len; | |
9ae326a6 DH |
895 | void *data; |
896 | int ret; | |
897 | ||
898 | ASSERT(op != NULL); | |
899 | ASSERT(page != NULL); | |
900 | ||
901 | object = container_of(op->op.object, | |
902 | struct cachefiles_object, fscache); | |
903 | ||
904 | _enter("%p,%p{%lx},,,", object, page, page->index); | |
905 | ||
906 | if (!object->backer) { | |
907 | _leave(" = -ENOBUFS"); | |
908 | return -ENOBUFS; | |
909 | } | |
910 | ||
911 | ASSERT(S_ISREG(object->backer->d_inode->i_mode)); | |
912 | ||
913 | cache = container_of(object->fscache.cache, | |
914 | struct cachefiles_cache, cache); | |
915 | ||
916 | /* write the page to the backing filesystem and let it store it in its | |
917 | * own time */ | |
918 | dget(object->backer); | |
919 | mntget(cache->mnt); | |
920 | file = dentry_open(object->backer, cache->mnt, O_RDWR, | |
921 | cache->cache_cred); | |
922 | if (IS_ERR(file)) { | |
923 | ret = PTR_ERR(file); | |
924 | } else { | |
925 | ret = -EIO; | |
926 | if (file->f_op->write) { | |
927 | pos = (loff_t) page->index << PAGE_SHIFT; | |
a17754fb DH |
928 | |
929 | /* we mustn't write more data than we have, so we have | |
930 | * to beware of a partial page at EOF */ | |
931 | eof = object->fscache.store_limit_l; | |
932 | len = PAGE_SIZE; | |
933 | if (eof & ~PAGE_MASK) { | |
934 | ASSERTCMP(pos, <, eof); | |
935 | if (eof - pos < PAGE_SIZE) { | |
936 | _debug("cut short %llx to %llx", | |
937 | pos, eof); | |
938 | len = eof - pos; | |
939 | ASSERTCMP(pos + len, ==, eof); | |
940 | } | |
941 | } | |
942 | ||
			data = kmap(page);
			old_fs = get_fs();
			set_fs(KERNEL_DS);
			ret = file->f_op->write(
				file, (const void __user *) data, len, &pos);
			set_fs(old_fs);
			kunmap(page);
			if (ret != len)
				ret = -EIO;
		}
		fput(file);
	}

	if (ret < 0) {
		if (ret == -EIO)
			cachefiles_io_error_obj(
				object, "Write page to backing file failed");
		ret = -ENOBUFS;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;

	object = container_of(_object, struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lu}", object, page->index);

	spin_unlock(&object->fscache.cookie->lock);
}