/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"
/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie,
				struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);
/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie,
				  struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
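
/*
 * Illustrative sketch, not part of this file: a netfs would normally reach
 * the two helpers above through the fscache_check_page_write() and
 * fscache_wait_on_page_write() wrappers in linux/fscache.h, e.g. before
 * invalidating or laundering a page the cache may still be writing out:
 *
 *	if (fscache_check_page_write(cookie, page))
 *		fscache_wait_on_page_write(cookie, page);
 *
 * where "cookie" is the netfs's data-file cookie for the inode that backs
 * the page.
 */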
/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* We will wait here if we're allowed to, but that could deadlock the
	 * allocator as the work threads writing to the cache may all end up
	 * sleeping on memory allocation, so we may need to impose a timeout
	 * too. */
	if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	__fscache_wait_on_page_write(cookie, page);
	gfp &= ~__GFP_WAIT;
	goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
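
/*
 * Illustrative sketch, not part of this file: __fscache_maybe_release_page()
 * is intended to back a netfs's ->releasepage(), via the
 * fscache_maybe_release_page() wrapper in linux/fscache.h.  The "mynetfs"
 * names are hypothetical:
 *
 *	static int mynetfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct mynetfs_inode *vi = MYNETFS_I(page->mapping->host);
 *
 *		if (PageFsCache(page) &&
 *		    !fscache_maybe_release_page(vi->fscache, page, gfp))
 *			return 0;
 *		return 1;
 *	}
 *
 * Returning 0 tells the VM that the page cannot be released yet.
 */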
/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		page_cache_release(xpage);
}
/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		if (ret < 0)
			fscache_abort_object(object);
	}

	fscache_op_complete(op, true);
	_leave("");
}
/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie = false;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, fscache_attr_changed_op, NULL);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs_dec;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs:
	spin_unlock(&cookie->lock);
	kfree(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
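
/*
 * Illustrative sketch, not part of this file: a netfs calls this through
 * the fscache_attr_changed() wrapper in linux/fscache.h once an attribute
 * such as the file size has changed, so the cache can resize or trim the
 * backing object.  The cookie field shown is hypothetical:
 *
 *	i_size_write(inode, new_size);
 *	fscache_attr_changed(MYNETFS_I(inode)->fscache);
 */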
/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	ASSERTCMP(atomic_read(&op->n_pages), ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->op.object->cookie, op->context);

	_leave("");
}
/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct fscache_cookie *cookie,
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
	op->op.flags	= FSCACHE_OP_MYTHREAD |
		(1UL << FSCACHE_OP_WAITING) |
		(1UL << FSCACHE_OP_UNUSE_COOKIE);
	op->mapping	= mapping;
	op->end_io_func	= end_io_func;
	op->context	= context;
	op->start_time	= jiffies;
	INIT_LIST_HEAD(&op->to_do);
	return op;
}
/*
 * wait for a deferred lookup to complete
 */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}
/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	atomic_set(&op->n_pages, 0);
}
/*
 * wait for an object to become active (or dead)
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
					  struct fscache_operation *op,
					  atomic_t *stat_op_waits,
					  atomic_t *stat_object_dead,
					  void (*do_cancel)(struct fscache_operation *))
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
		goto check_if_dead;

	_debug(">>> WT");
	if (stat_op_waits)
		fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		ret = fscache_cancel_op(op, do_cancel);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->state == FSCACHE_OP_ST_CANCELLED) {
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dead(object))) {
		pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
		fscache_cancel_op(op, do_cancel);
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		return -ENOBUFS;
	}
	return 0;
}
/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping,
				     end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead),
		fscache_do_cancel_retrieval);
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
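
/*
 * Illustrative sketch, not part of this file: a netfs ->readpage() would
 * typically call this through the fscache_read_or_alloc_page() wrapper in
 * linux/fscache.h and fall back to a server read when the cache has
 * nothing.  mynetfs_readpage_from_server() and the completion function
 * are hypothetical:
 *
 *	ret = fscache_read_or_alloc_page(cookie, page,
 *					 mynetfs_readpage_complete, NULL,
 *					 GFP_KERNEL);
 *	switch (ret) {
 *	case 0:
 *		return 0;
 *	case -ENOBUFS:
 *	case -ENODATA:
 *	default:
 *		return mynetfs_readpage_from_server(page);
 *	}
 *
 * On 0, the page is filled asynchronously and mynetfs_readpage_complete()
 * is invoked when the read finishes.
 */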
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *		  pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *		  the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, *nr_pages);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead),
		fscache_do_cancel_retrieval);
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
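
/*
 * Illustrative sketch, not part of this file: in ->readpages(), the
 * fscache_read_or_alloc_pages() wrapper removes from "pages" whatever it
 * could dispatch and decrements *nr_pages to match; the netfs then reads
 * the remainder from the server.  Helper names are hypothetical:
 *
 *	ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
 *					  mynetfs_readpage_complete, NULL,
 *					  mapping_gfp_mask(mapping));
 *	if (ret == 0 && nr_pages == 0)
 *		return 0;
 *	return mynetfs_readpages_from_server(mapping, pages, nr_pages);
 */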
/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead),
		fscache_do_cancel_retrieval);
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
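
/*
 * Illustrative sketch, not part of this file, and an assumed pairing only:
 * the fscache_alloc_page() wrapper reserves space for a page the netfs
 * intends to store shortly, e.g. one filled by a local write rather than
 * by a cache read:
 *
 *	if (fscache_alloc_page(cookie, page, GFP_KERNEL) == 0)
 *		fscache_write_page(cookie, page, GFP_KERNEL);
 *
 * This assumes the cache backend marks the page (PG_fscache) as part of a
 * successful allocation, which fscache_write_page() then requires.
 */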
/*
 * Unmark pages allocated in the readahead code path (via:
 * fscache_readpages_or_alloc) after delegating to the base filesystem
 */
void __fscache_readpages_cancel(struct fscache_cookie *cookie,
				struct list_head *pages)
{
	struct page *page;

	list_for_each_entry(page, pages, lru) {
		if (PageFsCache(page))
			__fscache_uncache_page(cookie, page);
	}
}
EXPORT_SYMBOL(__fscache_readpages_cancel);
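
/*
 * Illustrative sketch, not part of this file: if the netfs's ->readpages()
 * fails after the cache marked some pages, the leftover marks on the
 * still-unread pages should be cancelled through the
 * fscache_readpages_cancel() wrapper:
 *
 *	ret = mynetfs_readpages_from_server(mapping, pages, nr_pages);
 *	if (ret < 0)
 *		fscache_readpages_cancel(cookie, pages);
 */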
/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}
/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object)) {
		/* If we get here, then the on-disk cache object likely no
		 * longer exists, so we should just cancel this write
		 * operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [inactive]");
		return;
	}

	if (!cookie) {
		/* If we get here, then the cookie belonging to the object was
		 * detached, probably by the cookie being withdrawn due to
		 * memory pressure, which means that the pages we might write
		 * to the cache no longer exist - therefore, we can just
		 * cancel this write operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
		       _op->flags, _op->state, object->state->short_name,
		       object->flags);
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index > op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_abort_object(object);
		fscache_op_complete(&op->op, true);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op, true);
	_leave("");
}
/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	for (;;) {
		spin_lock(&cookie->stores_lock);
		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					       ARRAY_SIZE(results),
					       FSCACHE_COOKIE_PENDING_TAG);
		if (n == 0) {
			spin_unlock(&cookie->stores_lock);
			break;
		}

		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
		}

		spin_unlock(&cookie->stores_lock);

		for (i = n - 1; i >= 0; i--)
			page_cache_release(results[i]);
	}

	_leave("");
}
/*
 * request a page be stored in the cache
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_write_op,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	wake_cookie = __fscache_unuse_cookie(cookie);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	kfree(op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
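
/*
 * Illustrative sketch, not part of this file: a netfs usually calls the
 * fscache_write_page() wrapper once a PG_fscache-marked page is completely
 * up to date, and drops the mark if the store can't be queued:
 *
 *	if (PageFsCache(page) &&
 *	    fscache_write_page(cookie, page, GFP_KERNEL) != 0)
 *		fscache_uncache_page(cookie, page);
 */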
/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		static bool once_only;
		if (!once_only) {
			once_only = true;
			pr_warn("Cookie type %s marked page %lx multiple times\n",
				cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);
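
/*
 * Illustrative sketch, not part of this file: fscache_mark_page_cached() is
 * for cache backends rather than netfs'.  A backend would be expected to
 * mark each netfs page as it binds it to backing store during a retrieval,
 * then run the netfs's completion, roughly:
 *
 *	fscache_mark_page_cached(op, netfs_page);
 *	op->end_io_func(netfs_page, op->context, error);
 */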
/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	unsigned long loop;

	for (loop = 0; loop < pagevec->nr; loop++)
		fscache_mark_page_cached(op, pagevec->pages[loop]);

	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec, 0);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			next = page->index;
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (++next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
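
/*
 * Illustrative sketch, not part of this file: this is intended for a
 * netfs's inode-eviction path, where every PG_fscache page must be
 * unmarked before the pagecache is torn down.  The wrapper is
 * fscache_uncache_all_inode_pages() in linux/fscache.h; "mynetfs" names
 * are hypothetical:
 *
 *	static void mynetfs_evict_inode(struct inode *inode)
 *	{
 *		fscache_uncache_all_inode_pages(MYNETFS_I(inode)->fscache,
 *						inode);
 *		truncate_inode_pages(&inode->i_data, 0);
 *		clear_inode(inode);
 *	}
 */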