/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"
/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);
/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
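
/*
 * Illustrative sketch, not part of this file: a netfs normally reaches the
 * two helpers above through the fscache_check_page_write() and
 * fscache_wait_on_page_write() wrappers in linux/fscache.h, for example when
 * invalidating a page that may still be under write to the cache.  The
 * "example" names and inode layout below are hypothetical.
 */
#if 0
static void example_netfs_invalidate_page(struct example_netfs_inode *ni,
					  struct page *page)
{
	struct fscache_cookie *cookie = ni->fscache;

	if (PageFsCache(page)) {
		/* don't let the page go while the cache is writing it out */
		if (fscache_check_page_write(cookie, page))
			fscache_wait_on_page_write(cookie, page);
		fscache_uncache_page(cookie, page);
	}
}
#endif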
/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* we might want to wait here, but that could deadlock the allocator as
	 * the work threads writing to the cache may all end up sleeping
	 * on memory allocation */
	fscache_stat(&fscache_n_store_vmscan_busy);
	return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
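
/*
 * Illustrative sketch, not part of this file: the helper above backs the
 * fscache_maybe_release_page() wrapper in linux/fscache.h, which a netfs
 * would typically call from its ->releasepage() address_space op.  The
 * "example" names are hypothetical.
 */
#if 0
static int example_netfs_releasepage(struct page *page, gfp_t gfp)
{
	struct fscache_cookie *cookie = example_netfs_page_cookie(page);

	/* refuse to release the page if the cache can neither cancel nor
	 * finish the store that is pending against it */
	if (PageFsCache(page) &&
	    !fscache_maybe_release_page(cookie, page, gfp))
		return 0;

	return 1;	/* the page may be released */
}
#endif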
/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending storage */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		page_cache_release(xpage);
}
/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		if (ret < 0)
			fscache_abort_object(object);
	}

	fscache_op_complete(op);
	_leave("");
}
/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, fscache_attr_changed_op, NULL);
	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs:
	spin_unlock(&cookie->lock);
	kfree(op);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
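
/*
 * Illustrative sketch, not part of this file: a netfs calls the
 * fscache_attr_changed() wrapper from linux/fscache.h after it changes an
 * object's attributes (typically i_size), so that the cache can resize or
 * trim the backing store.  The "example" names are hypothetical.
 */
#if 0
static int example_netfs_setattr_done(struct example_netfs_inode *ni)
{
	/* tell the cache about the new attributes; a negative return just
	 * means the cache couldn't take the update */
	return fscache_attr_changed(ni->fscache);
}
#endif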
/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	ASSERTCMP(op->n_pages, ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->op.object->cookie, op->context);

	_leave("");
}
/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
	op->op.flags	= FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
	op->mapping	= mapping;
	op->end_io_func	= end_io_func;
	op->context	= context;
	op->start_time	= jiffies;
	INIT_LIST_HEAD(&op->to_do);
	return op;
}
/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}
/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
						 struct fscache_retrieval *op,
						 atomic_t *stat_op_waits,
						 atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
		goto check_if_dead;

	_debug(">>> WT");
	fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) < 0) {
		ret = fscache_cancel_op(&op->op);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
		fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dead(object))) {
		pr_err("%s() = -ENOBUFS [obj dead %d]", __func__, op->op.state);
		fscache_cancel_op(&op->op);
		fscache_stat(stat_object_dead);
		return -ENOBUFS;
	}
	return 0;
}
/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	op->n_pages = 1;

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
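
/*
 * Illustrative sketch, not part of this file: a netfs drives the function
 * above via fscache_read_or_alloc_page() from linux/fscache.h, typically in
 * its ->readpage() op, falling back to a server read on -ENOBUFS or
 * -ENODATA.  The completion callback matches fscache_rw_complete_t.  All
 * "example" names are hypothetical.
 */
#if 0
static void example_netfs_read_from_cache_done(struct page *page,
					       void *context, int error)
{
	/* called once the cache has finished reading this page */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}

static int example_netfs_readpage(struct file *file, struct page *page)
{
	struct example_netfs_inode *ni = example_netfs_i(page->mapping->host);
	int ret;

	ret = fscache_read_or_alloc_page(ni->fscache, page,
					 example_netfs_read_from_cache_done,
					 NULL, GFP_KERNEL);
	if (ret == 0)
		return 0;	/* read dispatched to the cache */

	/* -ENOBUFS, -ENODATA, etc.: fetch the page from the server instead */
	return example_netfs_readpage_from_server(file, page);
}
#endif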
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *                pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *                the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	op->n_pages = *nr_pages;

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
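
/*
 * Illustrative sketch, not part of this file: the multi-page variant above is
 * reached through fscache_read_or_alloc_pages() from linux/fscache.h,
 * normally from ->readpages().  Pages the cache takes on are removed from the
 * list and *nr_pages is reduced; whatever remains must be fetched from the
 * server.  The "example" names are hypothetical and reuse the completion
 * callback sketched after __fscache_read_or_alloc_page().
 */
#if 0
static int example_netfs_readpages(struct file *file,
				   struct address_space *mapping,
				   struct list_head *pages, unsigned nr_pages)
{
	struct example_netfs_inode *ni = example_netfs_i(mapping->host);
	int ret;

	ret = fscache_read_or_alloc_pages(ni->fscache, mapping, pages,
					  &nr_pages,
					  example_netfs_read_from_cache_done,
					  NULL, GFP_KERNEL);
	if (ret == 0 && nr_pages == 0)
		return 0;	/* the cache is reading every page */

	/* read whatever the cache didn't take from the server */
	return example_netfs_readpages_from_server(file, mapping, pages,
						   nr_pages);
}
#endif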
/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	op->n_pages = 1;

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}
/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object) || !cookie) {
		spin_unlock(&object->lock);
		_leave("");
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index > op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_abort_object(object);
		fscache_op_complete(&op->op);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op);
	_leave("");
}
/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	while (spin_lock(&cookie->stores_lock),
	       n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					      ARRAY_SIZE(results),
					      FSCACHE_COOKIE_PENDING_TAG),
	       n > 0) {
		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
		}

		spin_unlock(&cookie->stores_lock);

		for (i = n - 1; i >= 0; i--)
			page_cache_release(results[i]);
	}

	spin_unlock(&cookie->stores_lock);
	_leave("");
}
/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *	    fill op)
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_write_op,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);

	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	kfree(op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
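
/*
 * Illustrative sketch, not part of this file: once a netfs has a page's data
 * (read from the server or newly written), it asks the cache to store it via
 * fscache_write_page() from linux/fscache.h; on failure the page must be
 * uncached so the PG_fscache mark doesn't go stale.  The "example" names are
 * hypothetical.
 */
#if 0
static void example_netfs_store_page(struct example_netfs_inode *ni,
				     struct page *page)
{
	int ret;

	if (!PageFsCache(page))
		return;		/* the cache never marked this page */

	ret = fscache_write_page(ni->fscache, page, GFP_KERNEL);
	if (ret < 0) {
		/* -ENOMEM or -ENOBUFS: withdraw the page from the cache's
		 * view rather than leaving the mark behind */
		fscache_uncache_page(ni->fscache, page);
	}
}
#endif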
/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		static bool once_only;
		if (!once_only) {
			once_only = true;
			printk(KERN_WARNING "FS-Cache:"
			       " Cookie type %s marked page %lx"
			       " multiple times\n",
			       cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);
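
/*
 * Illustrative sketch, not part of this file: a cache backend (CacheFiles,
 * for instance) calls fscache_mark_page_cached() for each netfs page it
 * decides to back, typically just before completing the read for that page
 * with fscache_end_io().  The "example" name is hypothetical; the helpers
 * used are assumed to be the ones declared in linux/fscache-cache.h.
 */
#if 0
static void example_backend_read_done(struct fscache_retrieval *op,
				      struct page *netfs_page, int error)
{
	if (error == 0)
		fscache_mark_page_cached(op, netfs_page);

	/* hand the page back to the netfs and account for its completion */
	fscache_end_io(op, netfs_page, error);
	fscache_retrieval_complete(op, 1);
}
#endif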
/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	unsigned long loop;

	for (loop = 0; loop < pagevec->nr; loop++)
		fscache_mark_page_cached(op, pagevec->pages[loop]);

	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec, 0);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			next = page->index;
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (++next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);