/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Page.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include "../../include/linux/libcfs/libcfs.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include <linux/list.h>

#include "../include/cl_object.h"
#include "cl_internal.h"
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
# define PASSERT(env, page, expr)                                      \
do {                                                                   \
        if (unlikely(!(expr))) {                                       \
                CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");     \
                LBUG();                                                \
        }                                                              \
} while (0)

# define PINVRNT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
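
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * PASSERT() always evaluates its expression and calls LBUG() on failure,
 * whereas this PINVRNT() variant only type-checks its arguments through
 * sizeof, so the (possibly expensive) invariant expression is never
 * evaluated at run time:
 *
 *      PASSERT(env, page, page->cp_state == CPS_FREEING); // checked, may LBUG()
 *      PINVRNT(env, page, cl_page_invariant(page));       // compiled away here
 */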
/**
 * Internal version of cl_page_get().
 *
 * This function can be used to obtain an initial reference to a previously
 * unreferenced cached object. It can be called only if concurrent page
 * reclamation is somehow prevented, e.g., by keeping a lock on a VM page
 * associated with \a page.
 *
 * Use with care! Not exported.
 */
static void cl_page_get_trust(struct cl_page *page)
{
        LASSERT(atomic_read(&page->cp_ref) > 0);
        atomic_inc(&page->cp_ref);
}
/**
 * Returns a slice within a page, corresponding to the given layer in the
 * device stack.
 */
static const struct cl_page_slice *
cl_page_at_trusted(const struct cl_page *page,
                   const struct lu_device_type *dtype)
{
        const struct cl_page_slice *slice;

        list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                        return slice;
        }
        return NULL;
}
static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
        struct cl_object *obj = page->cp_obj;

        PASSERT(env, page, list_empty(&page->cp_batch));
        PASSERT(env, page, !page->cp_owner);
        PASSERT(env, page, !page->cp_req);
        PASSERT(env, page, page->cp_state == CPS_FREEING);

        while (!list_empty(&page->cp_layers)) {
                struct cl_page_slice *slice;

                slice = list_entry(page->cp_layers.next,
                                   struct cl_page_slice, cpl_linkage);
                list_del_init(page->cp_layers.next);
                if (unlikely(slice->cpl_ops->cpo_fini))
                        slice->cpl_ops->cpo_fini(env, slice);
        }
        lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
        cl_object_put(env, obj);
        lu_ref_fini(&page->cp_reference);
        kfree(page);
}
/**
 * Helper function updating page state. This is the only place in the code
 * where cl_page::cp_state field is mutated.
 */
static inline void cl_page_state_set_trust(struct cl_page *page,
                                           enum cl_page_state state)
{
        /* bypass const. */
        *(enum cl_page_state *)&page->cp_state = state;
}
struct cl_page *cl_page_alloc(const struct lu_env *env,
                              struct cl_object *o, pgoff_t ind,
                              struct page *vmpage,
                              enum cl_page_type type)
{
        struct cl_page *page;
        struct lu_object_header *head;

        page = kzalloc(cl_object_header(o)->coh_page_bufsize, GFP_NOFS);
        if (page) {
                int result = 0;

                atomic_set(&page->cp_ref, 1);
                page->cp_obj = o;
                cl_object_get(o);
                lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
                                     page);
                page->cp_vmpage = vmpage;
                cl_page_state_set_trust(page, CPS_CACHED);
                page->cp_type = type;
                INIT_LIST_HEAD(&page->cp_layers);
                INIT_LIST_HEAD(&page->cp_batch);
                INIT_LIST_HEAD(&page->cp_flight);
                mutex_init(&page->cp_mutex);
                lu_ref_init(&page->cp_reference);
                head = o->co_lu.lo_header;
                list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
                        if (o->co_ops->coo_page_init) {
                                result = o->co_ops->coo_page_init(env, o, page,
                                                                  ind);
                                if (result != 0) {
                                        cl_page_delete0(env, page);
                                        cl_page_free(env, page);
                                        page = ERR_PTR(result);
                                        break;
                                }
                        }
                }
        } else {
                page = ERR_PTR(-ENOMEM);
        }
        return page;
}
EXPORT_SYMBOL(cl_page_alloc);
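
/*
 * Usage sketch (editor's illustration): a transient page, used, e.g., for
 * direct IO, can be allocated directly; the caller owns the single initial
 * reference. "clob" and the surrounding code are hypothetical:
 *
 *      struct cl_page *page;
 *
 *      page = cl_page_alloc(env, clob, index, vmpage, CPT_TRANSIENT);
 *      if (IS_ERR(page))
 *              return PTR_ERR(page);
 *      ... use the page ...
 *      cl_page_delete(env, page);      // moves it to CPS_FREEING
 *      cl_page_put(env, page);         // last reference; cl_page_free() runs
 */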
/**
 * Returns a cl_page with index \a idx at the object \a o, and associated with
 * the VM page \a vmpage.
 *
 * This is the main entry point into the cl_page caching interface. First, a
 * cache (implemented as a per-object radix tree) is consulted. If the page is
 * found there, it is returned immediately. Otherwise a new page is allocated
 * and returned. In any case, an additional reference to the page is acquired.
 *
 * \see cl_object_find(), cl_lock_find()
 */
struct cl_page *cl_page_find(const struct lu_env *env,
                             struct cl_object *o,
                             pgoff_t idx, struct page *vmpage,
                             enum cl_page_type type)
{
        struct cl_page *page = NULL;
        struct cl_object_header *hdr;

        LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);

        hdr = cl_object_header(o);

        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
               idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
        /* fast path. */
        if (type == CPT_CACHEABLE) {
                /*
                 * vmpage lock is used to protect the child/parent
                 * relationship
                 */
                KLASSERT(PageLocked(vmpage));
                /*
                 * cl_vmpage_page() can be called here without any locks as
                 *
                 * - "vmpage" is locked (which prevents ->private from
                 *   concurrent updates), and
                 *
                 * - "o" cannot be destroyed while current thread holds a
                 *   reference on it.
                 */
                page = cl_vmpage_page(vmpage, o);
                if (page)
                        return page;
        }

        /* allocate and initialize cl_page */
        page = cl_page_alloc(env, o, idx, vmpage, type);
        return page;
}
EXPORT_SYMBOL(cl_page_find);
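
/*
 * Usage sketch (editor's illustration): the typical caller looks up or
 * creates the cl_page while holding the VM page lock, as the CPT_CACHEABLE
 * fast path above requires; "clob" is a hypothetical cl_object:
 *
 *      lock_page(vmpage);
 *      page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
 *      if (!IS_ERR(page)) {
 *              ... "page" holds an extra reference now ...
 *              cl_page_put(env, page);
 *      }
 *      unlock_page(vmpage);
 */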
static inline int cl_page_invariant(const struct cl_page *pg)
{
        /*
         * Page invariant is protected by a VM lock.
         */
        LINVRNT(cl_page_is_vmlocked(NULL, pg));

        return cl_page_in_use_noref(pg);
}
static void cl_page_state_set0(const struct lu_env *env,
                               struct cl_page *page, enum cl_page_state state)
{
        enum cl_page_state old;

        /*
         * Matrix of allowed state transitions [old][new], for sanity
         * checking. Unlisted transitions default to 0 (forbidden).
         */
        static const int allowed_transitions[CPS_NR][CPS_NR] = {
                [CPS_CACHED] = {
                        [CPS_OWNED]   = 1, /* io finds existing cached page */
                        [CPS_PAGEOUT] = 1, /* write-out from the cache */
                        [CPS_FREEING] = 1, /* eviction under memory pressure */
                },
                [CPS_OWNED] = {
                        [CPS_CACHED]  = 1, /* release to the cache */
                        [CPS_PAGEIN]  = 1, /* start read immediately */
                        [CPS_PAGEOUT] = 1, /* start write immediately */
                        [CPS_FREEING] = 1, /* lock invalidation or truncate */
                },
                [CPS_PAGEIN] = {
                        [CPS_CACHED]  = 1, /* io completion */
                },
                [CPS_PAGEOUT] = {
                        [CPS_CACHED]  = 1, /* io completion */
                },
                [CPS_FREEING] = {
                        /* no transitions out of CPS_FREEING */
                },
        };

        old = page->cp_state;
        PASSERT(env, page, allowed_transitions[old][state]);
        CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
        PASSERT(env, page, page->cp_state == old);
        PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner));
        cl_page_state_set_trust(page, state);
}
static void cl_page_state_set(const struct lu_env *env,
                              struct cl_page *page, enum cl_page_state state)
{
        cl_page_state_set0(env, page, state);
}
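
/*
 * Illustration (editor's sketch) of the state machine enforced above for a
 * cached page that is read in and later evicted; every arrow is one
 * cl_page_state_set() call and must be a 1-entry in allowed_transitions:
 *
 *      CPS_CACHED -> CPS_OWNED    (io owns the page)
 *      CPS_OWNED  -> CPS_PAGEIN   (read starts)
 *      CPS_PAGEIN -> CPS_CACHED   (io completion)
 *      CPS_CACHED -> CPS_FREEING  (eviction; terminal state)
 */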
/**
 * Acquires an additional reference to a page.
 *
 * This can be called only by a caller already possessing a reference to \a
 * page.
 *
 * \see cl_object_get(), cl_lock_get().
 */
void cl_page_get(struct cl_page *page)
{
        cl_page_get_trust(page);
}
EXPORT_SYMBOL(cl_page_get);
/**
 * Releases a reference to a page.
 *
 * When the last reference is released, the page is returned to the cache,
 * unless it is in cl_page_state::CPS_FREEING state, in which case it is
 * immediately destroyed.
 *
 * \see cl_object_put(), cl_lock_put().
 */
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
        CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                       atomic_read(&page->cp_ref));

        if (atomic_dec_and_test(&page->cp_ref)) {
                LASSERT(page->cp_state == CPS_FREEING);

                LASSERT(atomic_read(&page->cp_ref) == 0);
                PASSERT(env, page, !page->cp_owner);
                PASSERT(env, page, list_empty(&page->cp_batch));
                /*
                 * Page is no longer reachable by other threads. Tear
                 * it down.
                 */
                cl_page_free(env, page);
        }
}
EXPORT_SYMBOL(cl_page_put);
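
/*
 * Usage sketch (editor's illustration): references are paired; a thread that
 * obtained a page from cl_page_find() or took its own reference with
 * cl_page_get() releases it with cl_page_put():
 *
 *      cl_page_get(page);
 *      ... the page cannot be freed in this window ...
 *      cl_page_put(env, page);
 */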
/**
 * Returns a cl_page associated with a VM page, and given cl_object.
 */
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
        struct cl_page *page;

        KLASSERT(PageLocked(vmpage));

        /*
         * NOTE: absence of races and liveness of data are guaranteed by page
         *       lock on a "vmpage". That works because object destruction has
         *       bottom-to-top pass.
         */
        page = (struct cl_page *)vmpage->private;
        if (page) {
                cl_page_get_trust(page);
                LASSERT(page->cp_type == CPT_CACHEABLE);
        }
        return page;
}
EXPORT_SYMBOL(cl_vmpage_page);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                       const struct lu_device_type *dtype)
{
        return cl_page_at_trusted(page, dtype);
}
EXPORT_SYMBOL(cl_page_at);
#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)

#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
({                                                                      \
        const struct lu_env *__env = (_env);                            \
        struct cl_page *__page = (_page);                               \
        const struct cl_page_slice *__scan;                             \
        int __result;                                                   \
        ptrdiff_t __op = (_op);                                         \
        int (*__method)_proto;                                          \
                                                                        \
        __result = 0;                                                   \
        list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {  \
                __method = *(void **)((char *)__scan->cpl_ops + __op);  \
                if (__method) {                                         \
                        __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
                        if (__result != 0)                              \
                                break;                                  \
                }                                                       \
        }                                                               \
        if (__result > 0)                                               \
                __result = 0;                                           \
        __result;                                                       \
})

#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...)           \
({                                                                      \
        const struct lu_env *__env = (_env);                            \
        struct cl_page *__page = (_page);                               \
        const struct cl_page_slice *__scan;                             \
        int __result;                                                   \
        ptrdiff_t __op = (_op);                                         \
        int (*__method)_proto;                                          \
                                                                        \
        __result = 0;                                                   \
        list_for_each_entry_reverse(__scan, &__page->cp_layers,         \
                                    cpl_linkage) {                      \
                __method = *(void **)((char *)__scan->cpl_ops + __op);  \
                if (__method) {                                         \
                        __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
                        if (__result != 0)                              \
                                break;                                  \
                }                                                       \
        }                                                               \
        if (__result > 0)                                               \
                __result = 0;                                           \
        __result;                                                       \
})

#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                   \
do {                                                                    \
        const struct lu_env *__env = (_env);                            \
        struct cl_page *__page = (_page);                               \
        const struct cl_page_slice *__scan;                             \
        ptrdiff_t __op = (_op);                                         \
        void (*__method)_proto;                                         \
                                                                        \
        list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {  \
                __method = *(void **)((char *)__scan->cpl_ops + __op);  \
                if (__method)                                           \
                        (*__method)(__env, __scan, ## __VA_ARGS__);     \
        }                                                               \
} while (0)

#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)           \
do {                                                                    \
        const struct lu_env *__env = (_env);                            \
        struct cl_page *__page = (_page);                               \
        const struct cl_page_slice *__scan;                             \
        ptrdiff_t __op = (_op);                                         \
        void (*__method)_proto;                                         \
                                                                        \
        list_for_each_entry_reverse(__scan, &__page->cp_layers, cpl_linkage) { \
                __method = *(void **)((char *)__scan->cpl_ops + __op);  \
                if (__method)                                           \
                        (*__method)(__env, __scan, ## __VA_ARGS__);     \
        }                                                               \
} while (0)
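
/*
 * How the dispatch above works (editor's note): CL_PAGE_OP() turns a method
 * name into its byte offset within struct cl_page_operations, and the
 * INVOKE/INVOID macros fetch the function pointer at that offset from each
 * layer's cpl_ops, calling it if non-NULL. A call such as
 *
 *      result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
 *                              (const struct lu_env *,
 *                               const struct cl_page_slice *,
 *                               struct cl_io *, int),
 *                              io, nonblock);
 *
 * therefore behaves like calling slice->cpl_ops->cpo_own(env, slice, io,
 * nonblock) on every layer top-to-bottom, stopping at the first non-zero
 * result; the _REVERSE variants walk the layers bottom-up instead.
 */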
static int cl_page_invoke(const struct lu_env *env,
                          struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        return CL_PAGE_INVOKE(env, page, op,
                              (const struct lu_env *,
                               const struct cl_page_slice *, struct cl_io *),
                              io);
}
static void cl_page_invoid(const struct lu_env *env,
                           struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        CL_PAGE_INVOID(env, page, op,
                       (const struct lu_env *,
                        const struct cl_page_slice *, struct cl_io *), io);
}
static void cl_page_owner_clear(struct cl_page *page)
{
        if (page->cp_owner) {
                LASSERT(page->cp_owner->ci_owned_nr > 0);
                page->cp_owner->ci_owned_nr--;
                page->cp_owner = NULL;
                page->cp_task = NULL;
        }
}
static void cl_page_owner_set(struct cl_page *page)
{
        page->cp_owner->ci_owned_nr++;
}
void cl_page_disown0(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        enum cl_page_state state;

        state = pg->cp_state;
        PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
        PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
        cl_page_owner_clear(pg);

        if (state == CPS_OWNED)
                cl_page_state_set(env, pg, CPS_CACHED);
        /*
         * Completion call-backs are executed in the bottom-up order, so that
         * uppermost layer (llite), responsible for VFS/VM interaction, runs
         * last and can release locks safely.
         */
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
}
/**
 * Returns true iff the page is owned by the given io.
 */
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
        struct cl_io *top = cl_io_top((struct cl_io *)io);

        LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
        return pg->cp_state == CPS_OWNED && pg->cp_owner == top;
}
EXPORT_SYMBOL(cl_page_is_owned);
/**
 * Try to own a page by IO.
 *
 * Waits until the page is in cl_page_state::CPS_CACHED state, and then
 * switches it into cl_page_state::CPS_OWNED state.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post result == 0 iff cl_page_is_owned(pg, io)
 *
 * \retval 0   success
 *
 * \retval -ve failure, e.g., the page was destroyed (and landed in
 *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
 *             or was owned by another thread, or is in IO.
 *
 * \see cl_page_disown()
 * \see cl_page_operations::cpo_own()
 * \see cl_page_own_try()
 */
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *pg, int nonblock)
{
        int result;

        PINVRNT(env, pg, !cl_page_is_owned(pg, io));

        if (pg->cp_state == CPS_FREEING) {
                result = -ENOENT;
        } else {
                result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
                                        (const struct lu_env *,
                                         const struct cl_page_slice *,
                                         struct cl_io *, int),
                                        io, nonblock);
                if (result == 0) {
                        PASSERT(env, pg, !pg->cp_owner);
                        PASSERT(env, pg, !pg->cp_req);
                        pg->cp_owner = cl_io_top(io);
                        pg->cp_task = current;
                        cl_page_owner_set(pg);
                        if (pg->cp_state != CPS_FREEING) {
                                cl_page_state_set(env, pg, CPS_OWNED);
                        } else {
                                cl_page_disown0(env, io, pg);
                                result = -ENOENT;
                        }
                }
        }
        PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
        return result;
}
/**
 * Own a page; may block.
 *
 * \see cl_page_own0()
 */
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);
/**
 * Non-blocking version of cl_page_own().
 *
 * \see cl_page_own0()
 */
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);
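
/*
 * Usage sketch (editor's illustration): an io typically owns a page around
 * any state-changing operation and releases ownership when done; compare the
 * \pre/\post conditions above:
 *
 *      if (cl_page_own(env, io, pg) == 0) {
 *              ... pg is in CPS_OWNED, protected from concurrent io ...
 *              cl_page_disown(env, io, pg);
 *      }
 */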
/**
 * Assume page ownership.
 *
 * Called when page is already locked by the hosting VM.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_assume()
 */
void cl_page_assume(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
        PASSERT(env, pg, !pg->cp_owner);
        pg->cp_owner = cl_io_top(io);
        pg->cp_task = current;
        cl_page_owner_set(pg);
        cl_page_state_set(env, pg, CPS_OWNED);
}
EXPORT_SYMBOL(cl_page_assume);
/**
 * Releases page ownership without unlocking the page.
 *
 * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
 * underlying VM page (as VM is supposed to do this itself).
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_assume()
 */
void cl_page_unassume(const struct lu_env *env,
                      struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, CPS_CACHED);
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
}
EXPORT_SYMBOL(cl_page_unassume);
/**
 * Releases page ownership.
 *
 * Moves page into cl_page_state::CPS_CACHED.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_disown()
 */
void cl_page_disown(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
                pg->cp_state == CPS_FREEING);

        cl_page_disown0(env, io, pg);
}
EXPORT_SYMBOL(cl_page_disown);
/**
 * Called when page is to be removed from the object, e.g., as a result of
 * truncate.
 *
 * Calls cl_page_operations::cpo_discard() top-to-bottom.
 *
 * \pre cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_discard()
 */
void cl_page_discard(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
}
EXPORT_SYMBOL(cl_page_discard);
/**
 * Version of cl_page_delete() that can be called for not fully constructed
 * pages, e.g., in an error handling cl_page_find()->cl_page_delete0()
 * path. Doesn't check page invariant.
 */
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
{
        PASSERT(env, pg, pg->cp_state != CPS_FREEING);

        /*
         * Sever all ways to obtain new pointers to @pg.
         */
        cl_page_owner_clear(pg);

        cl_page_state_set0(env, pg, CPS_FREEING);

        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
                               (const struct lu_env *,
                                const struct cl_page_slice *));
}
/**
 * Called when a decision is made to throw page out of memory.
 *
 * Notifies all layers about page destruction by calling
 * cl_page_operations::cpo_delete() method top-to-bottom.
 *
 * Moves page into cl_page_state::CPS_FREEING state (this is the only place
 * where transition to this state happens).
 *
 * Eliminates all venues through which new references to the page can be
 * obtained:
 *
 *     - removes page from the radix trees,
 *
 *     - breaks linkage from VM page to cl_page.
 *
 * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
 * drain after some time, at which point page will be recycled.
 *
 * \pre  VM page is locked
 * \post pg->cp_state == CPS_FREEING
 *
 * \see cl_page_operations::cpo_delete()
 */
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        cl_page_delete0(env, pg);
}
EXPORT_SYMBOL(cl_page_delete);
/**
 * Marks page up-to-date.
 *
 * Calls cl_page_operations::cpo_export() through all layers top-to-bottom.
 * The layer responsible for VM interaction has to mark/clear the page as
 * up-to-date according to the \a uptodate argument.
 *
 * \see cl_page_operations::cpo_export()
 */
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int), uptodate);
}
EXPORT_SYMBOL(cl_page_export);
/**
 * Returns true iff \a pg is VM locked in a suitable sense by the calling
 * thread.
 */
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
{
        int result;
        const struct cl_page_slice *slice;

        slice = container_of(pg->cp_layers.next,
                             const struct cl_page_slice, cpl_linkage);
        PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked);
        /*
         * Call ->cpo_is_vmlocked() directly instead of going through
         * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
         * cl_page_invariant().
         */
        result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
        PASSERT(env, pg, result == -EBUSY || result == -ENODATA);

        return result == -EBUSY;
}
EXPORT_SYMBOL(cl_page_is_vmlocked);
static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
        return crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN;
}
static void cl_page_io_start(const struct lu_env *env,
                             struct cl_page *pg, enum cl_req_type crt)
{
        /*
         * Page is queued for IO, change its state.
         */
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, cl_req_type_state(crt));
}
/**
 * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
 * called top-to-bottom. Every layer either agrees to submit this page (by
 * returning 0), or requests to omit this page (by returning -EALREADY). The
 * layer handling interactions with the VM also has to inform the VM that the
 * page is under transfer now.
 */
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
                 struct cl_page *pg, enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));
        PINVRNT(env, pg, crt < CRT_NR);

        /*
         * XXX this has to be called bottom-to-top, so that llite can set up
         * PG_writeback without risking other layers deciding to skip this
         * page.
         */
        if (crt >= CRT_NR)
                return -EINVAL;
        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
        if (result == 0)
                cl_page_io_start(env, pg, crt);

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        return result;
}
EXPORT_SYMBOL(cl_page_prep);
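
/*
 * Transfer sketch (editor's illustration) of how the pieces above fit
 * together for an immediate write of one owned page:
 *
 *      result = cl_page_prep(env, io, pg, CRT_WRITE);
 *      if (result == 0) {
 *              ... pg is now in CPS_PAGEOUT; when the RPC finishes, the
 *              transfer engine calls cl_page_completion(env, pg, CRT_WRITE,
 *              ioret), which moves it back to CPS_CACHED ...
 *      } else if (result == -EALREADY) {
 *              ... some layer declined; the page is skipped ...
 *      }
 */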
/**
 * Notify layers about transfer completion.
 *
 * Invoked by transfer sub-system (which is a part of osc) to notify layers
 * that a transfer, of which this page is a part, has completed.
 *
 * Completion call-backs are executed in the bottom-up order, so that
 * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
 * and can release locks safely.
 *
 * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 * \post pg->cp_state == CPS_CACHED
 *
 * \see cl_page_operations::cpo_completion()
 */
void cl_page_completion(const struct lu_env *env,
                        struct cl_page *pg, enum cl_req_type crt, int ioret)
{
        struct cl_sync_io *anchor = pg->cp_sync_io;

        PASSERT(env, pg, crt < CRT_NR);
        /* cl_page::cp_req already cleared by the caller (osc_completion()) */
        PASSERT(env, pg, !pg->cp_req);
        PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
        if (crt == CRT_READ && ioret == 0) {
                PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
                pg->cp_flags |= CPF_READ_COMPLETED;
        }

        cl_page_state_set(env, pg, CPS_CACHED);

        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
                               (const struct lu_env *,
                                const struct cl_page_slice *, int), ioret);
        if (anchor) {
                LASSERT(cl_page_is_vmlocked(env, pg));
                LASSERT(pg->cp_sync_io == anchor);
                pg->cp_sync_io = NULL;
        }
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, pg);

        if (anchor)
                cl_sync_io_note(env, anchor, ioret);
}
EXPORT_SYMBOL(cl_page_completion);
/**
 * Notify layers that transfer formation engine decided to yank this page from
 * the cache and to make it a part of a transfer.
 *
 * \pre  pg->cp_state == CPS_CACHED
 * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 *
 * \see cl_page_operations::cpo_make_ready()
 */
int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
                       enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, crt < CRT_NR);

        if (crt >= CRT_NR)
                return -EINVAL;
        result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
                                (const struct lu_env *,
                                 const struct cl_page_slice *));
        if (result == 0) {
                PASSERT(env, pg, pg->cp_state == CPS_CACHED);
                cl_page_io_start(env, pg, crt);
        }
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        return result;
}
EXPORT_SYMBOL(cl_page_make_ready);
/**
 * Called when a page is being written back at the kernel's initiative.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
 *
 * \see cl_page_operations::cpo_flush()
 */
int cl_page_flush(const struct lu_env *env, struct cl_io *io,
                  struct cl_page *pg)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
        return result;
}
EXPORT_SYMBOL(cl_page_flush);
/**
 * Checks whether the page is protected by an extent lock of at least the
 * required mode.
 *
 * \return the same as in cl_page_operations::cpo_is_under_lock() method.
 * \see cl_page_operations::cpo_is_under_lock()
 */
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
                          struct cl_page *page, pgoff_t *max_index)
{
        int rc;

        PINVRNT(env, page, cl_page_invariant(page));

        rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
                                    (const struct lu_env *,
                                     const struct cl_page_slice *,
                                     struct cl_io *, pgoff_t *),
                                    io, max_index);
        return rc;
}
EXPORT_SYMBOL(cl_page_is_under_lock);
/**
 * Tells transfer engine that only part of a page is to be transmitted.
 *
 * \see cl_page_operations::cpo_clip()
 */
void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
                  int from, int to)
{
        PINVRNT(env, pg, cl_page_invariant(pg));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int, int),
                       from, to);
}
EXPORT_SYMBOL(cl_page_clip);
/**
 * Prints human readable representation of \a pg through \a printer.
 */
void cl_page_header_print(const struct lu_env *env, void *cookie,
                          lu_printer_t printer, const struct cl_page *pg)
{
        (*printer)(env, cookie,
                   "page@%p[%d %p %d %d %d %p %p %#x]\n",
                   pg, atomic_read(&pg->cp_ref), pg->cp_obj,
                   pg->cp_state, pg->cp_error, pg->cp_type,
                   pg->cp_owner, pg->cp_req, pg->cp_flags);
}
EXPORT_SYMBOL(cl_page_header_print);
/**
 * Prints human readable representation of \a pg, including all layers,
 * through \a printer.
 */
void cl_page_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_page *pg)
{
        cl_page_header_print(env, cookie, printer, pg);
        CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
                       (const struct lu_env *env,
                        const struct cl_page_slice *slice,
                        void *cookie, lu_printer_t p), cookie, printer);
        (*printer)(env, cookie, "end page@%p\n", pg);
}
EXPORT_SYMBOL(cl_page_print);
/**
 * Cancel a page which is still in a transfer.
 */
int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
{
        return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
                              (const struct lu_env *,
                               const struct cl_page_slice *));
}
EXPORT_SYMBOL(cl_page_cancel);
/**
 * Converts a page index into a byte offset within object \a obj. (The
 * original comments on cl_offset() and cl_index() were swapped relative to
 * what the functions actually do; they are corrected here.)
 */
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
        return (loff_t)idx << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);

/**
 * Converts a byte offset within object \a obj into a page index.
 */
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
        return offset >> PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);
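
/*
 * Worked example (editor's note): with the common 4 KiB page, PAGE_SHIFT is
 * 12, so cl_offset(obj, 3) == 3 << 12 == 12288, and cl_index(obj, 12288) ==
 * 12288 >> 12 == 3. cl_index() truncates, so any offset inside the page
 * (e.g., 12345) also maps to index 3.
 */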
int cl_page_size(const struct cl_object *obj)
{
        return 1 << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
/**
 * Adds page slice to the compound page.
 *
 * This is called by cl_object_operations::coo_page_init() methods to add a
 * per-layer state to the page. New state is added at the end of
 * cl_page::cp_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
 */
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
                       struct cl_object *obj, pgoff_t index,
                       const struct cl_page_operations *ops)
{
        list_add_tail(&slice->cpl_linkage, &page->cp_layers);
        slice->cpl_obj = obj;
        slice->cpl_index = index;
        slice->cpl_ops = ops;
        slice->cpl_page = page;
}
EXPORT_SYMBOL(cl_page_slice_add);
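
/*
 * Usage sketch (editor's illustration): a layer embeds its slice in a
 * per-layer page structure and registers it from its coo_page_init() method;
 * the "foo_*" names below are hypothetical, not part of this file:
 *
 *      static int foo_page_init(const struct lu_env *env,
 *                               struct cl_object *obj,
 *                               struct cl_page *page, pgoff_t index)
 *      {
 *              struct foo_page *fp = foo_page_of(page); // hypothetical helper
 *
 *              cl_page_slice_add(page, &fp->fp_cl, obj, index, &foo_page_ops);
 *              return 0;
 *      }
 */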
/**
 * Allocate and initialize cl_cache, called by ll_init_sbi().
 */
struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
{
        struct cl_client_cache *cache = NULL;

        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
        if (!cache)
                return NULL;

        /* Initialize cache data */
        atomic_set(&cache->ccc_users, 1);
        cache->ccc_lru_max = lru_page_max;
        atomic_set(&cache->ccc_lru_left, lru_page_max);
        spin_lock_init(&cache->ccc_lru_lock);
        INIT_LIST_HEAD(&cache->ccc_lru);

        atomic_set(&cache->ccc_unstable_nr, 0);
        init_waitqueue_head(&cache->ccc_unstable_waitq);

        return cache;
}
EXPORT_SYMBOL(cl_cache_init);
/**
 * Increase cl_cache refcount
 */
void cl_cache_incref(struct cl_client_cache *cache)
{
        atomic_inc(&cache->ccc_users);
}
EXPORT_SYMBOL(cl_cache_incref);
/**
 * Decrease cl_cache refcount and free the cache if refcount=0.
 * Since llite, lov and osc all hold cl_cache refcount,
 * the free will not cause a race. (LU-6173)
 */
void cl_cache_decref(struct cl_client_cache *cache)
{
        if (atomic_dec_and_test(&cache->ccc_users))
                kfree(cache);
}
EXPORT_SYMBOL(cl_cache_decref);
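
/*
 * Lifecycle sketch (editor's illustration): the cache is created once with a
 * reference held by the creator; each additional user takes its own
 * reference, and the last cl_cache_decref() frees it:
 *
 *      struct cl_client_cache *cache = cl_cache_init(lru_page_max);
 *      if (!cache)
 *              return -ENOMEM;
 *      cl_cache_incref(cache);  // e.g., handing the cache to another layer
 *      ...
 *      cl_cache_decref(cache);  // the layer's reference
 *      cl_cache_decref(cache);  // creator's reference; frees the cache
 */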