/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * Lustre Object.
 * These are the only exported functions, they provide some generic
 * infrastructure for managing object devices.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include "../../include/linux/libcfs/libcfs.h"

#include <linux/module.h>

/* hash_long() */
#include "../../include/linux/libcfs/libcfs_hash.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "../include/lustre_disk.h"
#include "../include/lustre_fid.h"
#include "../include/lu_object.h"
#include "../include/lu_ref.h"
#include <linux/list.h>
static void lu_object_free(const struct lu_env *env, struct lu_object *o);
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
/**
 * Decrease reference counter on object. If last reference is freed, return
 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
 * case, free object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *top;
	struct lu_site *site;
	struct lu_object *orig;
	struct cfs_hash_bd bd;
	const struct lu_fid *fid;

	top = o->lo_header;
	site = o->lo_dev->ld_site;
	orig = o;

	/*
	 * till we have full fids-on-OST implemented anonymous objects
	 * are possible in OSP. such an object isn't listed in the site
	 * so we should not remove it from the site.
	 */
	fid = lu_object_fid(o);
	if (fid_is_zero(fid)) {
		LASSERT(!top->loh_hash.next && !top->loh_hash.pprev);
		LASSERT(list_empty(&top->loh_lru));
		if (!atomic_dec_and_test(&top->loh_ref))
			return;
		list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
			if (o->lo_ops->loo_object_release)
				o->lo_ops->loo_object_release(env, o);
		}
		lu_object_free(env, orig);
		return;
	}

	cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
	bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

	if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
		if (lu_object_is_dying(top)) {
			/*
			 * somebody may be waiting for this, currently only
			 * used for cl_object, see cl_object_put_last().
			 */
			wake_up_all(&bkt->lsb_marche_funebre);
		}
		return;
	}

	/*
	 * When last reference is released, iterate over object
	 * layers, and notify them that object is no longer busy.
	 */
	list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_release)
			o->lo_ops->loo_object_release(env, o);
	}

	if (!lu_object_is_dying(top)) {
		LASSERT(list_empty(&top->loh_lru));
		list_add_tail(&top->loh_lru, &bkt->lsb_lru);
		bkt->lsb_lru_len++;
		lprocfs_counter_incr(site->ls_stats, LU_SS_LRU_LEN);
		CDEBUG(D_INODE, "Add %p to site lru. hash: %p, bkt: %p, lru_len: %ld\n",
		       o, site->ls_obj_hash, bkt, bkt->lsb_lru_len);
		cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
		return;
	}

	/*
	 * If object is dying (will not be cached), then remove it
	 * from hash table and LRU.
	 *
	 * This is done with hash table and LRU lists locked. As the only
	 * way to acquire first reference to previously unreferenced
	 * object is through hash-table lookup (lu_object_find()),
	 * or LRU scanning (lu_site_purge()), that are done under hash-table
	 * and LRU lock, no race with concurrent object lookup is possible
	 * and we can safely destroy object below.
	 */
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
		cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
	cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
	/*
	 * Object was already removed from hash and lru above, can
	 * kill it.
	 */
	lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
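/*
 * Illustrative pairing (a sketch, not code from this file): a reference
 * obtained through lu_object_find_at() is dropped with lu_object_put();
 * the env/dev/fid variables below are assumed to exist in the caller.
 *
 *	struct lu_object *o;
 *
 *	o = lu_object_find_at(env, dev, fid, NULL);
 *	if (!IS_ERR(o)) {
 *		... use the object ...
 *		lu_object_put(env, o);
 *	}
 *
 * When the last reference drops, the object either parks on the bucket
 * LRU (cacheable case) or is freed immediately (dying case).
 */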
/**
 * Kill the object and take it out of LRU cache.
 * Currently used by client code for layout change.
 */
void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
		struct lu_site *site = o->lo_dev->ld_site;
		struct cfs_hash *obj_hash = site->ls_obj_hash;
		struct cfs_hash_bd bd;

		cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
		if (!list_empty(&top->loh_lru)) {
			struct lu_site_bkt_data *bkt;

			list_del_init(&top->loh_lru);
			bkt = cfs_hash_bd_extra_get(obj_hash, &bd);
			bkt->lsb_lru_len--;
			lprocfs_counter_decr(site->ls_stats, LU_SS_LRU_LEN);
		}
		cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
		cfs_hash_bd_unlock(obj_hash, &bd, 1);
	}
}
EXPORT_SYMBOL(lu_object_unhash);
/**
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
					 struct lu_device *dev,
					 const struct lu_fid *f,
					 const struct lu_object_conf *conf)
{
	struct lu_object *scan;
	struct lu_object *top;
	struct list_head *layers;
	unsigned int init_mask = 0;
	unsigned int init_flag;
	int clean;
	int result;

	/*
	 * Create top-level object slice. This will also create
	 * lu_object_header.
	 */
	top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
	if (!top)
		return ERR_PTR(-ENOMEM);
	if (IS_ERR(top))
		return top;
	/*
	 * This is the only place where object fid is assigned. It's constant
	 * after this point.
	 */
	top->lo_header->loh_fid = *f;
	layers = &top->lo_header->loh_layers;

	do {
		/*
		 * Call ->loo_object_init() repeatedly, until no more new
		 * object slices are created.
		 */
		clean = 1;
		init_flag = 1;
		list_for_each_entry(scan, layers, lo_linkage) {
			if (init_mask & init_flag)
				goto next;
			clean = 0;
			scan->lo_header = top->lo_header;
			result = scan->lo_ops->loo_object_init(env, scan, conf);
			if (result != 0) {
				lu_object_free(env, top);
				return ERR_PTR(result);
			}
			init_mask |= init_flag;
next:
			init_flag <<= 1;
		}
	} while (!clean);

	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_start) {
			result = scan->lo_ops->loo_object_start(env, scan);
			if (result != 0) {
				lu_object_free(env, top);
				return ERR_PTR(result);
			}
		}
	}

	lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
	return top;
}
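/*
 * Sketch of a hypothetical mid-layer ->loo_object_init() (all "sample"
 * names are assumptions, not code from this file): allocating a slice for
 * the device below and adding it to the header is what makes the do/while
 * loop in lu_object_alloc() iterate again.
 *
 *	static int sample_object_init(const struct lu_env *env,
 *				      struct lu_object *o,
 *				      const struct lu_object_conf *conf)
 *	{
 *		struct lu_device *under = sample_lower_device(o->lo_dev);
 *		struct lu_object *below;
 *
 *		below = under->ld_ops->ldo_object_alloc(env, o->lo_header,
 *							under);
 *		if (!below)
 *			return -ENOMEM;
 *		lu_object_add(o, below);
 *		return 0;
 *	}
 */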
/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_site *site;
	struct lu_object *scan;
	struct list_head *layers;
	struct list_head splice;

	site = o->lo_dev->ld_site;
	layers = &o->lo_header->loh_layers;
	bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
	/*
	 * First call ->loo_object_delete() method to release all resources.
	 */
	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_delete)
			scan->lo_ops->loo_object_delete(env, scan);
	}

	/*
	 * Then, splice object layers into stand-alone list, and call
	 * ->loo_object_free() on all layers to free memory. Splice is
	 * necessary, because lu_object_header is freed together with the
	 * top-level slice.
	 */
	INIT_LIST_HEAD(&splice);
	list_splice_init(layers, &splice);
	while (!list_empty(&splice)) {
		/*
		 * Free layers in bottom-to-top order, so that object header
		 * lives as long as possible and ->loo_object_free() methods
		 * can look at its contents.
		 */
		o = container_of0(splice.prev, struct lu_object, lo_linkage);
		list_del_init(&o->lo_linkage);
		o->lo_ops->loo_object_free(env, o);
	}

	if (waitqueue_active(&bkt->lsb_marche_funebre))
		wake_up_all(&bkt->lsb_marche_funebre);
}
/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
	struct lu_object_header *h;
	struct lu_object_header *temp;
	struct lu_site_bkt_data *bkt;
	struct cfs_hash_bd bd;
	struct cfs_hash_bd bd2;
	struct list_head dispose;
	int did_sth;
	int start;
	int count;
	int bnr;
	int i;

	if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
		return 0;

	INIT_LIST_HEAD(&dispose);
	/*
	 * Under LRU list lock, scan LRU list and move unreferenced objects to
	 * the dispose list, removing them from LRU and hash table.
	 */
	start = s->ls_purge_start;
	bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
again:
	did_sth = 0;
	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		if (i < start)
			continue;
		count = bnr;
		cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

		list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
			LASSERT(atomic_read(&h->loh_ref) == 0);

			cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
			LASSERT(bd.bd_bucket == bd2.bd_bucket);

			cfs_hash_bd_del_locked(s->ls_obj_hash,
					       &bd2, &h->loh_hash);
			list_move(&h->loh_lru, &dispose);
			bkt->lsb_lru_len--;
			lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
			if (did_sth == 0)
				did_sth = 1;

			if (nr != ~0 && --nr == 0)
				break;

			if (count > 0 && --count == 0)
				break;
		}
		cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
		cond_resched();
		/*
		 * Free everything on the dispose list. This is safe against
		 * races due to the reasons described in lu_object_put().
		 */
		while (!list_empty(&dispose)) {
			h = container_of0(dispose.next,
					  struct lu_object_header, loh_lru);
			list_del_init(&h->loh_lru);
			lu_object_free(env, lu_object_top(h));
			lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
		}

		if (nr == 0)
			break;
	}

	if (nr != 0 && did_sth && start != 0) {
		start = 0; /* restart from the first bucket */
		goto again;
	}
	/* race on s->ls_purge_start, but nobody cares */
	s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);

	return nr;
}
EXPORT_SYMBOL(lu_site_purge);
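/*
 * Usage note (illustrative sketch): \a nr is a budget of cold objects to
 * free, spread over the hash buckets; ~0 asks for a full drain, which is
 * how lu_stack_fini() and the shrinker below use it:
 *
 *	lu_site_purge(env, site, 128);	free at most 128 cold objects
 *	lu_site_purge(env, site, ~0);	drain the whole site LRU
 */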
/*
 * Code below has to jump through certain loops to output object description
 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This doesn't fit
 * very well into libcfs_debug_msg() interface that assumes that each message
 * supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of lu_cdebug_key key), until terminating newline
 * character is detected.
 */

enum {
	/**
	 * Maximal line size.
	 *
	 * XXX overflow is not handled correctly.
	 */
	LU_CDEBUG_LINE = 512
};

struct lu_cdebug_data {
	/**
	 * Temporary buffer.
	 */
	char lck_area[LU_CDEBUG_LINE];
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
static struct lu_context_key lu_global_key = {
	.lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
		    LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
	.lct_init = lu_global_key_init,
	.lct_fini = lu_global_key_fini
};
/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
		      void *cookie, const char *format, ...)
{
	struct libcfs_debug_msg_data *msgdata = cookie;
	struct lu_cdebug_data *key;
	int used;
	int complete;
	va_list args;

	va_start(args, format);

	key = lu_context_key_get(&env->le_ctx, &lu_global_key);

	used = strlen(key->lck_area);
	complete = format[strlen(format) - 1] == '\n';
	/*
	 * Append new chunk to the buffer.
	 */
	vsnprintf(key->lck_area + used,
		  ARRAY_SIZE(key->lck_area) - used, format, args);
	if (complete) {
		if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
			libcfs_debug_msg(msgdata, "%s\n", key->lck_area);
		key->lck_area[0] = 0;
	}
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
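/*
 * Illustrative call sequence (a sketch; mirrors how the lu_object_print()
 * machinery below drives this printer). LIBCFS_DEBUG_MSG_DATA_DECL()
 * builds the cookie that lu_cdebug_printer() expects:
 *
 *	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_INFO, NULL);
 *
 *	lu_object_print(env, &msgdata, lu_cdebug_printer, o);
 *
 * Chunks without a trailing '\n' accumulate in the per-context buffer;
 * the buffered line is emitted once a chunk ends with '\n'.
 */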
/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
			    lu_printer_t printer,
			    const struct lu_object_header *hdr)
{
	(*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
		   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
		   PFID(&hdr->loh_fid),
		   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
		   list_empty((struct list_head *)&hdr->loh_lru) ? "" : " lru",
		   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);
/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
		     lu_printer_t printer, const struct lu_object *o)
{
	static const char ruler[] = "........................................";
	struct lu_object_header *top;
	int depth = 4;

	top = o->lo_header;
	lu_object_header_print(env, cookie, printer, top);
	(*printer)(env, cookie, "{\n");

	list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		/*
		 * print `.' \a depth times followed by type name and address
		 */
		(*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
			   o->lo_dev->ld_type->ldt_name, o);

		if (o->lo_ops->loo_object_print)
			(*o->lo_ops->loo_object_print)(env, cookie, printer, o);

		(*printer)(env, cookie, "\n");
	}

	(*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);
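/*
 * Shape of the output, pieced together from the format strings above
 * (pointer values, fid, and layer names below are hypothetical):
 *
 *	header@ffff88003a1b2c00[0x0, 1, [0x200000bd0:0x3:0x0] hash exist]{
 *	    ....lov-object@ffff88003a1b2d40
 *	    ....osc-object@ffff88003a1b2e80
 *	} header@ffff88003a1b2c00
 */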
static struct lu_object *htable_lookup(struct lu_site *s,
				       struct cfs_hash_bd *bd,
				       const struct lu_fid *f,
				       wait_queue_t *waiter,
				       __u64 *version)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *h;
	struct hlist_node *hnode;
	__u64 ver = cfs_hash_bd_version_get(bd);

	if (*version == ver)
		return ERR_PTR(-ENOENT);

	*version = ver;
	bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
	/* cfs_hash_bd_peek_locked is a somehow "internal" function
	 * of cfs_hash, it doesn't add refcount on object.
	 */
	hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
	if (!hnode) {
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
		return ERR_PTR(-ENOENT);
	}

	h = container_of0(hnode, struct lu_object_header, loh_hash);
	if (likely(!lu_object_is_dying(h))) {
		cfs_hash_get(s->ls_obj_hash, hnode);
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
		if (!list_empty(&h->loh_lru)) {
			list_del_init(&h->loh_lru);
			bkt->lsb_lru_len--;
			lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
		}
		return lu_object_top(h);
	}

	/*
	 * Lookup found an object being destroyed; this object cannot be
	 * returned (to assure that references to dying objects are eventually
	 * drained), and moreover, lookup has to wait until object is freed.
	 */
	init_waitqueue_entry(waiter, current);
	add_wait_queue(&bkt->lsb_marche_funebre, waiter);
	set_current_state(TASK_UNINTERRUPTIBLE);
	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
	return ERR_PTR(-EAGAIN);
}
/**
 * Search cache for an object with the fid \a f. If such object is found,
 * return it. Otherwise, create new object, insert it into cache and return
 * it. In any case, additional reference is acquired on the returned object.
 */
static struct lu_object *lu_object_find(const struct lu_env *env,
					struct lu_device *dev,
					const struct lu_fid *f,
					const struct lu_object_conf *conf)
{
	return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
static struct lu_object *lu_object_new(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *o;
	struct cfs_hash *hs;
	struct cfs_hash_bd bd;

	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	hs = dev->ld_site->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
	cfs_hash_bd_unlock(hs, &bd, 1);
	return o;
}
/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
					    struct lu_device *dev,
					    const struct lu_fid *f,
					    const struct lu_object_conf *conf,
					    wait_queue_t *waiter)
{
	struct lu_object *o;
	struct lu_object *shadow;
	struct lu_site *s;
	struct cfs_hash *hs;
	struct cfs_hash_bd bd;
	__u64 version = 0;

	/*
	 * This uses standard index maintenance protocol:
	 *
	 *     - search index under lock, and return object if found;
	 *     - otherwise, unlock index, allocate new object;
	 *     - lock index and search again;
	 *     - if nothing is found (usual case), insert newly created
	 *       object into index;
	 *     - otherwise (race: other thread inserted object), free
	 *       object just allocated.
	 *     - unlock index;
	 *     - return object.
	 *
	 * For "LOC_F_NEW" case, we are sure the object is newly established.
	 * It is unnecessary to perform lookup-alloc-lookup-insert, instead,
	 * just alloc and insert directly.
	 *
	 * If dying object is found during index search, add @waiter to the
	 * site wait-queue and return ERR_PTR(-EAGAIN).
	 */
	if (conf && conf->loc_flags & LOC_F_NEW)
		return lu_object_new(env, dev, f, conf);

	s = dev->ld_site;
	hs = s->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	o = htable_lookup(s, &bd, f, waiter, &version);
	cfs_hash_bd_unlock(hs, &bd, 1);
	if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
		return o;

	/*
	 * Allocate new object. This may result in rather complicated
	 * operations, including fld queries, inode loading, etc.
	 */
	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	LASSERT(lu_fid_eq(lu_object_fid(o), f));

	cfs_hash_bd_lock(hs, &bd, 1);

	shadow = htable_lookup(s, &bd, f, waiter, &version);
	if (likely(PTR_ERR(shadow) == -ENOENT)) {
		cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
		cfs_hash_bd_unlock(hs, &bd, 1);
		return o;
	}

	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
	cfs_hash_bd_unlock(hs, &bd, 1);
	lu_object_free(env, o);
	return shadow;
}
/**
 * Much like lu_object_find(), but top level device of object is specifically
 * \a dev rather than top level device of the site. This interface allows
 * objects of different "stacking" to be created within the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
				    struct lu_device *dev,
				    const struct lu_fid *f,
				    const struct lu_object_conf *conf)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object *obj;
	wait_queue_t wait;

	while (1) {
		obj = lu_object_find_try(env, dev, f, conf, &wait);
		if (obj != ERR_PTR(-EAGAIN))
			return obj;
		/*
		 * lu_object_find_try() already added waiter into the
		 * wait queue.
		 */
		schedule();
		bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
		remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
	}
}
EXPORT_SYMBOL(lu_object_find_at);
/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *top;
	struct lu_object *obj;

	top = lu_object_find(env, dev, f, conf);
	if (!IS_ERR(top)) {
		obj = lu_object_locate(top->lo_header, dev->ld_type);
		if (!obj)
			lu_object_put(env, top);
	} else {
		obj = top;
	}
	return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
/**
 * Global list of all device types.
 */
static LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
	int result = 0;

	INIT_LIST_HEAD(&ldt->ldt_linkage);
	if (ldt->ldt_ops->ldto_init)
		result = ldt->ldt_ops->ldto_init(ldt);
	if (result == 0)
		list_add(&ldt->ldt_linkage, &lu_device_types);
	return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
	list_del_init(&ldt->ldt_linkage);
	if (ldt->ldt_ops->ldto_fini)
		ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);
void lu_types_stop(void)
{
	struct lu_device_type *ldt;

	list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
		if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
			ldt->ldt_ops->ldto_stop(ldt);
	}
}
EXPORT_SYMBOL(lu_types_stop);
/**
 * Global list of all sites on this node
 */
static LIST_HEAD(lu_sites);
static DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
	struct lu_env *lsp_env;
	void *lsp_cookie;
	lu_printer_t lsp_printer;
};
static int
lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		  struct hlist_node *hnode, void *data)
{
	struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (!list_empty(&h->loh_layers)) {
		const struct lu_object *o;

		o = lu_object_top(h);
		lu_object_print(arg->lsp_env, arg->lsp_cookie,
				arg->lsp_printer, o);
	} else {
		lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
				       arg->lsp_printer, h);
	}
	return 0;
}
/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
		   lu_printer_t printer)
{
	struct lu_site_print_arg arg = {
		.lsp_env     = (struct lu_env *)env,
		.lsp_cookie  = cookie,
		.lsp_printer = printer,
	};

	cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);
enum {
	LU_CACHE_PERCENT_MAX	 = 50,
	LU_CACHE_PERCENT_DEFAULT = 20
};

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
module_param(lu_cache_percent, int, 0644);
MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");
/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
	unsigned long cache_size;
	int bits;

	/*
	 * Calculate hash table size, assuming that we want reasonable
	 * performance when 20% of total memory is occupied by cache of
	 * lu_objects.
	 *
	 * Size of lu_object is (arbitrary) taken as 1K (together with inode).
	 */
	cache_size = totalram_pages;

#if BITS_PER_LONG == 32
	/* limit hashtable size for lowmem systems to low RAM */
	if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
		cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
#endif

	/* clear off unreasonable cache setting. */
	if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
		CWARN("obdclass: invalid lu_cache_percent: %u, it must be in the range of (0, %u]. Will use default value: %u.\n",
		      lu_cache_percent, LU_CACHE_PERCENT_MAX,
		      LU_CACHE_PERCENT_DEFAULT);

		lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
	}
	cache_size = cache_size / 100 * lu_cache_percent *
		(PAGE_CACHE_SIZE / 1024);

	for (bits = 1; (1 << bits) < cache_size; ++bits) {
		;
	}
	return bits;
}
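/*
 * Worked example (hypothetical machine): with 8 GB of RAM and 4 KB pages,
 * totalram_pages = 2097152, so with the default lu_cache_percent of 20:
 *
 *	cache_size = 2097152 / 100 * 20 * (4096 / 1024) = 1677680
 *
 * and the loop above stops at bits = 21, since 2^20 < 1677680 <= 2^21.
 * lu_site_init() then clamps the result to [LU_SITE_BITS_MIN,
 * LU_SITE_BITS_MAX].
 */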
static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
				const void *key, unsigned mask)
{
	struct lu_fid *fid = (struct lu_fid *)key;
	__u32 hash;

	hash = fid_flatten32(fid);
	hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
	hash = hash_long(hash, hs->hs_bkt_bits);

	/* give me another random factor */
	hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

	hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
	hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

	return hash & mask;
}
static void *lu_obj_hop_object(struct hlist_node *hnode)
{
	return hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	atomic_inc(&h->loh_ref);
}

static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
	LBUG(); /* we should never be called here */
}

static struct cfs_hash_ops lu_site_hash_ops = {
	.hs_hash	= lu_obj_hop_hash,
	.hs_key		= lu_obj_hop_key,
	.hs_keycmp	= lu_obj_hop_keycmp,
	.hs_object	= lu_obj_hop_object,
	.hs_get		= lu_obj_hop_get,
	.hs_put_locked	= lu_obj_hop_put_locked,
};
static void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	if (list_empty(&d->ld_linkage))
		list_add(&d->ld_linkage, &s->ls_ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
/**
 * Initialize site \a s, with \a d as the top level device.
 */
#define LU_SITE_BITS_MIN	12
#define LU_SITE_BITS_MAX	24
/**
 * total 256 buckets, we don't want too many buckets because:
 * - consume too much memory
 * - avoid unbalanced LRU list
 */
#define LU_SITE_BKT_BITS	8

int lu_site_init(struct lu_site *s, struct lu_device *top)
{
	struct lu_site_bkt_data *bkt;
	struct cfs_hash_bd bd;
	char name[16];
	int bits;
	int i;

	memset(s, 0, sizeof(*s));
	bits = lu_htable_order();
	snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
	for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
	     bits >= LU_SITE_BITS_MIN; bits--) {
		s->ls_obj_hash = cfs_hash_create(name, bits, bits,
						 bits - LU_SITE_BKT_BITS,
						 sizeof(*bkt), 0, 0,
						 &lu_site_hash_ops,
						 CFS_HASH_SPIN_BKTLOCK |
						 CFS_HASH_NO_ITEMREF |
						 CFS_HASH_DEPTH |
						 CFS_HASH_ASSERT_EMPTY);
		if (s->ls_obj_hash)
			break;
	}

	if (!s->ls_obj_hash) {
		CERROR("failed to create lu_site hash with bits: %d\n", bits);
		return -ENOMEM;
	}

	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
		INIT_LIST_HEAD(&bkt->lsb_lru);
		init_waitqueue_head(&bkt->lsb_marche_funebre);
	}

	s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
	if (!s->ls_stats) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
		return -ENOMEM;
	}

	lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
			     0, "created", "created");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
			     0, "cache_hit", "cache_hit");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
			     0, "cache_miss", "cache_miss");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
			     0, "cache_race", "cache_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
			     0, "cache_death_race", "cache_death_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
			     0, "lru_purged", "lru_purged");
	/*
	 * Unlike other counters, lru_len can be decremented so
	 * need lc_sum instead of just lc_count
	 */
	lprocfs_counter_init(s->ls_stats, LU_SS_LRU_LEN,
			     LPROCFS_CNTR_AVGMINMAX, "lru_len", "lru_len");

	INIT_LIST_HEAD(&s->ls_linkage);
	s->ls_top_dev = top;
	top->ld_site = s;
	lu_device_get(top);
	lu_ref_add(&top->ld_reference, "site-top", s);

	INIT_LIST_HEAD(&s->ls_ld_linkage);
	spin_lock_init(&s->ls_ld_lock);

	lu_dev_add_linkage(s, top);

	return 0;
}
EXPORT_SYMBOL(lu_site_init);
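/*
 * Illustrative life cycle (a sketch, not from this file): a stack owner
 * typically pairs the calls as
 *
 *	rc = lu_site_init(s, top_dev);
 *	if (rc == 0) {
 *		rc = lu_site_init_finish(s);
 *		if (rc != 0)
 *			lu_site_fini(s);
 *	}
 *
 * with lu_site_fini() also run at teardown, after the stack is finalized.
 */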
/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
	mutex_lock(&lu_sites_guard);
	list_del_init(&s->ls_linkage);
	mutex_unlock(&lu_sites_guard);

	if (s->ls_obj_hash) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
	}

	if (s->ls_top_dev) {
		s->ls_top_dev->ld_site = NULL;
		lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
		lu_device_put(s->ls_top_dev);
		s->ls_top_dev = NULL;
	}

	if (s->ls_stats)
		lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);
/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
	int result;

	mutex_lock(&lu_sites_guard);
	result = lu_context_refill(&lu_shrink_env.le_ctx);
	if (result == 0)
		list_add(&s->ls_linkage, &lu_sites);
	mutex_unlock(&lu_sites_guard);
	return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
/**
 * Acquire additional reference on device \a d
 */
void lu_device_get(struct lu_device *d)
{
	atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
	LASSERT(atomic_read(&d->ld_ref) > 0);
	atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);
/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
	if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start)
		t->ldt_ops->ldto_start(t);
	memset(d, 0, sizeof(*d));
	atomic_set(&d->ld_ref, 0);
	d->ld_type = t;
	lu_ref_init(&d->ld_reference);
	INIT_LIST_HEAD(&d->ld_linkage);
	return 0;
}
EXPORT_SYMBOL(lu_device_init);
/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
	struct lu_device_type *t;

	t = d->ld_type;
	if (d->ld_obd) {
		d->ld_obd->obd_lu_dev = NULL;
		d->ld_obd = NULL;
	}

	lu_ref_fini(&d->ld_reference);
	LASSERTF(atomic_read(&d->ld_ref) == 0,
		 "Refcount is %u\n", atomic_read(&d->ld_ref));
	LASSERT(t->ldt_device_nr > 0);
	if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop)
		t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);
/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o, struct lu_object_header *h,
		   struct lu_device *d)
{
	memset(o, 0, sizeof(*o));
	o->lo_header = h;
	o->lo_dev = d;
	lu_device_get(d);
	lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
	INIT_LIST_HEAD(&o->lo_linkage);

	return 0;
}
EXPORT_SYMBOL(lu_object_init);
/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
	struct lu_device *dev = o->lo_dev;

	LASSERT(list_empty(&o->lo_linkage));

	if (dev) {
		lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
			      "lu_object", o);
		lu_device_put(dev);
		o->lo_dev = NULL;
	}
}
EXPORT_SYMBOL(lu_object_fini);
/**
 * Add object \a o as first layer of compound object \a h
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
	list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
	list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);
/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
	memset(h, 0, sizeof(*h));
	atomic_set(&h->loh_ref, 1);
	INIT_HLIST_NODE(&h->loh_hash);
	INIT_LIST_HEAD(&h->loh_lru);
	INIT_LIST_HEAD(&h->loh_layers);
	lu_ref_init(&h->loh_reference);
	return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
	LASSERT(list_empty(&h->loh_layers));
	LASSERT(list_empty(&h->loh_lru));
	LASSERT(hlist_unhashed(&h->loh_hash));
	lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
				   const struct lu_device_type *dtype)
{
	struct lu_object *o;

	list_for_each_entry(o, &h->loh_layers, lo_linkage) {
		if (o->lo_dev->ld_type == dtype)
			return o;
	}
	return NULL;
}
EXPORT_SYMBOL(lu_object_locate);
/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
	struct lu_site *site = top->ld_site;
	struct lu_device *scan;
	struct lu_device *next;

	lu_site_purge(env, site, ~0);
	for (scan = top; scan; scan = next) {
		next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
		lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
		lu_device_put(scan);
	}

	/* purge again. */
	lu_site_purge(env, site, ~0);

	for (scan = top; scan; scan = next) {
		const struct lu_device_type *ldt = scan->ld_type;
		struct obd_type *type;

		next = ldt->ldt_ops->ldto_device_free(env, scan);
		type = ldt->ldt_obd_type;
		if (type) {
			type->typ_refcnt--;
			class_put_type(type);
		}
	}
}
EXPORT_SYMBOL(lu_stack_fini);
enum {
	/**
	 * Maximal number of tld slots.
	 */
	LU_CONTEXT_KEY_NR = 40
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static DEFINE_SPINLOCK(lu_keys_guard);

/**
 * Global counter incremented whenever key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version;
/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
	int result;
	int i;

	LASSERT(key->lct_init);
	LASSERT(key->lct_fini);
	LASSERT(key->lct_tags != 0);

	result = -ENFILE;
	spin_lock(&lu_keys_guard);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		if (!lu_keys[i]) {
			key->lct_index = i;
			atomic_set(&key->lct_used, 1);
			lu_keys[i] = key;
			lu_ref_init(&key->lct_reference);
			result = 0;
			++key_set_version;
			break;
		}
	}
	spin_unlock(&lu_keys_guard);
	return result;
}
EXPORT_SYMBOL(lu_context_key_register);
static void key_fini(struct lu_context *ctx, int index)
{
	if (ctx->lc_value && ctx->lc_value[index]) {
		struct lu_context_key *key;

		key = lu_keys[index];
		LASSERT(key);
		LASSERT(atomic_read(&key->lct_used) > 1);

		key->lct_fini(ctx, key, ctx->lc_value[index]);
		lu_ref_del(&key->lct_reference, "ctx", ctx);
		atomic_dec(&key->lct_used);

		if ((ctx->lc_tags & LCT_NOREF) == 0) {
#ifdef CONFIG_MODULE_UNLOAD
			LINVRNT(module_refcount(key->lct_owner) > 0);
#endif
			module_put(key->lct_owner);
		}
		ctx->lc_value[index] = NULL;
	}
}
/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
	LASSERT(atomic_read(&key->lct_used) >= 1);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

	lu_context_key_quiesce(key);

	++key_set_version;
	spin_lock(&lu_keys_guard);
	key_fini(&lu_shrink_env.le_ctx, key->lct_index);
	if (lu_keys[key->lct_index]) {
		lu_keys[key->lct_index] = NULL;
		lu_ref_fini(&key->lct_reference);
	}
	spin_unlock(&lu_keys_guard);

	LASSERTF(atomic_read(&key->lct_used) == 1,
		 "key has instances: %d\n",
		 atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
	struct lu_context_key *key = k;
	va_list args;
	int result;

	va_start(args, k);
	do {
		result = lu_context_key_register(key);
		if (result)
			break;
		key = va_arg(args, struct lu_context_key *);
	} while (key);
	va_end(args);

	if (result != 0) {
		va_start(args, k);
		while (k != key) {
			lu_context_key_degister(k);
			k = va_arg(args, struct lu_context_key *);
		}
		va_end(args);
	}

	return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);
/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_degister(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);
/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_revive(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);
/**
 * Quiescent a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_quiesce(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);
/**
 * Return value associated with key \a key in context \a ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
			 const struct lu_context_key *key)
{
	LINVRNT(ctx->lc_state == LCS_ENTERED);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
	LASSERT(lu_keys[key->lct_index] == key);
	return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);
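/*
 * Illustrative key definition (a sketch mirroring the lu_global_key
 * pattern earlier in this file; the "sample" names are assumptions):
 *
 *	struct sample_thread_info {
 *		char sti_scratch[64];
 *	};
 *
 *	LU_KEY_INIT_FINI(sample, struct sample_thread_info);
 *
 *	static struct lu_context_key sample_thread_key = {
 *		.lct_tags = LCT_MD_THREAD,
 *		.lct_init = sample_key_init,
 *		.lct_fini = sample_key_fini
 *	};
 *
 * After lu_context_key_register(&sample_thread_key), a thread inside an
 * entered context retrieves its value with
 * lu_context_key_get(&env->le_ctx, &sample_thread_key).
 */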
/**
 * List of remembered contexts. XXX document me.
 */
static LIST_HEAD(lu_context_remembered);

/**
 * Destroy \a key in all remembered contexts. This is used to destroy key
 * values in "shared" contexts (like service threads), when a module owning
 * the key is about to be unloaded.
 */
void lu_context_key_quiesce(struct lu_context_key *key)
{
	struct lu_context *ctx;

	if (!(key->lct_tags & LCT_QUIESCENT)) {
		/*
		 * XXX layering violation.
		 */
		key->lct_tags |= LCT_QUIESCENT;
		/*
		 * XXX memory barrier has to go here.
		 */
		spin_lock(&lu_keys_guard);
		list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
			key_fini(ctx, key->lct_index);
		spin_unlock(&lu_keys_guard);
		++key_set_version;
	}
}
EXPORT_SYMBOL(lu_context_key_quiesce);
void lu_context_key_revive(struct lu_context_key *key)
{
	key->lct_tags &= ~LCT_QUIESCENT;
	++key_set_version;
}
EXPORT_SYMBOL(lu_context_key_revive);
static void keys_fini(struct lu_context *ctx)
{
	int i;

	if (!ctx->lc_value)
		return;

	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
		key_fini(ctx, i);

	kfree(ctx->lc_value);
	ctx->lc_value = NULL;
}
static int keys_fill(struct lu_context *ctx)
{
	int i;

	LINVRNT(ctx->lc_value);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		struct lu_context_key *key;

		key = lu_keys[i];
		if (!ctx->lc_value[i] && key &&
		    (key->lct_tags & ctx->lc_tags) &&
		    /*
		     * Don't create values for a LCT_QUIESCENT key, as this
		     * will pin module owning a key.
		     */
		    !(key->lct_tags & LCT_QUIESCENT)) {
			void *value;

			LINVRNT(key->lct_init);
			LINVRNT(key->lct_index == i);

			value = key->lct_init(ctx, key);
			if (unlikely(IS_ERR(value)))
				return PTR_ERR(value);

			if (!(ctx->lc_tags & LCT_NOREF))
				try_module_get(key->lct_owner);
			lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
			atomic_inc(&key->lct_used);
			/*
			 * This is the only place in the code where an
			 * element of ctx->lc_value[] array is set to non-NULL
			 * value.
			 */
			ctx->lc_value[i] = value;
			if (key->lct_exit)
				ctx->lc_tags |= LCT_HAS_EXIT;
		}
		ctx->lc_version = key_set_version;
	}
	return 0;
}
static int keys_init(struct lu_context *ctx)
{
	ctx->lc_value = kcalloc(ARRAY_SIZE(lu_keys), sizeof(ctx->lc_value[0]),
				GFP_NOFS);
	if (likely(ctx->lc_value))
		return keys_fill(ctx);

	return -ENOMEM;
}
/**
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
	int rc;

	memset(ctx, 0, sizeof(*ctx));
	ctx->lc_state = LCS_INITIALIZED;
	ctx->lc_tags = tags;
	if (tags & LCT_REMEMBER) {
		spin_lock(&lu_keys_guard);
		list_add(&ctx->lc_remember, &lu_context_remembered);
		spin_unlock(&lu_keys_guard);
	} else {
		INIT_LIST_HEAD(&ctx->lc_remember);
	}

	rc = keys_init(ctx);
	if (rc != 0)
		lu_context_fini(ctx);

	return rc;
}
EXPORT_SYMBOL(lu_context_init);
/**
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_FINALIZED;

	if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
		LASSERT(list_empty(&ctx->lc_remember));
		keys_fini(ctx);

	} else { /* could race with key degister */
		spin_lock(&lu_keys_guard);
		keys_fini(ctx);
		list_del_init(&ctx->lc_remember);
		spin_unlock(&lu_keys_guard);
	}
}
EXPORT_SYMBOL(lu_context_fini);
/**
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);
/**
 * Called after exiting from \a ctx
 */
void lu_context_exit(struct lu_context *ctx)
{
	int i;

	LINVRNT(ctx->lc_state == LCS_ENTERED);
	ctx->lc_state = LCS_LEFT;
	if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) {
		for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
			if (ctx->lc_value[i]) {
				struct lu_context_key *key;

				key = lu_keys[i];
				if (key->lct_exit)
					key->lct_exit(ctx,
						      key, ctx->lc_value[i]);
			}
		}
	}
}
EXPORT_SYMBOL(lu_context_exit);
/**
 * Allocate for context all missing keys that were registered after context
 * creation. key_set_version is only changed in rare cases when modules
 * are loaded and removed.
 */
int lu_context_refill(struct lu_context *ctx)
{
	return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
}
EXPORT_SYMBOL(lu_context_refill);
/**
 * lu_ctx_tags/lu_ses_tags will be updated if there are new types of
 * obd being added. Currently, this is only used on the client side,
 * specifically for the echo device client. For other stacks (like ptlrpc
 * threads), contexts are predefined when the lu_device type is registered,
 * during the module probe phase.
 */
__u32 lu_context_tags_default;
__u32 lu_session_tags_default;

int lu_env_init(struct lu_env *env, __u32 tags)
{
	int result;

	env->le_ses = NULL;
	result = lu_context_init(&env->le_ctx, tags);
	if (likely(result == 0))
		lu_context_enter(&env->le_ctx);
	return result;
}
EXPORT_SYMBOL(lu_env_init);

void lu_env_fini(struct lu_env *env)
{
	lu_context_exit(&env->le_ctx);
	lu_context_fini(&env->le_ctx);
	env->le_ses = NULL;
}
EXPORT_SYMBOL(lu_env_fini);
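/*
 * Illustrative environment lifecycle (a sketch, assuming a short-lived
 * kernel thread; some_key is a placeholder):
 *
 *	struct lu_env env;
 *	int rc;
 *
 *	rc = lu_env_init(&env, LCT_LOCAL);
 *	if (rc == 0) {
 *		... lu_context_key_get(&env.le_ctx, &some_key) ...
 *		lu_env_fini(&env);
 *	}
 *
 * lu_env_init() enters the context, so key values remain valid until the
 * matching lu_env_fini().
 */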
int lu_env_refill(struct lu_env *env)
{
	int result;

	result = lu_context_refill(&env->le_ctx);
	if (result == 0 && env->le_ses)
		result = lu_context_refill(env->le_ses);
	return result;
}
EXPORT_SYMBOL(lu_env_refill);
struct lu_site_stats {
	unsigned	lss_populated;
	unsigned	lss_max_search;
	unsigned	lss_total;
	unsigned	lss_busy;
};

static void lu_site_stats_get(struct cfs_hash *hs,
			      struct lu_site_stats *stats, int populated)
{
	struct cfs_hash_bd bd;
	int i;

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
		struct hlist_head *hhead;

		cfs_hash_bd_lock(hs, &bd, 1);
		stats->lss_busy +=
			cfs_hash_bd_count_get(&bd) - bkt->lsb_lru_len;
		stats->lss_total += cfs_hash_bd_count_get(&bd);
		stats->lss_max_search = max((int)stats->lss_max_search,
					    cfs_hash_bd_depmax_get(&bd));
		if (!populated) {
			cfs_hash_bd_unlock(hs, &bd, 1);
			continue;
		}

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			if (!hlist_empty(hhead))
				stats->lss_populated++;
		}
		cfs_hash_bd_unlock(hs, &bd, 1);
	}
}
/*
 * lu_cache_shrink_count returns the number of cached objects that are
 * candidates to be freed by shrink_slab(). A counter, which tracks
 * the number of items in the site's lru, is maintained in the per cpu
 * stats of each site. The counter is incremented when an object is added
 * to a site's lru and decremented when one is removed. The number of
 * free-able objects is the sum of all per cpu counters for all sites.
 *
 * Using a per cpu counter is a compromise solution to concurrent access:
 * lu_object_put() can update the counter without locking the site and
 * lu_cache_shrink_count can sum the counters without locking each
 * ls_obj_hash bucket.
 */
static unsigned long lu_cache_shrink_count(struct shrinker *sk,
					   struct shrink_control *sc)
{
	struct lu_site *s;
	struct lu_site *tmp;
	unsigned long cached = 0;

	if (!(sc->gfp_mask & __GFP_FS))
		return 0;

	mutex_lock(&lu_sites_guard);
	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		cached += ls_stats_read(s->ls_stats, LU_SS_LRU_LEN);
	}
	mutex_unlock(&lu_sites_guard);

	cached = (cached / 100) * sysctl_vfs_cache_pressure;
	CDEBUG(D_INODE, "%ld objects cached, cache pressure %d\n",
	       cached, sysctl_vfs_cache_pressure);

	return cached;
}
static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
					  struct shrink_control *sc)
{
	struct lu_site *s;
	struct lu_site *tmp;
	unsigned long remain = sc->nr_to_scan, freed = 0;
	LIST_HEAD(splice);

	if (!(sc->gfp_mask & __GFP_FS))
		/* We must not take the lu_sites_guard lock when
		 * __GFP_FS is *not* set because of the deadlock
		 * possibility detailed above. Additionally,
		 * since we cannot determine the number of
		 * objects in the cache without taking this
		 * lock, we're in a particularly tough spot. As
		 * a result, we'll just lie and say our cache is
		 * empty. This _should_ be ok, as we can't
		 * reclaim objects when __GFP_FS is *not* set
		 * anyway.
		 */
		return SHRINK_STOP;

	mutex_lock(&lu_sites_guard);
	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		freed = lu_site_purge(&lu_shrink_env, s, remain);
		remain -= freed;
		/*
		 * Move just shrunk site to the tail of site list to
		 * assure shrinking fairness.
		 */
		list_move_tail(&s->ls_linkage, &splice);
	}
	list_splice(&splice, lu_sites.prev);
	mutex_unlock(&lu_sites_guard);

	return sc->nr_to_scan - remain;
}
static struct shrinker lu_site_shrinker = {
	.count_objects	= lu_cache_shrink_count,
	.scan_objects	= lu_cache_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
};
/**
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
{
	int result;

	CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);

	result = lu_ref_global_init();
	if (result != 0)
		return result;

	LU_CONTEXT_KEY_INIT(&lu_global_key);
	result = lu_context_key_register(&lu_global_key);
	if (result != 0)
		return result;

	/*
	 * At this level, we don't know what tags are needed, so allocate them
	 * conservatively. This should not be too bad, because this
	 * environment is global.
	 */
	mutex_lock(&lu_sites_guard);
	result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
	mutex_unlock(&lu_sites_guard);
	if (result != 0)
		return result;

	/*
	 * seeks estimation: 3 seeks to read a record from oi, one to read
	 * inode, one for ea. Unfortunately setting this high value results in
	 * lu_object/inode cache consuming all the memory.
	 */
	register_shrinker(&lu_site_shrinker);

	return result;
}
/**
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
{
	unregister_shrinker(&lu_site_shrinker);
	lu_context_key_degister(&lu_global_key);

	/*
	 * Tear shrinker environment down _after_ de-registering
	 * lu_global_key, because the latter has a value in the former.
	 */
	mutex_lock(&lu_sites_guard);
	lu_env_fini(&lu_shrink_env);
	mutex_unlock(&lu_sites_guard);

	lu_ref_global_fini();
}
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
{
	struct lprocfs_counter ret;

	lprocfs_stats_collect(stats, idx, &ret);
	if (idx == LU_SS_LRU_LEN)
		/*
		 * protect against counter on cpu A being decremented
		 * before counter is incremented on cpu B; unlikely
		 */
		return (__u32)((ret.lc_sum > 0) ? ret.lc_sum : 0);

	return (__u32)ret.lc_count;
}
/**
 * Output site statistical counters into a buffer. Suitable for
 * lprocfs_rd_*()-style functions.
 */
int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
{
	struct lu_site_stats stats;

	memset(&stats, 0, sizeof(stats));
	lu_site_stats_get(s->ls_obj_hash, &stats, 1);

	seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d %d\n",
		   stats.lss_busy,
		   stats.lss_total,
		   stats.lss_populated,
		   CFS_HASH_NHLIST(s->ls_obj_hash),
		   stats.lss_max_search,
		   ls_stats_read(s->ls_stats, LU_SS_CREATED),
		   ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
		   ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
		   ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
		   ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
		   ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED),
		   ls_stats_read(s->ls_stats, LU_SS_LRU_LEN));
	return 0;
}
EXPORT_SYMBOL(lu_site_stats_print);
/**
 * Helper function to initialize a number of kmem slab caches at once.
 */
int lu_kmem_init(struct lu_kmem_descr *caches)
{
	int result;
	struct lu_kmem_descr *iter = caches;

	for (result = 0; iter->ckd_cache; ++iter) {
		*iter->ckd_cache = kmem_cache_create(iter->ckd_name,
						     iter->ckd_size,
						     0, 0, NULL);
		if (!*iter->ckd_cache) {
			result = -ENOMEM;
			/* free all previously allocated caches */
			lu_kmem_fini(caches);
			break;
		}
	}
	return result;
}
EXPORT_SYMBOL(lu_kmem_init);
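/*
 * Illustrative descriptor table (a sketch; the "sample" names are
 * assumptions). The array is terminated by a NULL ckd_cache entry, which
 * is what the loops in lu_kmem_init() and lu_kmem_fini() test for:
 *
 *	static struct kmem_cache *sample_kmem;
 *
 *	static struct lu_kmem_descr sample_caches[] = {
 *		{
 *			.ckd_cache = &sample_kmem,
 *			.ckd_name  = "sample_kmem",
 *			.ckd_size  = sizeof(struct sample_object)
 *		},
 *		{
 *			.ckd_cache = NULL
 *		}
 *	};
 *
 *	rc = lu_kmem_init(sample_caches);
 */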
/**
 * Helper function to finalize a number of kmem slab caches at once. Dual to
 * lu_kmem_init().
 */
void lu_kmem_fini(struct lu_kmem_descr *caches)
{
	for (; caches->ckd_cache; ++caches) {
		kmem_cache_destroy(*caches->ckd_cache);
		*caches->ckd_cache = NULL;
	}
}
EXPORT_SYMBOL(lu_kmem_fini);