1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26/*
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2015, Intel Corporation.
31 */
32/*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * lustre/obdclass/lu_object.c
37 *
38 * Lustre Object.
39 * These are the only exported functions; they provide some generic
40 * infrastructure for managing object devices
41 *
42 * Author: Nikita Danilov <nikita.danilov@sun.com>
43 */
44
45#define DEBUG_SUBSYSTEM S_CLASS
46
47#include "../../include/linux/libcfs/libcfs.h"
48
49# include <linux/module.h>
50
51/* hash_long() */
52#include "../../include/linux/libcfs/libcfs_hash.h"
53#include "../include/obd_class.h"
54#include "../include/obd_support.h"
55#include "../include/lustre_disk.h"
56#include "../include/lustre_fid.h"
57#include "../include/lu_object.h"
58#include "../include/lu_ref.h"
59#include <linux/list.h>
60
61static void lu_object_free(const struct lu_env *env, struct lu_object *o);
62static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
63
64/**
65 * Decrease reference counter on object. If last reference is freed, return
66 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
67 * case, free object immediately.
68 */
69void lu_object_put(const struct lu_env *env, struct lu_object *o)
70{
71 struct lu_site_bkt_data *bkt;
72 struct lu_object_header *top;
73 struct lu_site *site;
74 struct lu_object *orig;
75 struct cfs_hash_bd bd;
76 const struct lu_fid *fid;
77
78 top = o->lo_header;
79 site = o->lo_dev->ld_site;
80 orig = o;
81
82 /*
83 * Till fids-on-OST is fully implemented, anonymous objects
84 * are possible in OSP. Such an object isn't listed in the site,
85 * so we should not remove it from the site.
86 */
87 fid = lu_object_fid(o);
88 if (fid_is_zero(fid)) {
89 LASSERT(!top->loh_hash.next && !top->loh_hash.pprev);
90 LASSERT(list_empty(&top->loh_lru));
91 if (!atomic_dec_and_test(&top->loh_ref))
92 return;
93 list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
94 if (o->lo_ops->loo_object_release)
95 o->lo_ops->loo_object_release(env, o);
96 }
97 lu_object_free(env, orig);
98 return;
99 }
100
101 cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
102 bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
103
104 if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
105 if (lu_object_is_dying(top)) {
106
107 /*
108 * somebody may be waiting for this, currently only
109 * used for cl_object, see cl_object_put_last().
110 */
111 wake_up_all(&bkt->lsb_marche_funebre);
112 }
113 return;
114 }
115
116 /*
117 * When last reference is released, iterate over object
118 * layers, and notify them that object is no longer busy.
119 */
120 list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
121 if (o->lo_ops->loo_object_release)
122 o->lo_ops->loo_object_release(env, o);
123 }
124
125 if (!lu_object_is_dying(top)) {
126 LASSERT(list_empty(&top->loh_lru));
127 list_add_tail(&top->loh_lru, &bkt->lsb_lru);
128 bkt->lsb_lru_len++;
129 lprocfs_counter_incr(site->ls_stats, LU_SS_LRU_LEN);
130 CDEBUG(D_INODE, "Add %p to site lru. hash: %p, bkt: %p, lru_len: %ld\n",
131 o, site->ls_obj_hash, bkt, bkt->lsb_lru_len);
132 cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
133 return;
134 }
135
136 /*
137 * If object is dying (will not be cached), then remove it
138 * from hash table and LRU.
139 *
140 * This is done with hash table and LRU lists locked. As the only
141 * way to acquire first reference to previously unreferenced
142 * object is through hash-table lookup (lu_object_find()),
143 * or LRU scanning (lu_site_purge()), that are done under hash-table
144 * and LRU lock, no race with concurrent object lookup is possible
145 * and we can safely destroy object below.
146 */
147 if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
148 cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
149 cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
150 /*
151 * Object was already removed from hash and lru above, can
152 * kill it.
153 */
154 lu_object_free(env, orig);
155}
156EXPORT_SYMBOL(lu_object_put);
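/*
 * Example (hypothetical caller, for illustration): a reference obtained
 * from lu_object_find()/lu_object_find_at() is returned to the cache (or
 * freed, if the object is dying) by a matching lu_object_put():
 *
 *	struct lu_object *o;
 *
 *	o = lu_object_find_at(env, dev, fid, NULL);
 *	if (IS_ERR(o))
 *		return PTR_ERR(o);
 *	... use the object ...
 *	lu_object_put(env, o);
 */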
157
158/**
159 * Kill the object and take it out of LRU cache.
160 * Currently used by client code for layout change.
161 */
162void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
163{
164 struct lu_object_header *top;
165
166 top = o->lo_header;
167 set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
168 if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
169 struct lu_site *site = o->lo_dev->ld_site;
170 struct cfs_hash *obj_hash = site->ls_obj_hash;
171 struct cfs_hash_bd bd;
172
173 cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
174 if (!list_empty(&top->loh_lru)) {
175 struct lu_site_bkt_data *bkt;
176
177 list_del_init(&top->loh_lru);
178 bkt = cfs_hash_bd_extra_get(obj_hash, &bd);
179 bkt->lsb_lru_len--;
180 lprocfs_counter_decr(site->ls_stats, LU_SS_LRU_LEN);
181 }
182 cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
183 cfs_hash_bd_unlock(obj_hash, &bd, 1);
184 }
185}
186EXPORT_SYMBOL(lu_object_unhash);
187
188/**
189 * Allocate new object.
190 *
191 * This follows object creation protocol, described in the comment within
192 * struct lu_device_operations definition.
193 */
194static struct lu_object *lu_object_alloc(const struct lu_env *env,
195 struct lu_device *dev,
196 const struct lu_fid *f,
197 const struct lu_object_conf *conf)
198{
199 struct lu_object *scan;
200 struct lu_object *top;
201 struct list_head *layers;
202 unsigned int init_mask = 0;
203 unsigned int init_flag;
204 int clean;
205 int result;
206
207 /*
208 * Create top-level object slice. This will also create
209 * lu_object_header.
210 */
211 top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
212 if (!top)
213 return ERR_PTR(-ENOMEM);
214 if (IS_ERR(top))
215 return top;
216 /*
217 * This is the only place where object fid is assigned. It's constant
218 * after this point.
219 */
220 top->lo_header->loh_fid = *f;
221 layers = &top->lo_header->loh_layers;
222
223 do {
224 /*
225 * Call ->loo_object_init() repeatedly, until no more new
226 * object slices are created.
227 */
228 clean = 1;
229 init_flag = 1;
230 list_for_each_entry(scan, layers, lo_linkage) {
231 if (init_mask & init_flag)
232 goto next;
233 clean = 0;
234 scan->lo_header = top->lo_header;
235 result = scan->lo_ops->loo_object_init(env, scan, conf);
236 if (result != 0) {
237 lu_object_free(env, top);
238 return ERR_PTR(result);
239 }
240 init_mask |= init_flag;
241next:
242 init_flag <<= 1;
243 }
244 } while (!clean);
245
246 list_for_each_entry_reverse(scan, layers, lo_linkage) {
247 if (scan->lo_ops->loo_object_start) {
248 result = scan->lo_ops->loo_object_start(env, scan);
249 if (result != 0) {
250 lu_object_free(env, top);
251 return ERR_PTR(result);
252 }
253 }
254 }
255
256 lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
257 return top;
258}
259
260/**
261 * Free an object.
262 */
263static void lu_object_free(const struct lu_env *env, struct lu_object *o)
264{
265 struct lu_site_bkt_data *bkt;
266 struct lu_site *site;
267 struct lu_object *scan;
268 struct list_head *layers;
269 struct list_head splice;
270
271 site = o->lo_dev->ld_site;
272 layers = &o->lo_header->loh_layers;
273 bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
274 /*
275 * First call ->loo_object_delete() method to release all resources.
276 */
277 list_for_each_entry_reverse(scan, layers, lo_linkage) {
278 if (scan->lo_ops->loo_object_delete)
279 scan->lo_ops->loo_object_delete(env, scan);
280 }
281
282 /*
283 * Then, splice object layers into stand-alone list, and call
284 * ->loo_object_free() on all layers to free memory. Splice is
285 * necessary, because lu_object_header is freed together with the
286 * top-level slice.
287 */
288 INIT_LIST_HEAD(&splice);
289 list_splice_init(layers, &splice);
290 while (!list_empty(&splice)) {
291 /*
292 * Free layers in bottom-to-top order, so that object header
293 * lives as long as possible and ->loo_object_free() methods
294 * can look at its contents.
295 */
296 o = container_of0(splice.prev, struct lu_object, lo_linkage);
297 list_del_init(&o->lo_linkage);
298 o->lo_ops->loo_object_free(env, o);
299 }
300
301 if (waitqueue_active(&bkt->lsb_marche_funebre))
302 wake_up_all(&bkt->lsb_marche_funebre);
303}
304
305/**
306 * Free \a nr objects from the cold end of the site LRU list.
307 */
308int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
309{
310 struct lu_object_header *h;
311 struct lu_object_header *temp;
312 struct lu_site_bkt_data *bkt;
313 struct cfs_hash_bd bd;
314 struct cfs_hash_bd bd2;
315 struct list_head dispose;
316 int did_sth;
317 int start;
318 int count;
319 int bnr;
320 int i;
321
322 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
323 return 0;
324
325 INIT_LIST_HEAD(&dispose);
326 /*
327 * Under LRU list lock, scan LRU list and move unreferenced objects to
328 * the dispose list, removing them from LRU and hash table.
329 */
330 start = s->ls_purge_start;
331 bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
332 again:
333 did_sth = 0;
334 cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
335 if (i < start)
336 continue;
337 count = bnr;
338 cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
339 bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
340
341 list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
342 LASSERT(atomic_read(&h->loh_ref) == 0);
343
344 cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
345 LASSERT(bd.bd_bucket == bd2.bd_bucket);
346
347 cfs_hash_bd_del_locked(s->ls_obj_hash,
348 &bd2, &h->loh_hash);
349 list_move(&h->loh_lru, &dispose);
350 bkt->lsb_lru_len--;
351 lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
352 if (did_sth == 0)
353 did_sth = 1;
354
355 if (nr != ~0 && --nr == 0)
356 break;
357
358 if (count > 0 && --count == 0)
359 break;
360
361 }
362 cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
363 cond_resched();
364 /*
365 * Free everything on the dispose list. This is safe against
366 * races due to the reasons described in lu_object_put().
367 */
368 while (!list_empty(&dispose)) {
369 h = container_of0(dispose.next,
370 struct lu_object_header, loh_lru);
371 list_del_init(&h->loh_lru);
372 lu_object_free(env, lu_object_top(h));
373 lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
374 }
375
376 if (nr == 0)
377 break;
378 }
379
380 if (nr != 0 && did_sth && start != 0) {
381 start = 0; /* restart from the first bucket */
382 goto again;
383 }
384 /* race on s->ls_purge_start, but nobody cares */
385 s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);
386
387 return nr;
388}
389EXPORT_SYMBOL(lu_site_purge);
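/*
 * Example (illustration only, mirroring the calls in lu_stack_fini() and
 * lu_cache_shrink_scan() below): passing ~0 as \a nr drains the whole LRU,
 * while a finite value frees at most that many cold objects:
 *
 *	lu_site_purge(env, site, ~0);
 *	lu_site_purge(env, site, 128);
 */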
390
391/*
392 * Object printing.
393 *
394 * Code below has to jump through certain loops to output object description
395 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
396 * composes object description from strings that are parts of _lines_ of
397 * output (i.e., strings that are not terminated by newline). This doesn't fit
398 * very well into libcfs_debug_msg() interface that assumes that each message
399 * supplied to it is a self-contained output line.
400 *
401 * To work around this, strings are collected in a temporary buffer
402 * (implemented as a value of lu_cdebug_key key), until terminating newline
403 * character is detected.
404 *
405 */
406
407enum {
408 /**
409 * Maximal line size.
410 *
411 * XXX overflow is not handled correctly.
412 */
413 LU_CDEBUG_LINE = 512
414};
415
416struct lu_cdebug_data {
417 /**
418 * Temporary buffer.
419 */
420 char lck_area[LU_CDEBUG_LINE];
421};
422
423/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
424LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
425
426/**
427 * Key, holding temporary buffer. This key is registered very early by
428 * lu_global_init().
429 */
430static struct lu_context_key lu_global_key = {
431 .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
432 LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
433 .lct_init = lu_global_key_init,
434 .lct_fini = lu_global_key_fini
435};
436
437/**
438 * Printer function emitting messages through libcfs_debug_msg().
439 */
440int lu_cdebug_printer(const struct lu_env *env,
441 void *cookie, const char *format, ...)
442{
443 struct libcfs_debug_msg_data *msgdata = cookie;
444 struct lu_cdebug_data *key;
445 int used;
446 int complete;
447 va_list args;
448
449 va_start(args, format);
450
451 key = lu_context_key_get(&env->le_ctx, &lu_global_key);
452
453 used = strlen(key->lck_area);
454 complete = format[strlen(format) - 1] == '\n';
455 /*
456 * Append new chunk to the buffer.
457 */
458 vsnprintf(key->lck_area + used,
459 ARRAY_SIZE(key->lck_area) - used, format, args);
460 if (complete) {
461 if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
462 libcfs_debug_msg(msgdata, "%s\n", key->lck_area);
463 key->lck_area[0] = 0;
464 }
465 va_end(args);
466 return 0;
467}
468EXPORT_SYMBOL(lu_cdebug_printer);
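/*
 * Example (assumed usage, for illustration): lu_cdebug_printer() is meant
 * to be passed as the printer callback of lu_object_print(), with a
 * struct libcfs_debug_msg_data as the cookie:
 *
 *	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_INODE, NULL);
 *	lu_object_print(env, &msgdata, lu_cdebug_printer, o);
 */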
469
470/**
471 * Print object header.
472 */
473void lu_object_header_print(const struct lu_env *env, void *cookie,
474 lu_printer_t printer,
475 const struct lu_object_header *hdr)
476{
477 (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
478 hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
479 PFID(&hdr->loh_fid),
480 hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
481 list_empty((struct list_head *)&hdr->loh_lru) ? \
482 "" : " lru",
483 hdr->loh_attr & LOHA_EXISTS ? " exist":"");
484}
485EXPORT_SYMBOL(lu_object_header_print);
486
487/**
488 * Print human readable representation of the \a o to the \a printer.
489 */
490void lu_object_print(const struct lu_env *env, void *cookie,
491 lu_printer_t printer, const struct lu_object *o)
492{
493 static const char ruler[] = "........................................";
494 struct lu_object_header *top;
495 int depth = 4;
496
497 top = o->lo_header;
498 lu_object_header_print(env, cookie, printer, top);
499 (*printer)(env, cookie, "{\n");
500
501 list_for_each_entry(o, &top->loh_layers, lo_linkage) {
502 /*
503 * print `.' \a depth times followed by type name and address
504 */
505 (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
506 o->lo_dev->ld_type->ldt_name, o);
507
508 if (o->lo_ops->loo_object_print)
509 (*o->lo_ops->loo_object_print)(env, cookie, printer, o);
510
511 (*printer)(env, cookie, "\n");
512 }
513
514 (*printer)(env, cookie, "} header@%p\n", top);
515}
516EXPORT_SYMBOL(lu_object_print);
517
518static struct lu_object *htable_lookup(struct lu_site *s,
519 struct cfs_hash_bd *bd,
520 const struct lu_fid *f,
521 wait_queue_t *waiter,
522 __u64 *version)
523{
524 struct lu_site_bkt_data *bkt;
525 struct lu_object_header *h;
526 struct hlist_node *hnode;
527 __u64 ver = cfs_hash_bd_version_get(bd);
528
529 if (*version == ver)
530 return ERR_PTR(-ENOENT);
531
532 *version = ver;
533 bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
534 /* cfs_hash_bd_peek_locked is a somewhat "internal" function
535 * of cfs_hash, it doesn't add refcount on object.
536 */
537 hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
538 if (!hnode) {
539 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
540 return ERR_PTR(-ENOENT);
541 }
542
543 h = container_of0(hnode, struct lu_object_header, loh_hash);
544 if (likely(!lu_object_is_dying(h))) {
545 cfs_hash_get(s->ls_obj_hash, hnode);
546 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
547 if (!list_empty(&h->loh_lru)) {
548 list_del_init(&h->loh_lru);
549 bkt->lsb_lru_len--;
550 lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
551 }
552 return lu_object_top(h);
553 }
554
555 /*
556 * Lookup found an object being destroyed; this object cannot be
557 * returned (to assure that references to dying objects are eventually
558 * drained), and moreover, lookup has to wait until object is freed.
559 */
560
561 init_waitqueue_entry(waiter, current);
562 add_wait_queue(&bkt->lsb_marche_funebre, waiter);
563 set_current_state(TASK_UNINTERRUPTIBLE);
564 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
565 return ERR_PTR(-EAGAIN);
566}
567
568/**
569 * Search cache for an object with the fid \a f. If such object is found,
570 * return it. Otherwise, create new object, insert it into cache and return
571 * it. In any case, additional reference is acquired on the returned object.
572 */
573static struct lu_object *lu_object_find(const struct lu_env *env,
574 struct lu_device *dev,
575 const struct lu_fid *f,
576 const struct lu_object_conf *conf)
577{
578 return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
579}
580
581static struct lu_object *lu_object_new(const struct lu_env *env,
582 struct lu_device *dev,
583 const struct lu_fid *f,
584 const struct lu_object_conf *conf)
585{
586 struct lu_object *o;
587 struct cfs_hash *hs;
588 struct cfs_hash_bd bd;
589
590 o = lu_object_alloc(env, dev, f, conf);
591 if (IS_ERR(o))
592 return o;
593
594 hs = dev->ld_site->ls_obj_hash;
595 cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
596 cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
597 cfs_hash_bd_unlock(hs, &bd, 1);
598 return o;
599}
600
601/**
602 * Core logic of lu_object_find*() functions.
603 */
604static struct lu_object *lu_object_find_try(const struct lu_env *env,
605 struct lu_device *dev,
606 const struct lu_fid *f,
607 const struct lu_object_conf *conf,
608 wait_queue_t *waiter)
609{
610 struct lu_object *o;
611 struct lu_object *shadow;
612 struct lu_site *s;
613 struct cfs_hash *hs;
614 struct cfs_hash_bd bd;
615 __u64 version = 0;
616
617 /*
618 * This uses standard index maintenance protocol:
619 *
620 * - search index under lock, and return object if found;
621 * - otherwise, unlock index, allocate new object;
622 * - lock index and search again;
623 * - if nothing is found (usual case), insert newly created
624 * object into index;
625 * - otherwise (race: other thread inserted object), free
626 * object just allocated.
627 * - unlock index;
628 * - return object.
629 *
630 * For the "LOC_F_NEW" case, we are sure the object is newly established.
631 * It is unnecessary to perform lookup-alloc-lookup-insert, instead,
632 * just alloc and insert directly.
633 *
634 * If dying object is found during index search, add @waiter to the
635 * site wait-queue and return ERR_PTR(-EAGAIN).
636 */
637 if (conf && conf->loc_flags & LOC_F_NEW)
638 return lu_object_new(env, dev, f, conf);
639
640 s = dev->ld_site;
641 hs = s->ls_obj_hash;
642 cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
643 o = htable_lookup(s, &bd, f, waiter, &version);
644 cfs_hash_bd_unlock(hs, &bd, 1);
645 if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
646 return o;
647
648 /*
649 * Allocate new object. This may result in rather complicated
650 * operations, including fld queries, inode loading, etc.
651 */
652 o = lu_object_alloc(env, dev, f, conf);
653 if (IS_ERR(o))
654 return o;
655
656 LASSERT(lu_fid_eq(lu_object_fid(o), f));
657
658 cfs_hash_bd_lock(hs, &bd, 1);
659
660 shadow = htable_lookup(s, &bd, f, waiter, &version);
661 if (likely(PTR_ERR(shadow) == -ENOENT)) {
662 cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
663 cfs_hash_bd_unlock(hs, &bd, 1);
664 return o;
665 }
666
667 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
668 cfs_hash_bd_unlock(hs, &bd, 1);
669 lu_object_free(env, o);
670 return shadow;
671}
672
673/**
674 * Much like lu_object_find(), but top level device of object is specifically
675 * \a dev rather than top level device of the site. This interface allows
676 * objects of different "stacking" to be created within the same site.
677 */
678struct lu_object *lu_object_find_at(const struct lu_env *env,
679 struct lu_device *dev,
680 const struct lu_fid *f,
681 const struct lu_object_conf *conf)
682{
683 struct lu_site_bkt_data *bkt;
684 struct lu_object *obj;
685 wait_queue_t wait;
686
687 while (1) {
688 obj = lu_object_find_try(env, dev, f, conf, &wait);
689 if (obj != ERR_PTR(-EAGAIN))
690 return obj;
691 /*
692 * lu_object_find_try() already added waiter into the
693 * wait queue.
694 */
695 schedule();
696 bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
697 remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
698 }
699}
700EXPORT_SYMBOL(lu_object_find_at);
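/*
 * Example (hypothetical caller, for illustration): a caller that knows the
 * object cannot already be cached may pass LOC_F_NEW in the configuration,
 * which makes lu_object_find_try() skip the initial lookup and insert the
 * freshly allocated object directly:
 *
 *	struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
 *
 *	o = lu_object_find_at(env, dev, fid, &conf);
 */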
701
702/**
703 * Find object with given fid, and return its slice belonging to given device.
704 */
705struct lu_object *lu_object_find_slice(const struct lu_env *env,
706 struct lu_device *dev,
707 const struct lu_fid *f,
708 const struct lu_object_conf *conf)
709{
710 struct lu_object *top;
711 struct lu_object *obj;
712
713 top = lu_object_find(env, dev, f, conf);
714 if (!IS_ERR(top)) {
715 obj = lu_object_locate(top->lo_header, dev->ld_type);
716 if (!obj)
717 lu_object_put(env, top);
718 } else
719 obj = top;
720 return obj;
721}
722EXPORT_SYMBOL(lu_object_find_slice);
723
724/**
725 * Global list of all device types.
726 */
727static LIST_HEAD(lu_device_types);
728
729int lu_device_type_init(struct lu_device_type *ldt)
730{
731 int result = 0;
732
733 INIT_LIST_HEAD(&ldt->ldt_linkage);
734 if (ldt->ldt_ops->ldto_init)
735 result = ldt->ldt_ops->ldto_init(ldt);
736 if (result == 0)
737 list_add(&ldt->ldt_linkage, &lu_device_types);
738 return result;
739}
740EXPORT_SYMBOL(lu_device_type_init);
741
742void lu_device_type_fini(struct lu_device_type *ldt)
743{
744 list_del_init(&ldt->ldt_linkage);
745 if (ldt->ldt_ops->ldto_fini)
746 ldt->ldt_ops->ldto_fini(ldt);
747}
748EXPORT_SYMBOL(lu_device_type_fini);
749
750void lu_types_stop(void)
751{
752 struct lu_device_type *ldt;
753
754 list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
755 if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
756 ldt->ldt_ops->ldto_stop(ldt);
757 }
758}
759EXPORT_SYMBOL(lu_types_stop);
760
761/**
762 * Global list of all sites on this node
763 */
764static LIST_HEAD(lu_sites);
765static DEFINE_MUTEX(lu_sites_guard);
766
767/**
768 * Global environment used by site shrinker.
769 */
770static struct lu_env lu_shrink_env;
771
772struct lu_site_print_arg {
773 struct lu_env *lsp_env;
774 void *lsp_cookie;
775 lu_printer_t lsp_printer;
776};
777
778static int
779lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
780 struct hlist_node *hnode, void *data)
781{
782 struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
783 struct lu_object_header *h;
784
785 h = hlist_entry(hnode, struct lu_object_header, loh_hash);
786 if (!list_empty(&h->loh_layers)) {
787 const struct lu_object *o;
788
789 o = lu_object_top(h);
790 lu_object_print(arg->lsp_env, arg->lsp_cookie,
791 arg->lsp_printer, o);
792 } else {
793 lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
794 arg->lsp_printer, h);
795 }
796 return 0;
797}
798
799/**
800 * Print all objects in \a s.
801 */
802void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
803 lu_printer_t printer)
804{
805 struct lu_site_print_arg arg = {
806 .lsp_env = (struct lu_env *)env,
807 .lsp_cookie = cookie,
808 .lsp_printer = printer,
809 };
810
811 cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
812}
813EXPORT_SYMBOL(lu_site_print);
814
815enum {
816 LU_CACHE_PERCENT_MAX = 50,
817 LU_CACHE_PERCENT_DEFAULT = 20
818};
819
820static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
821module_param(lu_cache_percent, int, 0644);
822MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");
823
824/**
825 * Return desired hash table order.
826 */
827static int lu_htable_order(void)
828{
829 unsigned long cache_size;
830 int bits;
831
832 /*
833 * Calculate hash table size, assuming that we want reasonable
834 * performance when 20% of total memory is occupied by cache of
835 * lu_objects.
836 *
837 * Size of lu_object is (arbitrary) taken as 1K (together with inode).
838 */
839 cache_size = totalram_pages;
840
841#if BITS_PER_LONG == 32
842 /* limit hashtable size for lowmem systems to low RAM */
843 if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
844 cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
845#endif
846
847 /* clear off unreasonable cache setting. */
848 if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
849 CWARN("obdclass: invalid lu_cache_percent: %u, it must be in the range of (0, %u]. Will use default value: %u.\n",
850 lu_cache_percent, LU_CACHE_PERCENT_MAX,
851 LU_CACHE_PERCENT_DEFAULT);
852
853 lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
854 }
855 cache_size = cache_size / 100 * lu_cache_percent *
856 (PAGE_CACHE_SIZE / 1024);
857
858 for (bits = 1; (1 << bits) < cache_size; ++bits) {
859 ;
860 }
861 return bits;
862}
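/*
 * Worked example (assuming 4 GB of RAM with 4 KB pages, i.e.
 * totalram_pages == 1048576, and the default lu_cache_percent of 20):
 *
 *	cache_size = 1048576 / 100 * 20 * (4096 / 1024) = 838800
 *
 * i.e. room for roughly 840k 1 KB objects; the smallest power of two not
 * below that is 2^20, so lu_htable_order() returns 20.
 */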
863
864static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
865 const void *key, unsigned mask)
866{
867 struct lu_fid *fid = (struct lu_fid *)key;
868 __u32 hash;
869
870 hash = fid_flatten32(fid);
871 hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
872 hash = hash_long(hash, hs->hs_bkt_bits);
873
874 /* give me another random factor */
875 hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
876
877 hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
878 hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
879
880 return hash & mask;
881}
882
883static void *lu_obj_hop_object(struct hlist_node *hnode)
884{
885 return hlist_entry(hnode, struct lu_object_header, loh_hash);
886}
887
888static void *lu_obj_hop_key(struct hlist_node *hnode)
889{
890 struct lu_object_header *h;
891
892 h = hlist_entry(hnode, struct lu_object_header, loh_hash);
893 return &h->loh_fid;
894}
895
896static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
897{
898 struct lu_object_header *h;
899
900 h = hlist_entry(hnode, struct lu_object_header, loh_hash);
901 return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
902}
903
904static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
905{
906 struct lu_object_header *h;
907
908 h = hlist_entry(hnode, struct lu_object_header, loh_hash);
909 atomic_inc(&h->loh_ref);
910}
911
912static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
913{
914 LBUG(); /* we should never be called here */
915}
916
917static struct cfs_hash_ops lu_site_hash_ops = {
918 .hs_hash = lu_obj_hop_hash,
919 .hs_key = lu_obj_hop_key,
920 .hs_keycmp = lu_obj_hop_keycmp,
921 .hs_object = lu_obj_hop_object,
922 .hs_get = lu_obj_hop_get,
923 .hs_put_locked = lu_obj_hop_put_locked,
924};
925
926static void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
927{
928 spin_lock(&s->ls_ld_lock);
929 if (list_empty(&d->ld_linkage))
930 list_add(&d->ld_linkage, &s->ls_ld_linkage);
931 spin_unlock(&s->ls_ld_lock);
932}
933
934/**
935 * Initialize site \a s, with \a d as the top level device.
936 */
937#define LU_SITE_BITS_MIN 12
938#define LU_SITE_BITS_MAX 24
939/**
940 * total 256 buckets, we don't want too many buckets because:
941 * - consume too much memory
942 * - avoid unbalanced LRU list
943 */
944#define LU_SITE_BKT_BITS 8
945
946int lu_site_init(struct lu_site *s, struct lu_device *top)
947{
948 struct lu_site_bkt_data *bkt;
949 struct cfs_hash_bd bd;
950 char name[16];
951 int bits;
952 int i;
953
954 memset(s, 0, sizeof(*s));
955 bits = lu_htable_order();
956 snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
957 for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
958 bits >= LU_SITE_BITS_MIN; bits--) {
959 s->ls_obj_hash = cfs_hash_create(name, bits, bits,
960 bits - LU_SITE_BKT_BITS,
961 sizeof(*bkt), 0, 0,
962 &lu_site_hash_ops,
963 CFS_HASH_SPIN_BKTLOCK |
964 CFS_HASH_NO_ITEMREF |
965 CFS_HASH_DEPTH |
966 CFS_HASH_ASSERT_EMPTY);
967 if (s->ls_obj_hash)
968 break;
969 }
970
971 if (!s->ls_obj_hash) {
972 CERROR("failed to create lu_site hash with bits: %d\n", bits);
973 return -ENOMEM;
974 }
975
976 cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
977 bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
978 INIT_LIST_HEAD(&bkt->lsb_lru);
979 init_waitqueue_head(&bkt->lsb_marche_funebre);
980 }
981
982 s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
983 if (!s->ls_stats) {
984 cfs_hash_putref(s->ls_obj_hash);
985 s->ls_obj_hash = NULL;
986 return -ENOMEM;
987 }
988
989 lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
990 0, "created", "created");
991 lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
992 0, "cache_hit", "cache_hit");
993 lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
994 0, "cache_miss", "cache_miss");
995 lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
996 0, "cache_race", "cache_race");
997 lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
998 0, "cache_death_race", "cache_death_race");
999 lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
1000 0, "lru_purged", "lru_purged");
1001 /*
1002 * Unlike other counters, lru_len can be decremented so
1003 * need lc_sum instead of just lc_count
1004 */
1005 lprocfs_counter_init(s->ls_stats, LU_SS_LRU_LEN,
1006 LPROCFS_CNTR_AVGMINMAX, "lru_len", "lru_len");
1007
1008 INIT_LIST_HEAD(&s->ls_linkage);
1009 s->ls_top_dev = top;
1010 top->ld_site = s;
1011 lu_device_get(top);
1012 lu_ref_add(&top->ld_reference, "site-top", s);
1013
1014 INIT_LIST_HEAD(&s->ls_ld_linkage);
1015 spin_lock_init(&s->ls_ld_lock);
1016
1017 lu_dev_add_linkage(s, top);
1018
1019 return 0;
1020}
1021EXPORT_SYMBOL(lu_site_init);
1022
1023/**
1024 * Finalize \a s and release its resources.
1025 */
1026void lu_site_fini(struct lu_site *s)
1027{
1028 mutex_lock(&lu_sites_guard);
1029 list_del_init(&s->ls_linkage);
1030 mutex_unlock(&lu_sites_guard);
1031
1032 if (s->ls_obj_hash) {
1033 cfs_hash_putref(s->ls_obj_hash);
1034 s->ls_obj_hash = NULL;
1035 }
1036
1037 if (s->ls_top_dev) {
1038 s->ls_top_dev->ld_site = NULL;
1039 lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
1040 lu_device_put(s->ls_top_dev);
1041 s->ls_top_dev = NULL;
1042 }
1043
1044 if (s->ls_stats)
1045 lprocfs_free_stats(&s->ls_stats);
1046}
1047EXPORT_SYMBOL(lu_site_fini);
1048
1049/**
1050 * Called when initialization of stack for this site is completed.
1051 */
1052int lu_site_init_finish(struct lu_site *s)
1053{
1054 int result;
1055
1056 mutex_lock(&lu_sites_guard);
1057 result = lu_context_refill(&lu_shrink_env.le_ctx);
1058 if (result == 0)
1059 list_add(&s->ls_linkage, &lu_sites);
1060 mutex_unlock(&lu_sites_guard);
1061 return result;
1062}
1063EXPORT_SYMBOL(lu_site_init_finish);
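/*
 * Example (hypothetical caller, for illustration): a site is typically
 * brought up around its top-level device and registered with the shrinker
 * environment once the stack is assembled:
 *
 *	rc = lu_site_init(site, top_dev);
 *	if (rc == 0)
 *		rc = lu_site_init_finish(site);
 *	...
 *	lu_site_fini(site);
 */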
1064
1065/**
1066 * Acquire additional reference on device \a d
1067 */
1068void lu_device_get(struct lu_device *d)
1069{
1070 atomic_inc(&d->ld_ref);
1071}
1072EXPORT_SYMBOL(lu_device_get);
1073
1074/**
1075 * Release reference on device \a d.
1076 */
1077void lu_device_put(struct lu_device *d)
1078{
1079 LASSERT(atomic_read(&d->ld_ref) > 0);
1080 atomic_dec(&d->ld_ref);
1081}
1082EXPORT_SYMBOL(lu_device_put);
1083
1084/**
1085 * Initialize device \a d of type \a t.
1086 */
1087int lu_device_init(struct lu_device *d, struct lu_device_type *t)
1088{
1089 if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start)
1090 t->ldt_ops->ldto_start(t);
1091 memset(d, 0, sizeof(*d));
1092 atomic_set(&d->ld_ref, 0);
1093 d->ld_type = t;
1094 lu_ref_init(&d->ld_reference);
1095 INIT_LIST_HEAD(&d->ld_linkage);
1096 return 0;
1097}
1098EXPORT_SYMBOL(lu_device_init);
1099
1100/**
1101 * Finalize device \a d.
1102 */
1103void lu_device_fini(struct lu_device *d)
1104{
1105 struct lu_device_type *t;
1106
1107 t = d->ld_type;
1108 if (d->ld_obd) {
1109 d->ld_obd->obd_lu_dev = NULL;
1110 d->ld_obd = NULL;
1111 }
1112
1113 lu_ref_fini(&d->ld_reference);
1114 LASSERTF(atomic_read(&d->ld_ref) == 0,
1115 "Refcount is %u\n", atomic_read(&d->ld_ref));
1116 LASSERT(t->ldt_device_nr > 0);
1117 if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop)
1118 t->ldt_ops->ldto_stop(t);
1119}
1120EXPORT_SYMBOL(lu_device_fini);
1121
1122/**
1123 * Initialize object \a o that is part of compound object \a h and was created
1124 * by device \a d.
1125 */
1126int lu_object_init(struct lu_object *o, struct lu_object_header *h,
1127 struct lu_device *d)
1128{
1129 memset(o, 0, sizeof(*o));
1130 o->lo_header = h;
1131 o->lo_dev = d;
1132 lu_device_get(d);
1133 lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
1134 INIT_LIST_HEAD(&o->lo_linkage);
1135
1136 return 0;
1137}
1138EXPORT_SYMBOL(lu_object_init);
1139
1140/**
1141 * Finalize object and release its resources.
1142 */
1143void lu_object_fini(struct lu_object *o)
1144{
1145 struct lu_device *dev = o->lo_dev;
1146
1147 LASSERT(list_empty(&o->lo_linkage));
1148
1149 if (dev) {
1150 lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
1151 "lu_object", o);
1152 lu_device_put(dev);
1153 o->lo_dev = NULL;
1154 }
1155}
1156EXPORT_SYMBOL(lu_object_fini);
1157
1158/**
1159 * Add object \a o as first layer of compound object \a h
1160 *
1161 * This is typically called by the ->ldo_object_alloc() method of top-level
1162 * device.
1163 */
1164void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
1165{
1166 list_move(&o->lo_linkage, &h->loh_layers);
1167}
1168EXPORT_SYMBOL(lu_object_add_top);
1169
1170/**
1171 * Add object \a o as a layer of compound object, going after \a before.
1172 *
1173 * This is typically called by the ->ldo_object_alloc() method of \a
1174 * before->lo_dev.
1175 */
1176void lu_object_add(struct lu_object *before, struct lu_object *o)
1177{
1178 list_move(&o->lo_linkage, &before->lo_linkage);
1179}
1180EXPORT_SYMBOL(lu_object_add);
1181
1182/**
1183 * Initialize compound object.
1184 */
1185int lu_object_header_init(struct lu_object_header *h)
1186{
1187 memset(h, 0, sizeof(*h));
1188 atomic_set(&h->loh_ref, 1);
1189 INIT_HLIST_NODE(&h->loh_hash);
1190 INIT_LIST_HEAD(&h->loh_lru);
1191 INIT_LIST_HEAD(&h->loh_layers);
1192 lu_ref_init(&h->loh_reference);
1193 return 0;
1194}
1195EXPORT_SYMBOL(lu_object_header_init);
1196
1197/**
1198 * Finalize compound object.
1199 */
1200void lu_object_header_fini(struct lu_object_header *h)
1201{
1202 LASSERT(list_empty(&h->loh_layers));
1203 LASSERT(list_empty(&h->loh_lru));
1204 LASSERT(hlist_unhashed(&h->loh_hash));
1205 lu_ref_fini(&h->loh_reference);
1206}
1207EXPORT_SYMBOL(lu_object_header_fini);
1208
1209/**
1210 * Given a compound object, find its slice, corresponding to the device type
1211 * \a dtype.
1212 */
1213struct lu_object *lu_object_locate(struct lu_object_header *h,
1214 const struct lu_device_type *dtype)
1215{
1216 struct lu_object *o;
1217
1218 list_for_each_entry(o, &h->loh_layers, lo_linkage) {
1219 if (o->lo_dev->ld_type == dtype)
1220 return o;
1221 }
1222 return NULL;
1223}
1224EXPORT_SYMBOL(lu_object_locate);
1225
1226/**
1227 * Finalize and free devices in the device stack.
1228 *
1229 * Finalize device stack by purging object cache, and calling
1230 * lu_device_type_operations::ldto_device_fini() and
1231 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
1232 */
1233void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
1234{
1235 struct lu_site *site = top->ld_site;
1236 struct lu_device *scan;
1237 struct lu_device *next;
1238
1239 lu_site_purge(env, site, ~0);
1240 for (scan = top; scan; scan = next) {
1241 next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
1242 lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
1243 lu_device_put(scan);
1244 }
1245
1246 /* purge again. */
1247 lu_site_purge(env, site, ~0);
1248
1249 for (scan = top; scan; scan = next) {
1250 const struct lu_device_type *ldt = scan->ld_type;
1251 struct obd_type *type;
1252
1253 next = ldt->ldt_ops->ldto_device_free(env, scan);
1254 type = ldt->ldt_obd_type;
1255 if (type) {
1256 type->typ_refcnt--;
1257 class_put_type(type);
1258 }
1259 }
1260}
1261EXPORT_SYMBOL(lu_stack_fini);
1262
1263enum {
1264 /**
1265 * Maximal number of tld slots.
1266 */
1267 LU_CONTEXT_KEY_NR = 40
1268};
1269
1270static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
1271
1272static DEFINE_SPINLOCK(lu_keys_guard);
1273
1274/**
1275 * Global counter incremented whenever key is registered, unregistered,
1276 * revived or quiesced. This is used to avoid unnecessary calls to
1277 * lu_context_refill(). No locking is provided, as initialization and shutdown
1278 * are supposed to be externally serialized.
1279 */
1280static unsigned key_set_version;
1281
1282/**
1283 * Register new key.
1284 */
1285int lu_context_key_register(struct lu_context_key *key)
1286{
1287 int result;
1288 int i;
1289
1290 LASSERT(key->lct_init);
1291 LASSERT(key->lct_fini);
1292 LASSERT(key->lct_tags != 0);
1293
1294 result = -ENFILE;
1295 spin_lock(&lu_keys_guard);
1296 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1297 if (!lu_keys[i]) {
1298 key->lct_index = i;
1299 atomic_set(&key->lct_used, 1);
1300 lu_keys[i] = key;
1301 lu_ref_init(&key->lct_reference);
1302 result = 0;
1303 ++key_set_version;
1304 break;
1305 }
1306 }
1307 spin_unlock(&lu_keys_guard);
1308 return result;
1309}
1310EXPORT_SYMBOL(lu_context_key_register);
1311
1312static void key_fini(struct lu_context *ctx, int index)
1313{
1314 if (ctx->lc_value && ctx->lc_value[index]) {
1315 struct lu_context_key *key;
1316
1317 key = lu_keys[index];
1318 LASSERT(atomic_read(&key->lct_used) > 1);
1319
1320 key->lct_fini(ctx, key, ctx->lc_value[index]);
1321 lu_ref_del(&key->lct_reference, "ctx", ctx);
1322 atomic_dec(&key->lct_used);
1323
1324 if ((ctx->lc_tags & LCT_NOREF) == 0) {
1325#ifdef CONFIG_MODULE_UNLOAD
1326 LINVRNT(module_refcount(key->lct_owner) > 0);
1327#endif
1328 module_put(key->lct_owner);
1329 }
1330 ctx->lc_value[index] = NULL;
1331 }
1332}
1333
1334/**
1335 * Deregister key.
1336 */
1337void lu_context_key_degister(struct lu_context_key *key)
1338{
1339 LASSERT(atomic_read(&key->lct_used) >= 1);
1340 LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1341
1342 lu_context_key_quiesce(key);
1343
1344 ++key_set_version;
1345 spin_lock(&lu_keys_guard);
1346 key_fini(&lu_shrink_env.le_ctx, key->lct_index);
1347 if (lu_keys[key->lct_index]) {
1348 lu_keys[key->lct_index] = NULL;
1349 lu_ref_fini(&key->lct_reference);
1350 }
1351 spin_unlock(&lu_keys_guard);
1352
1353 LASSERTF(atomic_read(&key->lct_used) == 1,
1354 "key has instances: %d\n",
1355 atomic_read(&key->lct_used));
1356}
1357EXPORT_SYMBOL(lu_context_key_degister);
1358
1359/**
1360 * Register a number of keys. This has to be called after all keys have been
1361 * initialized by a call to LU_CONTEXT_KEY_INIT().
1362 */
1363int lu_context_key_register_many(struct lu_context_key *k, ...)
1364{
1365 struct lu_context_key *key = k;
1366 va_list args;
1367 int result;
1368
1369 va_start(args, k);
1370 do {
1371 result = lu_context_key_register(key);
1372 if (result)
1373 break;
1374 key = va_arg(args, struct lu_context_key *);
1375 } while (key);
1376 va_end(args);
1377
1378 if (result != 0) {
1379 va_start(args, k);
1380 while (k != key) {
1381 lu_context_key_degister(k);
1382 k = va_arg(args, struct lu_context_key *);
1383 }
1384 va_end(args);
1385 }
1386
1387 return result;
1388}
1389EXPORT_SYMBOL(lu_context_key_register_many);
1390
1391/**
1392 * De-register a number of keys. This is a dual to
1393 * lu_context_key_register_many().
1394 */
1395void lu_context_key_degister_many(struct lu_context_key *k, ...)
1396{
1397 va_list args;
1398
1399 va_start(args, k);
1400 do {
1401 lu_context_key_degister(k);
1402 k = va_arg(args, struct lu_context_key*);
1403 } while (k);
1404 va_end(args);
1405}
1406EXPORT_SYMBOL(lu_context_key_degister_many);
1407
1408/**
1409 * Revive a number of keys.
1410 */
1411void lu_context_key_revive_many(struct lu_context_key *k, ...)
1412{
1413 va_list args;
1414
1415 va_start(args, k);
1416 do {
1417 lu_context_key_revive(k);
1418 k = va_arg(args, struct lu_context_key*);
1419 } while (k);
1420 va_end(args);
1421}
1422EXPORT_SYMBOL(lu_context_key_revive_many);
1423
1424/**
1425 * Quiesce a number of keys.
1426 */
1427void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
1428{
1429 va_list args;
1430
1431 va_start(args, k);
1432 do {
1433 lu_context_key_quiesce(k);
1434 k = va_arg(args, struct lu_context_key*);
1435 } while (k);
1436 va_end(args);
1437}
1438EXPORT_SYMBOL(lu_context_key_quiesce_many);
1439
1440/**
1441 * Return value associated with key \a key in context \a ctx.
1442 */
1443void *lu_context_key_get(const struct lu_context *ctx,
1444 const struct lu_context_key *key)
1445{
1446 LINVRNT(ctx->lc_state == LCS_ENTERED);
1447 LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1448 LASSERT(lu_keys[key->lct_index] == key);
1449 return ctx->lc_value[key->lct_index];
1450}
1451EXPORT_SYMBOL(lu_context_key_get);
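/*
 * Example (mirroring how lu_global_key is used in this file): a module
 * declares per-context storage with LU_KEY_INIT_FINI(), registers the key
 * once, and later fetches the per-context value:
 *
 *	LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
 *	...
 *	result = lu_context_key_register(&lu_global_key);
 *	...
 *	key = lu_context_key_get(&env->le_ctx, &lu_global_key);
 */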
1452
1453/**
1454 * List of remembered contexts. XXX document me.
1455 */
1456static LIST_HEAD(lu_context_remembered);
1457
1458/**
1459 * Destroy \a key in all remembered contexts. This is used to destroy key
1460 * values in "shared" contexts (like service threads), when a module owning
1461 * the key is about to be unloaded.
1462 */
1463void lu_context_key_quiesce(struct lu_context_key *key)
1464{
1465 struct lu_context *ctx;
1466
1467 if (!(key->lct_tags & LCT_QUIESCENT)) {
1468 /*
1469 * XXX layering violation.
1470 */
1471 key->lct_tags |= LCT_QUIESCENT;
1472 /*
1473 * XXX memory barrier has to go here.
1474 */
1475 spin_lock(&lu_keys_guard);
1476 list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
1477 key_fini(ctx, key->lct_index);
1478 spin_unlock(&lu_keys_guard);
1479 ++key_set_version;
1480 }
1481}
1482EXPORT_SYMBOL(lu_context_key_quiesce);
1483
1484void lu_context_key_revive(struct lu_context_key *key)
1485{
1486 key->lct_tags &= ~LCT_QUIESCENT;
1487 ++key_set_version;
1488}
1489EXPORT_SYMBOL(lu_context_key_revive);
1490
1491static void keys_fini(struct lu_context *ctx)
1492{
1493 int i;
1494
1495 if (!ctx->lc_value)
1496 return;
1497
1498 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
1499 key_fini(ctx, i);
1500
1501 kfree(ctx->lc_value);
1502 ctx->lc_value = NULL;
1503}
1504
1505static int keys_fill(struct lu_context *ctx)
1506{
1507 int i;
1508
1509 LINVRNT(ctx->lc_value);
1510 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1511 struct lu_context_key *key;
1512
1513 key = lu_keys[i];
1514 if (!ctx->lc_value[i] && key &&
1515 (key->lct_tags & ctx->lc_tags) &&
1516 /*
1517 * Don't create values for a LCT_QUIESCENT key, as this
1518 * will pin module owning a key.
1519 */
1520 !(key->lct_tags & LCT_QUIESCENT)) {
1521 void *value;
1522
1523 LINVRNT(key->lct_init);
1524 LINVRNT(key->lct_index == i);
1525
1526 value = key->lct_init(ctx, key);
1527 if (IS_ERR(value))
1528 return PTR_ERR(value);
1529
1530 if (!(ctx->lc_tags & LCT_NOREF))
1531 try_module_get(key->lct_owner);
1532 lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
1533 atomic_inc(&key->lct_used);
1534 /*
1535 * This is the only place in the code, where an
1536 * element of ctx->lc_value[] array is set to non-NULL
1537 * value.
1538 */
1539 ctx->lc_value[i] = value;
1540 if (key->lct_exit)
1541 ctx->lc_tags |= LCT_HAS_EXIT;
1542 }
1543 ctx->lc_version = key_set_version;
1544 }
1545 return 0;
1546}
1547
1548static int keys_init(struct lu_context *ctx)
1549{
1550 ctx->lc_value = kcalloc(ARRAY_SIZE(lu_keys), sizeof(ctx->lc_value[0]),
1551 GFP_NOFS);
1552 if (likely(ctx->lc_value))
1553 return keys_fill(ctx);
1554
1555 return -ENOMEM;
1556}
1557
1558/**
1559 * Initialize context data-structure. Create values for all keys.
1560 */
1561int lu_context_init(struct lu_context *ctx, __u32 tags)
1562{
1563 int rc;
1564
1565 memset(ctx, 0, sizeof(*ctx));
1566 ctx->lc_state = LCS_INITIALIZED;
1567 ctx->lc_tags = tags;
1568 if (tags & LCT_REMEMBER) {
1569 spin_lock(&lu_keys_guard);
1570 list_add(&ctx->lc_remember, &lu_context_remembered);
1571 spin_unlock(&lu_keys_guard);
1572 } else {
1573 INIT_LIST_HEAD(&ctx->lc_remember);
1574 }
1575
1576 rc = keys_init(ctx);
1577 if (rc != 0)
1578 lu_context_fini(ctx);
1579
1580 return rc;
1581}
1582EXPORT_SYMBOL(lu_context_init);
1583
1584/**
1585 * Finalize context data-structure. Destroy key values.
1586 */
1587void lu_context_fini(struct lu_context *ctx)
1588{
1589 LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1590 ctx->lc_state = LCS_FINALIZED;
1591
1592 if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
1593 LASSERT(list_empty(&ctx->lc_remember));
1594 keys_fini(ctx);
1595
1596 } else { /* could race with key degister */
1597 spin_lock(&lu_keys_guard);
1598 keys_fini(ctx);
1599 list_del_init(&ctx->lc_remember);
1600 spin_unlock(&lu_keys_guard);
1601 }
1602}
1603EXPORT_SYMBOL(lu_context_fini);
1604
1605/**
1606 * Called before entering context.
1607 */
1608void lu_context_enter(struct lu_context *ctx)
1609{
1610 LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1611 ctx->lc_state = LCS_ENTERED;
1612}
1613EXPORT_SYMBOL(lu_context_enter);
1614
1615/**
1616 * Called after exiting from \a ctx
1617 */
1618void lu_context_exit(struct lu_context *ctx)
1619{
1620 int i;
1621
1622 LINVRNT(ctx->lc_state == LCS_ENTERED);
1623 ctx->lc_state = LCS_LEFT;
1624 if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) {
1625 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1626 if (ctx->lc_value[i]) {
1627 struct lu_context_key *key;
1628
1629 key = lu_keys[i];
1630 if (key->lct_exit)
1631 key->lct_exit(ctx,
1632 key, ctx->lc_value[i]);
1633 }
1634 }
1635 }
1636}
1637EXPORT_SYMBOL(lu_context_exit);
1638
1639/**
1640 * Allocate for context all missing keys that were registered after context
1641 * creation. key_set_version is only changed in rare cases when modules
1642 * are loaded and removed.
1643 */
1644int lu_context_refill(struct lu_context *ctx)
1645{
1646 return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
1647}
1648EXPORT_SYMBOL(lu_context_refill);
1649
1650/**
1651 * lu_ctx_tags/lu_ses_tags will be updated if there are new types of
1652 * obd being added. Currently, this is only used on client side, specifically
1653 * for echo device client, for other stacks (like ptlrpc threads), contexts are
1654 * predefined when the lu_device types are registered, during the module probe
1655 * phase.
1656 */
1657__u32 lu_context_tags_default;
1658__u32 lu_session_tags_default;
1659
1660int lu_env_init(struct lu_env *env, __u32 tags)
1661{
1662 int result;
1663
1664 env->le_ses = NULL;
1665 result = lu_context_init(&env->le_ctx, tags);
1666 if (likely(result == 0))
1667 lu_context_enter(&env->le_ctx);
1668 return result;
1669}
1670EXPORT_SYMBOL(lu_env_init);
1671
1672void lu_env_fini(struct lu_env *env)
1673{
1674 lu_context_exit(&env->le_ctx);
1675 lu_context_fini(&env->le_ctx);
1676 env->le_ses = NULL;
1677}
1678EXPORT_SYMBOL(lu_env_fini);
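/*
 * Example (hypothetical caller, for illustration): an environment is set
 * up once, optionally refilled when new keys appear, and torn down when
 * the caller is done:
 *
 *	struct lu_env env;
 *
 *	rc = lu_env_init(&env, LCT_LOCAL);
 *	if (rc)
 *		return rc;
 *	...
 *	lu_env_refill(&env);
 *	...
 *	lu_env_fini(&env);
 */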
1679
1680int lu_env_refill(struct lu_env *env)
1681{
1682 int result;
1683
1684 result = lu_context_refill(&env->le_ctx);
1685 if (result == 0 && env->le_ses)
1686 result = lu_context_refill(env->le_ses);
1687 return result;
1688}
1689EXPORT_SYMBOL(lu_env_refill);
1690
1691struct lu_site_stats {
1692 unsigned lss_populated;
1693 unsigned lss_max_search;
1694 unsigned lss_total;
1695 unsigned lss_busy;
1696};
1697
1698static void lu_site_stats_get(struct cfs_hash *hs,
1699 struct lu_site_stats *stats, int populated)
1700{
1701 struct cfs_hash_bd bd;
1702 int i;
1703
1704 cfs_hash_for_each_bucket(hs, &bd, i) {
1705 struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
1706 struct hlist_head *hhead;
1707
1708 cfs_hash_bd_lock(hs, &bd, 1);
1709 stats->lss_busy +=
1710 cfs_hash_bd_count_get(&bd) - bkt->lsb_lru_len;
1711 stats->lss_total += cfs_hash_bd_count_get(&bd);
1712 stats->lss_max_search = max((int)stats->lss_max_search,
1713 cfs_hash_bd_depmax_get(&bd));
1714 if (!populated) {
1715 cfs_hash_bd_unlock(hs, &bd, 1);
1716 continue;
1717 }
1718
1719 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1720 if (!hlist_empty(hhead))
1721 stats->lss_populated++;
1722 }
1723 cfs_hash_bd_unlock(hs, &bd, 1);
1724 }
1725}
1726
1727 /*
1728 * lu_cache_shrink_count returns the number of cached objects that are
1729 * candidates to be freed by shrink_slab(). A counter, which tracks
1730 * the number of items in the site's lru, is maintained in the per cpu
1731 * stats of each site. The counter is incremented when an object is added
1732 * to a site's lru and decremented when one is removed. The number of
1733 * free-able objects is the sum of all per cpu counters for all sites.
1734 *
1735 * Using a per cpu counter is a compromise solution to concurrent access:
1736 * lu_object_put() can update the counter without locking the site and
1737 * lu_cache_shrink_count can sum the counters without locking each
1738 * ls_obj_hash bucket.
1739 */
1740static unsigned long lu_cache_shrink_count(struct shrinker *sk,
1741 struct shrink_control *sc)
1742{
1743 struct lu_site *s;
1744 struct lu_site *tmp;
1745 unsigned long cached = 0;
1746
1747 if (!(sc->gfp_mask & __GFP_FS))
1748 return 0;
1749
1750 mutex_lock(&lu_sites_guard);
1751 list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
1752 cached += ls_stats_read(s->ls_stats, LU_SS_LRU_LEN);
1753 }
1754 mutex_unlock(&lu_sites_guard);
1755
1756 cached = (cached / 100) * sysctl_vfs_cache_pressure;
1757 CDEBUG(D_INODE, "%ld objects cached, cache pressure %d\n",
1758 cached, sysctl_vfs_cache_pressure);
1759
1760 return cached;
1761}
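/*
 * Worked example: with 20000 objects on the LRU lists of all sites and the
 * default sysctl_vfs_cache_pressure of 100, the count reported above is
 * (20000 / 100) * 100 = 20000 freeable objects; raising the pressure to
 * 200 would double that to 40000.
 */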
1762
1763static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
1764 struct shrink_control *sc)
1765{
1766 struct lu_site *s;
1767 struct lu_site *tmp;
1768 unsigned long remain = sc->nr_to_scan, freed = 0;
1769 LIST_HEAD(splice);
1770
1771 if (!(sc->gfp_mask & __GFP_FS))
1772 /* We must not take the lu_sites_guard lock when
1773 * __GFP_FS is *not* set because of the deadlock
1774 * possibility detailed above. Additionally,
1775 * since we cannot determine the number of
1776 * objects in the cache without taking this
1777 * lock, we're in a particularly tough spot. As
1778 * a result, we'll just lie and say our cache is
1779 * empty. This _should_ be ok, as we can't
1780 * reclaim objects when __GFP_FS is *not* set
1781 * anyways.
1782 */
1783 return SHRINK_STOP;
1784
1785 mutex_lock(&lu_sites_guard);
1786 list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
1787 freed = lu_site_purge(&lu_shrink_env, s, remain);
1788 remain -= freed;
1789 /*
1790 * Move just shrunk site to the tail of site list to
1791 * assure shrinking fairness.
1792 */
1793 list_move_tail(&s->ls_linkage, &splice);
1794 }
1795 list_splice(&splice, lu_sites.prev);
1796 mutex_unlock(&lu_sites_guard);
1797
1798 return sc->nr_to_scan - remain;
1799}
1800
1801/**
1802 * Debugging printer function using printk().
1803 */
fe92a055
PT
1804static struct shrinker lu_site_shrinker = {
1805 .count_objects = lu_cache_shrink_count,
1806 .scan_objects = lu_cache_shrink_scan,
1807 .seeks = DEFAULT_SEEKS,
1808};
1809
1810/**
1811 * Initialization of global lu_* data.
1812 */
1813int lu_global_init(void)
1814{
1815 int result;
1816
1817 CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
1818
1819 result = lu_ref_global_init();
1820 if (result != 0)
1821 return result;
1822
1823 LU_CONTEXT_KEY_INIT(&lu_global_key);
1824 result = lu_context_key_register(&lu_global_key);
1825 if (result != 0)
1826 return result;
1827
1828 /*
1829 * At this level, we don't know what tags are needed, so allocate them
1830 * conservatively. This should not be too bad, because this
1831 * environment is global.
1832 */
1833 mutex_lock(&lu_sites_guard);
1834 result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
1835 mutex_unlock(&lu_sites_guard);
1836 if (result != 0)
1837 return result;
1838
1839 /*
1840 * seeks estimation: 3 seeks to read a record from oi, one to read
1841 * inode, one for ea. Unfortunately setting this high value results in
1842 * lu_object/inode cache consuming all the memory.
1843 */
1844 register_shrinker(&lu_site_shrinker);
1845
1846 return result;
1847}
1848
1849/**
1850 * Dual to lu_global_init().
1851 */
1852void lu_global_fini(void)
1853{
1854 unregister_shrinker(&lu_site_shrinker);
1855 lu_context_key_degister(&lu_global_key);
1856
1857 /*
1858 * Tear shrinker environment down _after_ de-registering
1859 * lu_global_key, because the latter has a value in the former.
1860 */
1861 mutex_lock(&lu_sites_guard);
1862 lu_env_fini(&lu_shrink_env);
1863 mutex_unlock(&lu_sites_guard);
1864
1865 lu_ref_global_fini();
1866}
1867
1868static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
1869{
1870 struct lprocfs_counter ret;
1871
1872 lprocfs_stats_collect(stats, idx, &ret);
1873 if (idx == LU_SS_LRU_LEN)
1874 /*
1875 * protect against counter on cpu A being decremented
1876 * before counter is incremented on cpu B; unlikely
1877 */
1878 return (__u32)((ret.lc_sum > 0) ? ret.lc_sum : 0);
1879
1880 return (__u32)ret.lc_count;
1881}
1882
1883/**
1884 * Output site statistical counters into a buffer. Suitable for
1885 * lprocfs_rd_*()-style functions.
1886 */
1887int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
1888{
1889 struct lu_site_stats stats;
1890
1891 memset(&stats, 0, sizeof(stats));
1892 lu_site_stats_get(s->ls_obj_hash, &stats, 1);
1893
1894 seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d %d\n",
1895 stats.lss_busy,
1896 stats.lss_total,
1897 stats.lss_populated,
1898 CFS_HASH_NHLIST(s->ls_obj_hash),
1899 stats.lss_max_search,
1900 ls_stats_read(s->ls_stats, LU_SS_CREATED),
1901 ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
1902 ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
1903 ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
1904 ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
1905 ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED),
1906 ls_stats_read(s->ls_stats, LU_SS_LRU_LEN));
1907 return 0;
1908}
1909EXPORT_SYMBOL(lu_site_stats_print);
1910
1911/**
1912 * Helper function to initialize a number of kmem slab caches at once.
1913 */
1914int lu_kmem_init(struct lu_kmem_descr *caches)
1915{
1916 int result;
1917 struct lu_kmem_descr *iter = caches;
1918
1919 for (result = 0; iter->ckd_cache; ++iter) {
1920 *iter->ckd_cache = kmem_cache_create(iter->ckd_name,
1921 iter->ckd_size,
1922 0, 0, NULL);
1923 if (!*iter->ckd_cache) {
1924 result = -ENOMEM;
1925 /* free all previously allocated caches */
1926 lu_kmem_fini(caches);
1927 break;
1928 }
1929 }
1930 return result;
1931}
1932EXPORT_SYMBOL(lu_kmem_init);
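/*
 * Example (hypothetical caller, for illustration): caches are described by
 * an array of lu_kmem_descr terminated by a NULL ckd_cache pointer, and the
 * same array is handed to both helpers:
 *
 *	static struct kmem_cache *foo_object_kmem;
 *
 *	static struct lu_kmem_descr foo_caches[] = {
 *		{
 *			.ckd_cache = &foo_object_kmem,
 *			.ckd_name  = "foo_object_kmem",
 *			.ckd_size  = sizeof(struct foo_object)
 *		},
 *		{
 *			.ckd_cache = NULL
 *		}
 *	};
 *
 *	rc = lu_kmem_init(foo_caches);
 *	...
 *	lu_kmem_fini(foo_caches);
 */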
1933
1934/**
1935 * Helper function to finalize a number of kmem slab cached at once. Dual to
1936 * lu_kmem_init().
1937 */
1938void lu_kmem_fini(struct lu_kmem_descr *caches)
1939{
1940 for (; caches->ckd_cache; ++caches) {
1941 kmem_cache_destroy(*caches->ckd_cache);
1942 *caches->ckd_cache = NULL;
1943 }
1944}
1945EXPORT_SYMBOL(lu_kmem_fini);