1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2010, 2015, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * lustre/ldlm/ldlm_pool.c
37 *
38 * Author: Yury Umanets <umka@clusterfs.com>
39 */
40
41 /*
42 * The idea of this code is rather simple. Each second, for each server
43 * namespace, we have the SLV - server lock volume - which is calculated from
44 * the current number of granted locks, the grant speed for the past period,
45 * etc - that is, the locking load. For simplicity, this SLV number may be
46 * thought of as a flow definition. It is sent to clients at every opportunity
47 * to let them know the current load situation on the server. By default, at
48 * the beginning, the SLV on the server is set to a maximum value, calculated
49 * so that one client is allowed to hold all ->pl_limit locks for 10h.
50 *
51 * Next, on clients, the number of cached locks is not limited artificially in
52 * any way as it was before. Instead, the client calculates the CLV, that is,
53 * the client lock volume, for each lock and compares it with the last SLV
54 * received from the server. The CLV is calculated as the number of locks in
55 * the LRU * lock live time in seconds. If CLV > SLV, the lock is canceled.
56 *
57 * The client has an LVF, that is, a lock volume factor which regulates how
58 * sensitive the client should be to the last SLV from the server. The higher
59 * the LVF, the more locks will be canceled on the client. Its default value
60 * is 1. Setting LVF to 2 means that the client will cancel locks 2 times faster.
61 *
62 * Locks on a client will be canceled more intensively in these cases:
63 * (1) if SLV is smaller, that is, load is higher on the server;
64 * (2) client has a lot of locks (the more locks are held by client, the bigger
65 * chances that some of them should be canceled);
66 * (3) client has old locks (taken some time ago);
67 *
68 * Thus, in the flow paradigm that we use to better understand the SLV, the
69 * CLV is the volume of a particle in the flow described by the SLV. Accordingly,
70 * if the flow is getting thinner, more and more particles fall outside of it,
71 * and as particles are locks, they should be canceled.
72 *
73 * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
74 * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas, such as
75 * using the LVF, and many cleanups. The flow definition, which allows easier
76 * understanding of the logic, belongs to Nikita Danilov (nikita@clusterfs.com),
77 * as well as many cleanups and fixes. The design and implementation were done
78 * by Yury Umanets (umka@clusterfs.com).
79 *
80 * Glossary for terms used:
81 *
82 * pl_limit - Number of allowed locks in pool. Applies to server and client
83 * side (tunable);
84 *
85 * pl_granted - Number of granted locks (calculated);
86 * pl_grant_rate - Number of granted locks for last T (calculated);
87 * pl_cancel_rate - Number of canceled locks for last T (calculated);
88 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
89 * pl_grant_plan - Planned number of granted locks for next T (calculated);
90 * pl_server_lock_volume - Current server lock volume (calculated);
91 *
92 * As may be seen from the list above, we have a few tunables which may
93 * affect behavior considerably. They all may be modified via sysfs. However,
94 * they also make it possible to construct a few pre-defined behavior policies.
95 * If none of the predefined policies suits the working pattern in use, a new
96 * one may be "constructed" via the sysfs tunables.
97 */
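/*
 * A minimal illustrative sketch of the client-side decision described
 * above, assuming CLV = LVF * lock age * number of unused locks. This is
 * NOT the code in this file - the real check lives in the LRU cancel
 * policy in ldlm_request.c - and the helper name clv_should_cancel() is
 * made up for illustration only.
 */
static inline int clv_should_cancel(__u64 slv, __u32 lvf,
				    __u64 lock_age_sec, __u32 nr_unused)
{
	/* Client lock volume for this lock, scaled by the volume factor. */
	__u64 clv = (__u64)lvf * lock_age_sec * nr_unused;

	/* The lock "fell out of the flow" once its volume exceeds the SLV. */
	return clv > slv;
}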
98
99 #define DEBUG_SUBSYSTEM S_LDLM
100
101 #include "../include/lustre_dlm.h"
102 #include "../include/cl_object.h"
103 #include "../include/obd_class.h"
104 #include "../include/obd_support.h"
105 #include "ldlm_internal.h"
106
107 /*
108 * 50 ldlm locks for 1MB of RAM.
109 */
110 #define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
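/*
 * Worked example (assuming 4KB pages, i.e. PAGE_SHIFT == 12, and that
 * NUM_CACHEPAGES counts cacheable pages): 20 - PAGE_SHIFT == 8, so the
 * shift divides the page count by 256 pages/MB. For 1GB of RAM that is
 * 262144 pages >> 8 == 1024MB, giving 1024 * 50 == 51200 locks.
 */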
111
112 /*
113 * Maximal possible grant step plan in %.
114 */
115 #define LDLM_POOL_MAX_GSP (30)
116
117 /*
118 * Minimal possible grant step plan in %.
119 */
120 #define LDLM_POOL_MIN_GSP (1)
121
122 /*
123 * This controls the speed of reaching LDLM_POOL_MAX_GSP
124 * with increasing thread period.
125 */
126 #define LDLM_POOL_GSP_STEP_SHIFT (2)
127
128 /*
129 * LDLM_POOL_GSP% of all locks is default GP.
130 */
131 #define LDLM_POOL_GP(L) (((L) * LDLM_POOL_MAX_GSP) / 100)
132
133 /*
134 * Max age for locks on clients.
135 */
136 #define LDLM_POOL_MAX_AGE (36000)
137
138 /*
139 * The granularity of SLV calculation.
140 */
141 #define LDLM_POOL_SLV_SHIFT (10)
142
143 static inline __u64 dru(__u64 val, __u32 shift, int round_up)
144 {
145 return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
146 }
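/*
 * Example of the round-up division above: dru(1536, 10, 1) == 2
 * (1536/1024 rounded up), while dru(1536, 10, 0) == 1 (truncated).
 */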
147
148 static inline __u64 ldlm_pool_slv_max(__u32 L)
149 {
150 /*
151 * Allow one client to hold all locks for 10 hrs.
152 * The formula is: limit * 10h / 1 client.
153 */
154 __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
155 return lim;
156 }
157
158 static inline __u64 ldlm_pool_slv_min(__u32 L)
159 {
160 return 1;
161 }
162
163 enum {
164 LDLM_POOL_FIRST_STAT = 0,
165 LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
166 LDLM_POOL_GRANT_STAT,
167 LDLM_POOL_CANCEL_STAT,
168 LDLM_POOL_GRANT_RATE_STAT,
169 LDLM_POOL_CANCEL_RATE_STAT,
170 LDLM_POOL_GRANT_PLAN_STAT,
171 LDLM_POOL_SLV_STAT,
172 LDLM_POOL_SHRINK_REQTD_STAT,
173 LDLM_POOL_SHRINK_FREED_STAT,
174 LDLM_POOL_RECALC_STAT,
175 LDLM_POOL_TIMING_STAT,
176 LDLM_POOL_LAST_STAT
177 };
178
179 /**
180 * Calculates the suggested grant_step in % of available locks for the passed
181 * period \a t. This is later used in grant_plan calculations.
182 */
183 static inline int ldlm_pool_t2gsp(unsigned int t)
184 {
185 /*
186 * This yields 1% grant step for anything below LDLM_POOL_GSP_STEP
187 * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
188 *
189 * This affects execution as follows:
190 *
191 * - for a thread period of 1s we will have a grant_step of 1%, which is
192 * good from the pov of taking some load off the server and pushing it out
193 * to clients. This is because a 1% grant_step means that the server will
194 * not allow clients to get lots of locks in a short period of time and
195 * keep all their old locks cached. Clients will always have to give some
196 * locks back if they want to take new ones;
197 *
198 * - for a thread period of 10s (which is the default) we will have 23%,
199 * which means that clients will have enough room to take some new locks
200 * without giving some back. All locks from this 23% which were not taken
201 * by clients in the current period will contribute to SLV growth. SLV
202 * growth means more locks cached on clients until the limit or the grant
203 * plan is reached.
204 */
205 return LDLM_POOL_MAX_GSP -
206 ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
207 (t >> LDLM_POOL_GSP_STEP_SHIFT));
208 }
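/*
 * Worked examples of the formula above (LDLM_POOL_MAX_GSP == 30,
 * LDLM_POOL_MIN_GSP == 1, LDLM_POOL_GSP_STEP_SHIFT == 2):
 *
 *   t = 1:  1 >> 2 == 0,   (30 - 1) >> 0  == 29, 30 - 29 == 1%
 *   t = 10: 10 >> 2 == 2,  (30 - 1) >> 2  == 7,  30 - 7  == 23%
 *   t = 40: 40 >> 2 == 10, (30 - 1) >> 10 == 0,  30 - 0  == 30%
 */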
209
210 /**
211 * Recalculates next stats on passed \a pl.
212 *
213 * \pre ->pl_lock is locked.
214 */
215 static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
216 {
217 int grant_plan = pl->pl_grant_plan;
218 __u64 slv = pl->pl_server_lock_volume;
219 int granted = atomic_read(&pl->pl_granted);
220 int grant_rate = atomic_read(&pl->pl_grant_rate);
221 int cancel_rate = atomic_read(&pl->pl_cancel_rate);
222
223 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
224 slv);
225 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
226 granted);
227 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
228 grant_rate);
229 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
230 grant_plan);
231 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
232 cancel_rate);
233 }
234
235 /**
236 * Sets SLV and Limit from container_of(pl, struct ldlm_namespace,
237 * ns_pool)->ns_obd to passed \a pl.
238 */
239 static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
240 {
241 struct obd_device *obd;
242
243 /*
244 * Get new SLV and Limit from obd which is updated with coming
245 * RPCs.
246 */
247 obd = container_of(pl, struct ldlm_namespace,
248 ns_pool)->ns_obd;
249 read_lock(&obd->obd_pool_lock);
250 pl->pl_server_lock_volume = obd->obd_pool_slv;
251 atomic_set(&pl->pl_limit, obd->obd_pool_limit);
252 read_unlock(&obd->obd_pool_lock);
253 }
254
255 /**
256 * Recalculates the client side pool \a pl according to current SLV and Limit.
257 */
258 static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
259 {
260 time64_t recalc_interval_sec;
261 int ret;
262
263 recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
264 if (recalc_interval_sec < pl->pl_recalc_period)
265 return 0;
266
267 spin_lock(&pl->pl_lock);
268 /*
269 * Check if we need to recalc lists now.
270 */
271 recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
272 if (recalc_interval_sec < pl->pl_recalc_period) {
273 spin_unlock(&pl->pl_lock);
274 return 0;
275 }
276
277 /*
278 * Make sure that pool knows last SLV and Limit from obd.
279 */
280 ldlm_cli_pool_pop_slv(pl);
281
282 spin_unlock(&pl->pl_lock);
283
284 /*
285 * Do not cancel locks in case lru resize is disabled for this ns.
286 */
287 if (!ns_connect_lru_resize(container_of(pl, struct ldlm_namespace,
288 ns_pool))) {
289 ret = 0;
290 goto out;
291 }
292
293 /*
294 * While canceling locks on the client we do not need to maintain sharp
295 * timing, we only want to cancel locks asap according to the new SLV.
296 * This may be called when the SLV has changed a lot, which is why we do
297 * not take pl->pl_recalc_time into account here.
298 */
299 ret = ldlm_cancel_lru(container_of(pl, struct ldlm_namespace, ns_pool),
300 0, LCF_ASYNC, LDLM_CANCEL_LRUR);
301
302 out:
303 spin_lock(&pl->pl_lock);
304 /*
305 * Time of LRU resizing might be longer than period,
306 * so update after LRU resizing rather than before it.
307 */
308 pl->pl_recalc_time = ktime_get_real_seconds();
309 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
310 recalc_interval_sec);
311 spin_unlock(&pl->pl_lock);
312 return ret;
313 }
314
315 /**
316 * This function is the main entry point for memory pressure handling on the
317 * client side. Its main goal is to cancel some number of locks on the
318 * passed \a pl according to \a nr and \a gfp_mask.
319 */
320 static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
321 int nr, gfp_t gfp_mask)
322 {
323 struct ldlm_namespace *ns;
324 int unused;
325
326 ns = container_of(pl, struct ldlm_namespace, ns_pool);
327
328 /*
329 * Do not cancel locks in case lru resize is disabled for this ns.
330 */
331 if (!ns_connect_lru_resize(ns))
332 return 0;
333
334 /*
335 * Make sure that pool knows last SLV and Limit from obd.
336 */
337 ldlm_cli_pool_pop_slv(pl);
338
339 spin_lock(&ns->ns_lock);
340 unused = ns->ns_nr_unused;
341 spin_unlock(&ns->ns_lock);
342
343 if (nr == 0)
344 return (unused / 100) * sysctl_vfs_cache_pressure;
345 else
346 return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
347 }
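/*
 * Illustrative numbers for the count/scan split above: with 2000 unused
 * locks in the namespace LRU and the default sysctl_vfs_cache_pressure
 * of 100, a count request (nr == 0) reports (2000 / 100) * 100 == 2000
 * freeable objects, while a scan request (nr > 0) asks ldlm_cancel_lru()
 * to cancel up to nr of them asynchronously.
 */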
348
349 static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
350 .po_recalc = ldlm_cli_pool_recalc,
351 .po_shrink = ldlm_cli_pool_shrink
352 };
353
354 /**
355 * Pool recalc wrapper. Will call either the client or the server pool
356 * recalc callback depending on what kind of pool \a pl is.
357 */
358 static int ldlm_pool_recalc(struct ldlm_pool *pl)
359 {
360 u32 recalc_interval_sec;
361 int count;
362
363 recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
364 if (recalc_interval_sec <= 0)
365 goto recalc;
366
367 spin_lock(&pl->pl_lock);
368 if (recalc_interval_sec > 0) {
369 /*
370 * Update pool statistics every 1s.
371 */
372 ldlm_pool_recalc_stats(pl);
373
374 /*
375 * Zero out all rates and speed for the last period.
376 */
377 atomic_set(&pl->pl_grant_rate, 0);
378 atomic_set(&pl->pl_cancel_rate, 0);
379 }
380 spin_unlock(&pl->pl_lock);
381
382 recalc:
383 if (pl->pl_ops->po_recalc) {
384 count = pl->pl_ops->po_recalc(pl);
385 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
386 count);
387 }
388 recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
389 pl->pl_recalc_period;
390 if (recalc_interval_sec <= 0) {
391 /* Prevent too frequent recalculation. */
392 CDEBUG(D_DLMTRACE,
393 "Negative interval(%d), too short period(%lld)",
394 recalc_interval_sec,
395 (s64)pl->pl_recalc_period);
396 recalc_interval_sec = 1;
397 }
398
399 return recalc_interval_sec;
400 }
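/*
 * Example (assuming the default client recalc period of 10s): right after
 * a recalc pass, pl_recalc_time - now + pl_recalc_period is roughly 10,
 * which ldlm_pools_recalc()/ldlm_pools_thread_main() below use as the
 * sleep interval before the next pass; values <= 0 are clamped to 1.
 */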
401
402 /*
403 * Pool shrink wrapper. Will call either the client or server pool shrink
404 * callback depending on what kind of pool pl is. When nr == 0, just return
405 * the number of freeable locks. Otherwise, return the number of canceled locks.
406 */
407 static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
408 {
409 int cancel = 0;
410
411 if (pl->pl_ops->po_shrink) {
412 cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
413 if (nr > 0) {
414 lprocfs_counter_add(pl->pl_stats,
415 LDLM_POOL_SHRINK_REQTD_STAT,
416 nr);
417 lprocfs_counter_add(pl->pl_stats,
418 LDLM_POOL_SHRINK_FREED_STAT,
419 cancel);
420 CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n",
421 pl->pl_name, nr, cancel);
422 }
423 }
424 return cancel;
425 }
426
427 static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
428 {
429 int granted, grant_rate, cancel_rate;
430 int grant_speed, lvf;
431 struct ldlm_pool *pl = m->private;
432 __u64 slv, clv;
433 __u32 limit;
434
435 spin_lock(&pl->pl_lock);
436 slv = pl->pl_server_lock_volume;
437 clv = pl->pl_client_lock_volume;
438 limit = atomic_read(&pl->pl_limit);
439 granted = atomic_read(&pl->pl_granted);
440 grant_rate = atomic_read(&pl->pl_grant_rate);
441 cancel_rate = atomic_read(&pl->pl_cancel_rate);
442 grant_speed = grant_rate - cancel_rate;
443 lvf = atomic_read(&pl->pl_lock_volume_factor);
444 spin_unlock(&pl->pl_lock);
445
446 seq_printf(m, "LDLM pool state (%s):\n"
447 " SLV: %llu\n"
448 " CLV: %llu\n"
449 " LVF: %d\n",
450 pl->pl_name, slv, clv, lvf);
451
452 seq_printf(m, " GR: %d\n CR: %d\n GS: %d\n"
453 " G: %d\n L: %d\n",
454 grant_rate, cancel_rate, grant_speed,
455 granted, limit);
456
457 return 0;
458 }
459
460 LPROC_SEQ_FOPS_RO(lprocfs_pool_state);
461
462 static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr,
463 char *buf)
464 {
465 struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
466 pl_kobj);
467
468 int grant_speed;
469
470 spin_lock(&pl->pl_lock);
471 /* serialize with ldlm_pool_recalc */
472 grant_speed = atomic_read(&pl->pl_grant_rate) -
473 atomic_read(&pl->pl_cancel_rate);
474 spin_unlock(&pl->pl_lock);
475 return sprintf(buf, "%d\n", grant_speed);
476 }
477 LUSTRE_RO_ATTR(grant_speed);
478
479 LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int);
480 LUSTRE_RO_ATTR(grant_plan);
481
482 LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int);
483 LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int);
484 LUSTRE_RW_ATTR(recalc_period);
485
486 LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64);
487 LUSTRE_RO_ATTR(server_lock_volume);
488
489 LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
490 LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
491 LUSTRE_RW_ATTR(limit);
492
493 LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic);
494 LUSTRE_RO_ATTR(granted);
495
496 LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic);
497 LUSTRE_RO_ATTR(cancel_rate);
498
499 LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic);
500 LUSTRE_RO_ATTR(grant_rate);
501
502 LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(lock_volume_factor, atomic);
503 LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(lock_volume_factor, atomic);
504 LUSTRE_RW_ATTR(lock_volume_factor);
505
506 #define LDLM_POOL_ADD_VAR(name, var, ops) \
507 do { \
508 snprintf(var_name, MAX_STRING_SIZE, #name); \
509 pool_vars[0].data = var; \
510 pool_vars[0].fops = ops; \
511 ldebugfs_add_vars(pl->pl_debugfs_entry, pool_vars, NULL);\
512 } while (0)
513
514 /* These are for pools in /sys/fs/lustre/ldlm/namespaces/.../pool */
515 static struct attribute *ldlm_pl_attrs[] = {
516 &lustre_attr_grant_speed.attr,
517 &lustre_attr_grant_plan.attr,
518 &lustre_attr_recalc_period.attr,
519 &lustre_attr_server_lock_volume.attr,
520 &lustre_attr_limit.attr,
521 &lustre_attr_granted.attr,
522 &lustre_attr_cancel_rate.attr,
523 &lustre_attr_grant_rate.attr,
524 &lustre_attr_lock_volume_factor.attr,
525 NULL,
526 };
527
528 static void ldlm_pl_release(struct kobject *kobj)
529 {
530 struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
531 pl_kobj);
532 complete(&pl->pl_kobj_unregister);
533 }
534
535 static struct kobj_type ldlm_pl_ktype = {
536 .default_attrs = ldlm_pl_attrs,
537 .sysfs_ops = &lustre_sysfs_ops,
538 .release = ldlm_pl_release,
539 };
540
541 static int ldlm_pool_sysfs_init(struct ldlm_pool *pl)
542 {
543 struct ldlm_namespace *ns = container_of(pl, struct ldlm_namespace,
544 ns_pool);
545 int err;
546
547 init_completion(&pl->pl_kobj_unregister);
548 err = kobject_init_and_add(&pl->pl_kobj, &ldlm_pl_ktype, &ns->ns_kobj,
549 "pool");
550
551 return err;
552 }
553
554 static int ldlm_pool_debugfs_init(struct ldlm_pool *pl)
555 {
556 struct ldlm_namespace *ns = container_of(pl, struct ldlm_namespace,
557 ns_pool);
558 struct dentry *debugfs_ns_parent;
559 struct lprocfs_vars pool_vars[2];
560 char *var_name = NULL;
561 int rc = 0;
562
563 var_name = kzalloc(MAX_STRING_SIZE + 1, GFP_NOFS);
564 if (!var_name)
565 return -ENOMEM;
566
567 debugfs_ns_parent = ns->ns_debugfs_entry;
568 if (IS_ERR_OR_NULL(debugfs_ns_parent)) {
569 CERROR("%s: debugfs entry is not initialized\n",
570 ldlm_ns_name(ns));
571 rc = -EINVAL;
572 goto out_free_name;
573 }
574 pl->pl_debugfs_entry = ldebugfs_register("pool", debugfs_ns_parent,
575 NULL, NULL);
576 if (IS_ERR(pl->pl_debugfs_entry)) {
577 CERROR("LdebugFS failed in ldlm-pool-init\n");
578 rc = PTR_ERR(pl->pl_debugfs_entry);
579 pl->pl_debugfs_entry = NULL;
580 goto out_free_name;
581 }
582
583 var_name[MAX_STRING_SIZE] = '\0';
584 memset(pool_vars, 0, sizeof(pool_vars));
585 pool_vars[0].name = var_name;
586
587 LDLM_POOL_ADD_VAR(state, pl, &lprocfs_pool_state_fops);
588
589 pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
590 LDLM_POOL_FIRST_STAT, 0);
591 if (!pl->pl_stats) {
592 rc = -ENOMEM;
593 goto out_free_name;
594 }
595
596 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
597 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
598 "granted", "locks");
599 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
600 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
601 "grant", "locks");
602 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
603 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
604 "cancel", "locks");
605 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
606 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
607 "grant_rate", "locks/s");
608 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
609 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
610 "cancel_rate", "locks/s");
611 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
612 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
613 "grant_plan", "locks/s");
614 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
615 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
616 "slv", "slv");
617 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
618 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
619 "shrink_request", "locks");
620 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
621 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
622 "shrink_freed", "locks");
623 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
624 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
625 "recalc_freed", "locks");
626 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
627 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
628 "recalc_timing", "sec");
629 rc = ldebugfs_register_stats(pl->pl_debugfs_entry, "stats",
630 pl->pl_stats);
631
632 out_free_name:
633 kfree(var_name);
634 return rc;
635 }
636
637 static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl)
638 {
639 kobject_put(&pl->pl_kobj);
640 wait_for_completion(&pl->pl_kobj_unregister);
641 }
642
643 static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
644 {
645 if (pl->pl_stats) {
646 lprocfs_free_stats(&pl->pl_stats);
647 pl->pl_stats = NULL;
648 }
649 if (pl->pl_debugfs_entry) {
650 ldebugfs_remove(&pl->pl_debugfs_entry);
651 pl->pl_debugfs_entry = NULL;
652 }
653 }
654
655 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
656 int idx, ldlm_side_t client)
657 {
658 int rc;
659
660 spin_lock_init(&pl->pl_lock);
661 atomic_set(&pl->pl_granted, 0);
662 pl->pl_recalc_time = ktime_get_seconds();
663 atomic_set(&pl->pl_lock_volume_factor, 1);
664
665 atomic_set(&pl->pl_grant_rate, 0);
666 atomic_set(&pl->pl_cancel_rate, 0);
667 pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
668
669 snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
670 ldlm_ns_name(ns), idx);
671
672 atomic_set(&pl->pl_limit, 1);
673 pl->pl_server_lock_volume = 0;
674 pl->pl_ops = &ldlm_cli_pool_ops;
675 pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
676 pl->pl_client_lock_volume = 0;
677 rc = ldlm_pool_debugfs_init(pl);
678 if (rc)
679 return rc;
680
681 rc = ldlm_pool_sysfs_init(pl);
682 if (rc)
683 return rc;
684
685 CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);
686
687 return rc;
688 }
689 EXPORT_SYMBOL(ldlm_pool_init);
690
691 void ldlm_pool_fini(struct ldlm_pool *pl)
692 {
693 ldlm_pool_sysfs_fini(pl);
694 ldlm_pool_debugfs_fini(pl);
695
696 /*
697 * Pool should not be used after this point. We can't free it here as
698 * it lives in struct ldlm_namespace, but we are still interested in
699 * catching any abnormal use.
700 */
701 POISON(pl, 0x5a, sizeof(*pl));
702 }
703 EXPORT_SYMBOL(ldlm_pool_fini);
704
705 /**
706 * Add new taken ldlm lock \a lock into pool \a pl accounting.
707 */
708 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
709 {
710 /*
711 * FLOCK locks are special in the sense that they are almost never
712 * cancelled; instead a special kind of lock is used to drop them.
713 * Also there is no LRU for flock locks, so there is no point in
714 * tracking them anyway.
715 */
716 if (lock->l_resource->lr_type == LDLM_FLOCK)
717 return;
718
719 atomic_inc(&pl->pl_granted);
720 atomic_inc(&pl->pl_grant_rate);
721 lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
722 /*
723 * Do not do pool recalc for the client side as all locks which
724 * potentially may be canceled have already been packed into the
725 * enqueue/cancel rpc. Also we do not want to run out of stack
726 * with too long call paths.
727 */
728 }
729 EXPORT_SYMBOL(ldlm_pool_add);
730
731 /**
732 * Remove ldlm lock \a lock from pool \a pl accounting.
733 */
734 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
735 {
736 /*
737 * Filter out FLOCK locks. Read above comment in ldlm_pool_add().
738 */
739 if (lock->l_resource->lr_type == LDLM_FLOCK)
740 return;
741
742 LASSERT(atomic_read(&pl->pl_granted) > 0);
743 atomic_dec(&pl->pl_granted);
744 atomic_inc(&pl->pl_cancel_rate);
745
746 lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
747 }
748 EXPORT_SYMBOL(ldlm_pool_del);
749
750 /**
751 * Returns current \a pl SLV.
752 *
753 * \pre ->pl_lock is not locked.
754 */
755 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
756 {
757 __u64 slv;
758
759 spin_lock(&pl->pl_lock);
760 slv = pl->pl_server_lock_volume;
761 spin_unlock(&pl->pl_lock);
762 return slv;
763 }
764
765 /**
766 * Sets passed \a clv to \a pl.
767 *
768 * \pre ->pl_lock is not locked.
769 */
770 void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
771 {
772 spin_lock(&pl->pl_lock);
773 pl->pl_client_lock_volume = clv;
774 spin_unlock(&pl->pl_lock);
775 }
776
777 /**
778 * Returns current LVF from \a pl.
779 */
780 __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
781 {
782 return atomic_read(&pl->pl_lock_volume_factor);
783 }
784
785 static int ldlm_pool_granted(struct ldlm_pool *pl)
786 {
787 return atomic_read(&pl->pl_granted);
788 }
789
790 static struct ptlrpc_thread *ldlm_pools_thread;
791 static struct completion ldlm_pools_comp;
792
793 /*
794 * Count locks from all namespaces (if possible). Returns the number of
795 * cached locks.
796 */
797 static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
798 {
799 int total = 0, nr_ns;
800 struct ldlm_namespace *ns;
801 struct ldlm_namespace *ns_old = NULL; /* loop detection */
802 void *cookie;
803
804 if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
805 return 0;
806
807 CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
808 client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
809
810 cookie = cl_env_reenter();
811
812 /*
813 * Find out how many resources we may release.
814 */
815 for (nr_ns = ldlm_namespace_nr_read(client);
816 nr_ns > 0; nr_ns--) {
817 mutex_lock(ldlm_namespace_lock(client));
818 if (list_empty(ldlm_namespace_list(client))) {
819 mutex_unlock(ldlm_namespace_lock(client));
820 cl_env_reexit(cookie);
821 return 0;
822 }
823 ns = ldlm_namespace_first_locked(client);
824
825 if (ns == ns_old) {
826 mutex_unlock(ldlm_namespace_lock(client));
827 break;
828 }
829
830 if (ldlm_ns_empty(ns)) {
831 ldlm_namespace_move_to_inactive_locked(ns, client);
832 mutex_unlock(ldlm_namespace_lock(client));
833 continue;
834 }
835
836 if (!ns_old)
837 ns_old = ns;
838
839 ldlm_namespace_get(ns);
840 ldlm_namespace_move_to_active_locked(ns, client);
841 mutex_unlock(ldlm_namespace_lock(client));
842 total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
843 ldlm_namespace_put(ns);
844 }
845
846 cl_env_reexit(cookie);
847 return total;
848 }
849
850 static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
851 {
852 unsigned long freed = 0;
853 int tmp, nr_ns;
854 struct ldlm_namespace *ns;
855 void *cookie;
856
857 if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
858 return -1;
859
860 cookie = cl_env_reenter();
861
862 /*
863 * Shrink at least ldlm_namespace_nr_read(client) namespaces.
864 */
865 for (tmp = nr_ns = ldlm_namespace_nr_read(client);
866 tmp > 0; tmp--) {
867 int cancel, nr_locks;
868
869 /*
870 * Do not call shrink under ldlm_namespace_lock(client)
871 */
872 mutex_lock(ldlm_namespace_lock(client));
873 if (list_empty(ldlm_namespace_list(client))) {
874 mutex_unlock(ldlm_namespace_lock(client));
875 break;
876 }
877 ns = ldlm_namespace_first_locked(client);
878 ldlm_namespace_get(ns);
879 ldlm_namespace_move_to_active_locked(ns, client);
880 mutex_unlock(ldlm_namespace_lock(client));
881
882 nr_locks = ldlm_pool_granted(&ns->ns_pool);
883 /*
884 * We used to shrink proportionally, but with the new shrinker API
885 * we lost the total number of freeable locks.
886 */
887 cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
888 freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
889 ldlm_namespace_put(ns);
890 }
891 cl_env_reexit(cookie);
892 /*
893 * We only decrease the SLV in the server pools shrinker; return
894 * SHRINK_STOP to the kernel to avoid a needless loop. LU-1128
895 */
896 return freed;
897 }
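/*
 * Illustrative numbers for the proportional spread above: if the shrinker
 * asks to scan nr == 128 objects across nr_ns == 4 client namespaces,
 * each pool is asked to cancel 1 + min(nr_locks, 128 / 4), i.e. up to 33
 * locks, so the pressure is spread roughly evenly across namespaces.
 */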
898
899 static unsigned long ldlm_pools_cli_count(struct shrinker *s,
900 struct shrink_control *sc)
901 {
902 return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
903 }
904
905 static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
906 struct shrink_control *sc)
907 {
908 return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
909 sc->gfp_mask);
910 }
911
912 static int ldlm_pools_recalc(ldlm_side_t client)
913 {
914 struct ldlm_namespace *ns;
915 struct ldlm_namespace *ns_old = NULL;
916 int nr;
917 int time = 50; /* seconds of sleep if no active namespaces */
918
919 /*
920 * Recalc at least ldlm_namespace_nr_read(client) namespaces.
921 */
922 for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
923 int skip;
924 /*
925 * Lock the list, get first @ns in the list, getref, move it
926 * to the tail, unlock and call pool recalc. This way we avoid
927 * calling recalc under the @ns lock, which is really good as we get
928 * rid of a potential deadlock on client nodes when canceling
929 * locks synchronously.
930 */
931 mutex_lock(ldlm_namespace_lock(client));
932 if (list_empty(ldlm_namespace_list(client))) {
933 mutex_unlock(ldlm_namespace_lock(client));
934 break;
935 }
936 ns = ldlm_namespace_first_locked(client);
937
938 if (ns_old == ns) { /* Full pass complete */
939 mutex_unlock(ldlm_namespace_lock(client));
940 break;
941 }
942
943 /* We got an empty namespace; it needs to be moved back to the
944 * inactive list.
945 * The race with parallel resource creation is fine:
946 * - If they do namespace_get before our check, we fail the
947 * check and they move this item to the end of the list anyway
948 * - If we do the check and then they do namespace_get, then
949 * we move the namespace to inactive and they will move
950 * it back to active (synchronised by the lock, so no clash
951 * there).
952 */
953 if (ldlm_ns_empty(ns)) {
954 ldlm_namespace_move_to_inactive_locked(ns, client);
955 mutex_unlock(ldlm_namespace_lock(client));
956 continue;
957 }
958
959 if (!ns_old)
960 ns_old = ns;
961
962 spin_lock(&ns->ns_lock);
963 /*
964 * Skip an ns which is being freed - we don't want to increase
965 * its refcount again, not even temporarily. bz21519 & LU-499.
966 */
967 if (ns->ns_stopping) {
968 skip = 1;
969 } else {
970 skip = 0;
971 ldlm_namespace_get(ns);
972 }
973 spin_unlock(&ns->ns_lock);
974
975 ldlm_namespace_move_to_active_locked(ns, client);
976 mutex_unlock(ldlm_namespace_lock(client));
977
978 /*
979 * After setup is done - recalc the pool.
980 */
981 if (!skip) {
982 int ttime = ldlm_pool_recalc(&ns->ns_pool);
983
984 if (ttime < time)
985 time = ttime;
986
987 ldlm_namespace_put(ns);
988 }
989 }
990 return time;
991 }
992
993 static int ldlm_pools_thread_main(void *arg)
994 {
995 struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
996 int c_time;
997
998 thread_set_flags(thread, SVC_RUNNING);
999 wake_up(&thread->t_ctl_waitq);
1000
1001 CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
1002 "ldlm_poold", current_pid());
1003
1004 while (1) {
1005 struct l_wait_info lwi;
1006
1007 /*
1008 * Recalc all pools on this tick.
1009 */
1010 c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
1011
1012 /*
1013 * Wait until the next check time, or until we're
1014 * stopped.
1015 */
1016 lwi = LWI_TIMEOUT(cfs_time_seconds(c_time),
1017 NULL, NULL);
1018 l_wait_event(thread->t_ctl_waitq,
1019 thread_is_stopping(thread) ||
1020 thread_is_event(thread),
1021 &lwi);
1022
1023 if (thread_test_and_clear_flags(thread, SVC_STOPPING))
1024 break;
1025 thread_test_and_clear_flags(thread, SVC_EVENT);
1026 }
1027
1028 thread_set_flags(thread, SVC_STOPPED);
1029 wake_up(&thread->t_ctl_waitq);
1030
1031 CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
1032 "ldlm_poold", current_pid());
1033
1034 complete_and_exit(&ldlm_pools_comp, 0);
1035 }
1036
1037 static int ldlm_pools_thread_start(void)
1038 {
1039 struct l_wait_info lwi = { 0 };
1040 struct task_struct *task;
1041
1042 if (ldlm_pools_thread)
1043 return -EALREADY;
1044
1045 ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS);
1046 if (!ldlm_pools_thread)
1047 return -ENOMEM;
1048
1049 init_completion(&ldlm_pools_comp);
1050 init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);
1051
1052 task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
1053 "ldlm_poold");
1054 if (IS_ERR(task)) {
1055 CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
1056 kfree(ldlm_pools_thread);
1057 ldlm_pools_thread = NULL;
1058 return PTR_ERR(task);
1059 }
1060 l_wait_event(ldlm_pools_thread->t_ctl_waitq,
1061 thread_is_running(ldlm_pools_thread), &lwi);
1062 return 0;
1063 }
1064
1065 static void ldlm_pools_thread_stop(void)
1066 {
1067 if (!ldlm_pools_thread)
1068 return;
1069
1070 thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
1071 wake_up(&ldlm_pools_thread->t_ctl_waitq);
1072
1073 /*
1074 * Make sure that pools thread is finished before freeing @thread.
1075 * This fixes possible race and oops due to accessing freed memory
1076 * in pools thread.
1077 */
1078 wait_for_completion(&ldlm_pools_comp);
1079 kfree(ldlm_pools_thread);
1080 ldlm_pools_thread = NULL;
1081 }
1082
1083 static struct shrinker ldlm_pools_cli_shrinker = {
1084 .count_objects = ldlm_pools_cli_count,
1085 .scan_objects = ldlm_pools_cli_scan,
1086 .seeks = DEFAULT_SEEKS,
1087 };
1088
1089 int ldlm_pools_init(void)
1090 {
1091 int rc;
1092
1093 rc = ldlm_pools_thread_start();
1094 if (rc == 0)
1095 register_shrinker(&ldlm_pools_cli_shrinker);
1096
1097 return rc;
1098 }
1099 EXPORT_SYMBOL(ldlm_pools_init);
1100
1101 void ldlm_pools_fini(void)
1102 {
1103 if (ldlm_pools_thread)
1104 unregister_shrinker(&ldlm_pools_cli_shrinker);
1105
1106 ldlm_pools_thread_stop();
1107 }
1108 EXPORT_SYMBOL(ldlm_pools_fini);