/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_pool.c
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 */

/*
 * The idea of this code is rather simple. Every second, for each server
 * namespace we have SLV - server lock volume - which is calculated from the
 * current number of granted locks, the grant speed for the past period, etc -
 * that is, the locking load. For simplicity, this SLV number may be thought
 * of as a flow definition. It is sent to clients on each occasion to let
 * them know the current load situation on the server. By default, at the
 * beginning, the SLV on the server is set to a max value calculated as
 * follows: allow one client to hold all locks of limit ->pl_limit for 10h.
 *
 * Next, on clients, the number of cached locks is no longer limited
 * artificially in any way as it was before. Instead, the client calculates
 * CLV, that is, the client lock volume, for each lock and compares it with
 * the last SLV from the server. CLV is calculated as the number of locks in
 * the LRU * lock live time in seconds. If CLV > SLV - the lock is canceled.
 *
 * The client has LVF, that is, a lock volume factor which regulates how
 * sensitive the client should be to the last SLV from the server. The higher
 * the LVF is, the more locks will be canceled on the client. Its default
 * value is 1. Setting LVF to 2 means that the client will cancel locks 2
 * times faster.
 *
 * Locks on a client will be canceled more intensively in these cases:
 * (1) if SLV is smaller, that is, load is higher on the server;
 * (2) client has a lot of locks (the more locks are held by client, the bigger
 *     chances that some of them should be canceled);
 * (3) client has old locks (taken some time ago);
 *
 * Thus, according to the flow paradigm that we use for better understanding
 * of SLV, CLV is the volume of a particle in the flow described by SLV.
 * According to this, if the flow is getting thinner, more and more particles
 * fall outside of it, and as the particles are locks, they should be
 * canceled.
 *
 * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
 * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas like using
 * LVF and many cleanups. The flow definition allowing easier understanding of
 * the logic belongs to Nikita Danilov (nikita@clusterfs.com), as well as many
 * cleanups and fixes. And the design and implementation are done by Yury
 * Umanets (umka@clusterfs.com).
 *
 * Glossary for terms used:
 *
 * pl_limit - Number of allowed locks in pool. Applies to server and client
 * side (tunable);
 *
 * pl_granted - Number of granted locks (calculated);
 * pl_grant_rate - Number of granted locks for last T (calculated);
 * pl_cancel_rate - Number of canceled locks for last T (calculated);
 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
 * pl_grant_plan - Planned number of granted locks for next T (calculated);
 * pl_server_lock_volume - Current server lock volume (calculated);
 *
 * As can be seen from the list above, we have a few tunables which may
 * affect behavior significantly. They all may be modified via sysfs. However,
 * they also make it possible to construct a few pre-defined behavior
 * policies. If none of the predefined policies is suitable for the working
 * pattern being used, a new one may be "constructed" via sysfs tunables.
 */
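/*
 * A worked illustration of the CLV/SLV comparison above (all numbers are
 * hypothetical, not taken from a real deployment): with LVF == 1, a client
 * holding 100 locks in its LRU, each alive for 360 seconds, computes
 * CLV = 100 * 360 = 36000. If the server last advertised SLV == 30000,
 * then CLV > SLV and old locks are canceled until the volume fits.
 */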

#define DEBUG_SUBSYSTEM S_LDLM

#include "../include/lustre_dlm.h"
#include "../include/cl_object.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "ldlm_internal.h"

/*
 * 50 ldlm locks for 1MB of RAM.
 */
#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
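/*
 * For illustration (a hypothetical machine, not a measured value): with
 * 4 KiB pages (PAGE_SHIFT == 12), NUM_CACHEPAGES >> 8 is the cache size
 * in MB, so a 1 GiB cache gives LDLM_POOL_HOST_L == 1024 * 50 == 51200.
 */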

/*
 * Maximal possible grant step plan in %.
 */
#define LDLM_POOL_MAX_GSP (30)

/*
 * Minimal possible grant step plan in %.
 */
#define LDLM_POOL_MIN_GSP (1)

/*
 * This controls the speed of reaching LDLM_POOL_MAX_GSP
 * with increasing thread period.
 */
#define LDLM_POOL_GSP_STEP_SHIFT (2)

/*
 * LDLM_POOL_GSP% of all locks is default GP.
 */
#define LDLM_POOL_GP(L)   (((L) * LDLM_POOL_MAX_GSP) / 100)
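/*
 * For example, with the hypothetical LDLM_POOL_HOST_L of 51200 from above,
 * the default grant plan is LDLM_POOL_GP(51200) == 51200 * 30 / 100 == 15360.
 */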

/*
 * Max age for locks on clients.
 */
#define LDLM_POOL_MAX_AGE (36000)

/*
 * The granularity of SLV calculation.
 */
#define LDLM_POOL_SLV_SHIFT (10)

static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
	return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}
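/*
 * dru() is a shift-based divide with optional round-up. For illustration
 * (hypothetical values): dru(1000, LDLM_POOL_SLV_SHIFT, 1) computes
 * (1000 + 1023) >> 10 == 1, while dru(1000, LDLM_POOL_SLV_SHIFT, 0)
 * truncates to 0.
 */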

static inline __u64 ldlm_pool_slv_max(__u32 L)
{
	/*
	 * Allow one client to hold all locks of the limit for 10 hrs.
	 * The formula is the following: limit * 10h / 1 client.
	 */
	__u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;

	return lim;
}
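/*
 * For example, with the hypothetical limit L == 51200 used above,
 * ldlm_pool_slv_max() yields 51200 * 36000 == 1843200000.
 */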

static inline __u64 ldlm_pool_slv_min(__u32 L)
{
	return 1;
}

enum {
	LDLM_POOL_FIRST_STAT = 0,
	LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
	LDLM_POOL_GRANT_STAT,
	LDLM_POOL_CANCEL_STAT,
	LDLM_POOL_GRANT_RATE_STAT,
	LDLM_POOL_CANCEL_RATE_STAT,
	LDLM_POOL_GRANT_PLAN_STAT,
	LDLM_POOL_SLV_STAT,
	LDLM_POOL_SHRINK_REQTD_STAT,
	LDLM_POOL_SHRINK_FREED_STAT,
	LDLM_POOL_RECALC_STAT,
	LDLM_POOL_TIMING_STAT,
	LDLM_POOL_LAST_STAT
};

/**
 * Calculates the suggested grant_step in % of available locks for the passed
 * \a period. This is later used in grant_plan calculations.
 */
static inline int ldlm_pool_t2gsp(unsigned int t)
{
	/*
	 * This yields 1% grant step for anything below LDLM_POOL_GSP_STEP
	 * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
	 *
	 * How this will affect execution is the following:
	 *
	 * - for thread period 1s we will have grant_step 1%, which is good
	 * from the pov of taking some load off the server and pushing it out
	 * to clients. This is so because a 1% grant_step means that the
	 * server will not allow clients to get lots of locks in a short
	 * period of time and keep all old locks in their caches. Clients
	 * will always have to cancel some locks if they want to take new
	 * ones;
	 *
	 * - for thread period 10s (which is default) we will have 23%, which
	 * means that clients will have enough room to take some new locks
	 * without canceling others. All locks from this 23% which were not
	 * taken by clients in the current period will contribute to SLV
	 * growth. SLV growth means more locks cached on clients until the
	 * limit or grant plan is reached.
	 */
	return LDLM_POOL_MAX_GSP -
	       ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
		(t >> LDLM_POOL_GSP_STEP_SHIFT));
}
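/*
 * Worked values, derived directly from the constants above: for t == 1,
 * t >> 2 == 0, so the result is 30 - (29 >> 0) == 1%; for the default
 * period t == 10, t >> 2 == 2, so the result is 30 - (29 >> 2) == 23%.
 */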

/**
 * Recalculates next stats on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
{
	int grant_plan = pl->pl_grant_plan;
	__u64 slv = pl->pl_server_lock_volume;
	int granted = atomic_read(&pl->pl_granted);
	int grant_rate = atomic_read(&pl->pl_grant_rate);
	int cancel_rate = atomic_read(&pl->pl_cancel_rate);

	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
			    slv);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
			    granted);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
			    grant_rate);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
			    grant_plan);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
			    cancel_rate);
}

/**
 * Sets SLV and Limit from container_of(pl, struct ldlm_namespace,
 * ns_pool)->ns_obd to passed \a pl.
 */
static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
{
	struct obd_device *obd;

	/*
	 * Get new SLV and Limit from obd which is updated with coming
	 * RPCs.
	 */
	obd = container_of(pl, struct ldlm_namespace,
			   ns_pool)->ns_obd;
	read_lock(&obd->obd_pool_lock);
	pl->pl_server_lock_volume = obd->obd_pool_slv;
	atomic_set(&pl->pl_limit, obd->obd_pool_limit);
	read_unlock(&obd->obd_pool_lock);
}

/**
 * Recalculates client side pool \a pl according to current SLV and Limit.
 */
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
	time64_t recalc_interval_sec;
	int ret;

	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period)
		return 0;

	spin_lock(&pl->pl_lock);
	/*
	 * Check if we need to recalc lists now.
	 */
	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period) {
		spin_unlock(&pl->pl_lock);
		return 0;
	}

	/*
	 * Make sure that pool knows last SLV and Limit from obd.
	 */
	ldlm_cli_pool_pop_slv(pl);

	spin_unlock(&pl->pl_lock);

	/*
	 * Do not cancel locks in case lru resize is disabled for this ns.
	 */
	if (!ns_connect_lru_resize(container_of(pl, struct ldlm_namespace,
						ns_pool))) {
		ret = 0;
		goto out;
	}

	/*
	 * While canceling locks on the client we do not need to maintain
	 * sharp timing; we only want to cancel locks asap according to the
	 * new SLV. This may be called when the SLV has changed much, which
	 * is why we do not take pl->pl_recalc_time into account here.
	 */
	ret = ldlm_cancel_lru(container_of(pl, struct ldlm_namespace, ns_pool),
			      0, LCF_ASYNC, LDLM_CANCEL_LRUR);

out:
	spin_lock(&pl->pl_lock);
	/*
	 * Time of LRU resizing might be longer than period,
	 * so update after LRU resizing rather than before it.
	 */
	pl->pl_recalc_time = ktime_get_real_seconds();
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			    recalc_interval_sec);
	spin_unlock(&pl->pl_lock);
	return ret;
}

/**
 * This function is the main entry point for memory pressure handling on the
 * client side. Its main goal is to cancel some number of locks on the passed
 * \a pl according to \a nr and \a gfp_mask.
 */
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
				int nr, gfp_t gfp_mask)
{
	struct ldlm_namespace *ns;
	int unused;

	ns = container_of(pl, struct ldlm_namespace, ns_pool);

	/*
	 * Do not cancel locks in case lru resize is disabled for this ns.
	 */
	if (!ns_connect_lru_resize(ns))
		return 0;

	/*
	 * Make sure that pool knows last SLV and Limit from obd.
	 */
	ldlm_cli_pool_pop_slv(pl);

	spin_lock(&ns->ns_lock);
	unused = ns->ns_nr_unused;
	spin_unlock(&ns->ns_lock);

	if (nr == 0)
		return (unused / 100) * sysctl_vfs_cache_pressure;
	else
		return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
}
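/*
 * For illustration (hypothetical numbers): with 1000 unused locks in the
 * namespace and the default sysctl_vfs_cache_pressure of 100, a counting
 * call (nr == 0) reports (1000 / 100) * 100 == 1000 freeable objects to
 * the shrinker core.
 */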

static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
	.po_recalc = ldlm_cli_pool_recalc,
	.po_shrink = ldlm_cli_pool_shrink
};

/**
 * Pool recalc wrapper. Will call either the client or server pool recalc
 * callback depending on what pool \a pl is used for.
 */
static int ldlm_pool_recalc(struct ldlm_pool *pl)
{
	s32 recalc_interval_sec;
	int count;

	recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec <= 0)
		goto recalc;

	spin_lock(&pl->pl_lock);
	if (recalc_interval_sec > 0) {
		/*
		 * Update pool statistics every 1s.
		 */
		ldlm_pool_recalc_stats(pl);

		/*
		 * Zero out all rates and speed for the last period.
		 */
		atomic_set(&pl->pl_grant_rate, 0);
		atomic_set(&pl->pl_cancel_rate, 0);
	}
	spin_unlock(&pl->pl_lock);

recalc:
	if (pl->pl_ops->po_recalc) {
		count = pl->pl_ops->po_recalc(pl);
		lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
				    count);
	}
	recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
			      pl->pl_recalc_period;
	if (recalc_interval_sec <= 0) {
		/* Prevent too frequent recalculation. */
		CDEBUG(D_DLMTRACE,
		       "Negative interval(%d), too short period(%lld)",
		       recalc_interval_sec,
		       (s64)pl->pl_recalc_period);
		recalc_interval_sec = 1;
	}

	return recalc_interval_sec;
}
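/*
 * The return value is the number of seconds until the next recalc is due.
 * For illustration (hypothetical timing): with pl_recalc_period == 10 and
 * pl_recalc_time stamped 3 seconds ago, the wrapper returns 10 - 3 == 7;
 * a non-positive result is clamped to 1 to avoid a busy loop.
 */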

/*
 * Pool shrink wrapper. Will call either the client or server pool shrink
 * callback depending on what pool pl is used for. When nr == 0, just return
 * the number of freeable locks. Otherwise, return the number of canceled
 * locks.
 */
static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
{
	int cancel = 0;

	if (pl->pl_ops->po_shrink) {
		cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
		if (nr > 0) {
			lprocfs_counter_add(pl->pl_stats,
					    LDLM_POOL_SHRINK_REQTD_STAT,
					    nr);
			lprocfs_counter_add(pl->pl_stats,
					    LDLM_POOL_SHRINK_FREED_STAT,
					    cancel);
			CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n",
			       pl->pl_name, nr, cancel);
		}
	}
	return cancel;
}

static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
	int granted, grant_rate, cancel_rate;
	int grant_speed, lvf;
	struct ldlm_pool *pl = m->private;
	__u64 slv, clv;
	__u32 limit;

	spin_lock(&pl->pl_lock);
	slv = pl->pl_server_lock_volume;
	clv = pl->pl_client_lock_volume;
	limit = atomic_read(&pl->pl_limit);
	granted = atomic_read(&pl->pl_granted);
	grant_rate = atomic_read(&pl->pl_grant_rate);
	cancel_rate = atomic_read(&pl->pl_cancel_rate);
	grant_speed = grant_rate - cancel_rate;
	lvf = atomic_read(&pl->pl_lock_volume_factor);
	spin_unlock(&pl->pl_lock);

	seq_printf(m, "LDLM pool state (%s):\n"
		   " SLV: %llu\n"
		   " CLV: %llu\n"
		   " LVF: %d\n",
		   pl->pl_name, slv, clv, lvf);

	seq_printf(m, " GR: %d\n CR: %d\n GS: %d\n"
		   " G: %d\n L: %d\n",
		   grant_rate, cancel_rate, grant_speed,
		   granted, limit);

	return 0;
}

LPROC_SEQ_FOPS_RO(lprocfs_pool_state);

static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
					    pl_kobj);

	int grant_speed;

	spin_lock(&pl->pl_lock);
	/* serialize with ldlm_pool_recalc */
	grant_speed = atomic_read(&pl->pl_grant_rate) -
		      atomic_read(&pl->pl_cancel_rate);
	spin_unlock(&pl->pl_lock);
	return sprintf(buf, "%d\n", grant_speed);
}
LUSTRE_RO_ATTR(grant_speed);

LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int);
LUSTRE_RO_ATTR(grant_plan);

LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int);
LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int);
LUSTRE_RW_ATTR(recalc_period);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64);
LUSTRE_RO_ATTR(server_lock_volume);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
LUSTRE_RW_ATTR(limit);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic);
LUSTRE_RO_ATTR(granted);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic);
LUSTRE_RO_ATTR(cancel_rate);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic);
LUSTRE_RO_ATTR(grant_rate);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(lock_volume_factor, atomic);
LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(lock_volume_factor, atomic);
LUSTRE_RW_ATTR(lock_volume_factor);

#define LDLM_POOL_ADD_VAR(name, var, ops)				\
	do {								\
		snprintf(var_name, MAX_STRING_SIZE, #name);		\
		pool_vars[0].data = var;				\
		pool_vars[0].fops = ops;				\
		ldebugfs_add_vars(pl->pl_debugfs_entry, pool_vars, NULL);\
	} while (0)

/* These are for pools in /sys/fs/lustre/ldlm/namespaces/.../pool */
static struct attribute *ldlm_pl_attrs[] = {
	&lustre_attr_grant_speed.attr,
	&lustre_attr_grant_plan.attr,
	&lustre_attr_recalc_period.attr,
	&lustre_attr_server_lock_volume.attr,
	&lustre_attr_limit.attr,
	&lustre_attr_granted.attr,
	&lustre_attr_cancel_rate.attr,
	&lustre_attr_grant_rate.attr,
	&lustre_attr_lock_volume_factor.attr,
	NULL,
};

static void ldlm_pl_release(struct kobject *kobj)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
					    pl_kobj);
	complete(&pl->pl_kobj_unregister);
}

static struct kobj_type ldlm_pl_ktype = {
	.default_attrs = ldlm_pl_attrs,
	.sysfs_ops = &lustre_sysfs_ops,
	.release = ldlm_pl_release,
};

static int ldlm_pool_sysfs_init(struct ldlm_pool *pl)
{
	struct ldlm_namespace *ns = container_of(pl, struct ldlm_namespace,
						 ns_pool);
	int err;

	init_completion(&pl->pl_kobj_unregister);
	err = kobject_init_and_add(&pl->pl_kobj, &ldlm_pl_ktype, &ns->ns_kobj,
				   "pool");

	return err;
}

static int ldlm_pool_debugfs_init(struct ldlm_pool *pl)
{
	struct ldlm_namespace *ns = container_of(pl, struct ldlm_namespace,
						 ns_pool);
	struct dentry *debugfs_ns_parent;
	struct lprocfs_vars pool_vars[2];
	char *var_name = NULL;
	int rc = 0;

	var_name = kzalloc(MAX_STRING_SIZE + 1, GFP_NOFS);
	if (!var_name)
		return -ENOMEM;

	debugfs_ns_parent = ns->ns_debugfs_entry;
	if (IS_ERR_OR_NULL(debugfs_ns_parent)) {
		CERROR("%s: debugfs entry is not initialized\n",
		       ldlm_ns_name(ns));
		rc = -EINVAL;
		goto out_free_name;
	}
	pl->pl_debugfs_entry = ldebugfs_register("pool", debugfs_ns_parent,
						 NULL, NULL);
	if (IS_ERR(pl->pl_debugfs_entry)) {
		CERROR("LdebugFS failed in ldlm-pool-init\n");
		rc = PTR_ERR(pl->pl_debugfs_entry);
		pl->pl_debugfs_entry = NULL;
		goto out_free_name;
	}

	var_name[MAX_STRING_SIZE] = '\0';
	memset(pool_vars, 0, sizeof(pool_vars));
	pool_vars[0].name = var_name;

	LDLM_POOL_ADD_VAR(state, pl, &lprocfs_pool_state_fops);

	pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
					   LDLM_POOL_FIRST_STAT, 0);
	if (!pl->pl_stats) {
		rc = -ENOMEM;
		goto out_free_name;
	}

	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "granted", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "cancel", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant_rate", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "cancel_rate", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant_plan", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "slv", "slv");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "shrink_request", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "shrink_freed", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "recalc_freed", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "recalc_timing", "sec");
	rc = ldebugfs_register_stats(pl->pl_debugfs_entry, "stats",
				     pl->pl_stats);

out_free_name:
	kfree(var_name);
	return rc;
}

static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl)
{
	kobject_put(&pl->pl_kobj);
	wait_for_completion(&pl->pl_kobj_unregister);
}

static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
{
	if (pl->pl_stats) {
		lprocfs_free_stats(&pl->pl_stats);
		pl->pl_stats = NULL;
	}
	if (pl->pl_debugfs_entry) {
		ldebugfs_remove(&pl->pl_debugfs_entry);
		pl->pl_debugfs_entry = NULL;
	}
}

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
		   int idx, ldlm_side_t client)
{
	int rc;

	spin_lock_init(&pl->pl_lock);
	atomic_set(&pl->pl_granted, 0);
	pl->pl_recalc_time = ktime_get_seconds();
	atomic_set(&pl->pl_lock_volume_factor, 1);

	atomic_set(&pl->pl_grant_rate, 0);
	atomic_set(&pl->pl_cancel_rate, 0);
	pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);

	snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
		 ldlm_ns_name(ns), idx);

	atomic_set(&pl->pl_limit, 1);
	pl->pl_server_lock_volume = 0;
	pl->pl_ops = &ldlm_cli_pool_ops;
	pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
	pl->pl_client_lock_volume = 0;
	rc = ldlm_pool_debugfs_init(pl);
	if (rc)
		return rc;

	rc = ldlm_pool_sysfs_init(pl);
	if (rc)
		return rc;

	CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);

	return rc;
}
EXPORT_SYMBOL(ldlm_pool_init);

void ldlm_pool_fini(struct ldlm_pool *pl)
{
	ldlm_pool_sysfs_fini(pl);
	ldlm_pool_debugfs_fini(pl);

	/*
	 * The pool must not be used after this point. We can't free it here
	 * as it lives in struct ldlm_namespace, but we are still interested
	 * in catching any abnormal use.
	 */
	POISON(pl, 0x5a, sizeof(*pl));
}
EXPORT_SYMBOL(ldlm_pool_fini);

/**
 * Add new taken ldlm lock \a lock into pool \a pl accounting.
 */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	/*
	 * FLOCK locks are special in a sense that they are almost never
	 * canceled; instead, a special kind of lock is used to drop them.
	 * Also, there is no LRU for flock locks, so no point in tracking
	 * them anyway.
	 */
	if (lock->l_resource->lr_type == LDLM_FLOCK)
		return;

	atomic_inc(&pl->pl_granted);
	atomic_inc(&pl->pl_grant_rate);
	lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
	/*
	 * Do not do pool recalc for the client side, as all locks which
	 * potentially may be canceled have already been packed into
	 * enqueue/cancel rpc. Also we do not want to run out of stack
	 * with too long call paths.
	 */
}
EXPORT_SYMBOL(ldlm_pool_add);

/**
 * Remove ldlm lock \a lock from pool \a pl accounting.
 */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	/*
	 * Filter out FLOCK locks. Read the comment above in ldlm_pool_add().
	 */
	if (lock->l_resource->lr_type == LDLM_FLOCK)
		return;

	LASSERT(atomic_read(&pl->pl_granted) > 0);
	atomic_dec(&pl->pl_granted);
	atomic_inc(&pl->pl_cancel_rate);

	lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
}
EXPORT_SYMBOL(ldlm_pool_del);

/**
 * Returns current \a pl SLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
	__u64 slv;

	spin_lock(&pl->pl_lock);
	slv = pl->pl_server_lock_volume;
	spin_unlock(&pl->pl_lock);
	return slv;
}

/**
 * Sets passed \a clv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
	spin_lock(&pl->pl_lock);
	pl->pl_client_lock_volume = clv;
	spin_unlock(&pl->pl_lock);
}

/**
 * Returns current LVF from \a pl.
 */
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_lock_volume_factor);
}

static int ldlm_pool_granted(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_granted);
}

static struct ptlrpc_thread *ldlm_pools_thread;
static struct completion ldlm_pools_comp;

/*
 * Count locks from all namespaces (if possible). Returns the number of
 * cached locks.
 */
static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
{
	int total = 0, nr_ns;
	struct ldlm_namespace *ns;
	struct ldlm_namespace *ns_old = NULL; /* loop detection */
	void *cookie;

	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
		return 0;

	CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
	       client == LDLM_NAMESPACE_CLIENT ? "client" : "server");

	cookie = cl_env_reenter();

	/*
	 * Find out how many resources we may release.
	 */
	for (nr_ns = ldlm_namespace_nr_read(client);
	     nr_ns > 0; nr_ns--) {
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			cl_env_reexit(cookie);
			return 0;
		}
		ns = ldlm_namespace_first_locked(client);

		if (ns == ns_old) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}

		if (ldlm_ns_empty(ns)) {
			ldlm_namespace_move_to_inactive_locked(ns, client);
			mutex_unlock(ldlm_namespace_lock(client));
			continue;
		}

		if (!ns_old)
			ns_old = ns;

		ldlm_namespace_get(ns);
		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));
		total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
		ldlm_namespace_put(ns);
	}

	cl_env_reexit(cookie);
	return total;
}

static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
{
	unsigned long freed = 0;
	int tmp, nr_ns;
	struct ldlm_namespace *ns;
	void *cookie;

	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
		return -1;

	cookie = cl_env_reenter();

	/*
	 * Shrink at least ldlm_namespace_nr_read(client) namespaces.
	 */
	for (tmp = nr_ns = ldlm_namespace_nr_read(client);
	     tmp > 0; tmp--) {
		int cancel, nr_locks;

		/*
		 * Do not call shrink under ldlm_namespace_lock(client)
		 */
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}
		ns = ldlm_namespace_first_locked(client);
		ldlm_namespace_get(ns);
		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));

		nr_locks = ldlm_pool_granted(&ns->ns_pool);
		/*
		 * We used to shrink proportionally, but with the new
		 * shrinker API we lost the total number of freeable locks.
		 */
		cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
		freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
		ldlm_namespace_put(ns);
	}
	cl_env_reexit(cookie);
	/*
	 * We only decrease the SLV in the server pools shrinker; return
	 * SHRINK_STOP to the kernel to avoid a needless loop. LU-1128.
	 */
	return freed;
}
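/*
 * For illustration (hypothetical numbers): asked to scan nr == 128 objects
 * across nr_ns == 4 namespaces, a namespace holding 1000 granted locks gets
 * cancel = 1 + min(1000, 128 / 4) == 33 locks canceled on this pass.
 */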

static unsigned long ldlm_pools_cli_count(struct shrinker *s,
					  struct shrink_control *sc)
{
	return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
					 struct shrink_control *sc)
{
	return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
			       sc->gfp_mask);
}

static int ldlm_pools_recalc(ldlm_side_t client)
{
	struct ldlm_namespace *ns;
	struct ldlm_namespace *ns_old = NULL;
	int nr;
	int time = 50; /* seconds of sleep if no active namespaces */

	/*
	 * Recalc at least ldlm_namespace_nr_read(client) namespaces.
	 */
	for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
		int skip;
		/*
		 * Lock the list, get first @ns in the list, getref, move it
		 * to the tail, unlock and call pool recalc. This way we avoid
		 * calling recalc under @ns lock, which is really good as we
		 * get rid of a potential deadlock on client nodes when
		 * canceling locks synchronously.
		 */
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}
		ns = ldlm_namespace_first_locked(client);

		if (ns_old == ns) { /* Full pass complete */
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}

		/* We got an empty namespace, need to move it back to inactive
		 * list.
		 * The race with parallel resource creation is fine:
		 * - If they do namespace_get before our check, we fail the
		 *   check and they move this item to the end of the list anyway
		 * - If we do the check and then they do namespace_get, then
		 *   we move the namespace to inactive and they will move
		 *   it back to active (synchronised by the lock, so no clash
		 *   there).
		 */
		if (ldlm_ns_empty(ns)) {
			ldlm_namespace_move_to_inactive_locked(ns, client);
			mutex_unlock(ldlm_namespace_lock(client));
			continue;
		}

		if (!ns_old)
			ns_old = ns;

		spin_lock(&ns->ns_lock);
		/*
		 * skip ns which is being freed, and we don't want to increase
		 * its refcount again, not even temporarily. bz21519 & LU-499.
		 */
		if (ns->ns_stopping) {
			skip = 1;
		} else {
			skip = 0;
			ldlm_namespace_get(ns);
		}
		spin_unlock(&ns->ns_lock);

		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));

		/*
		 * After setup is done - recalc the pool.
		 */
		if (!skip) {
			int ttime = ldlm_pool_recalc(&ns->ns_pool);

			if (ttime < time)
				time = ttime;

			ldlm_namespace_put(ns);
		}
	}
	return time;
}

static int ldlm_pools_thread_main(void *arg)
{
	struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
	int c_time;

	thread_set_flags(thread, SVC_RUNNING);
	wake_up(&thread->t_ctl_waitq);

	CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
	       "ldlm_poold", current_pid());

	while (1) {
		struct l_wait_info lwi;

		/*
		 * Recalc all pools on this tick.
		 */
		c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);

		/*
		 * Wait until the next check time, or until we're
		 * stopped.
		 */
		lwi = LWI_TIMEOUT(cfs_time_seconds(c_time),
				  NULL, NULL);
		l_wait_event(thread->t_ctl_waitq,
			     thread_is_stopping(thread) ||
			     thread_is_event(thread),
			     &lwi);

		if (thread_test_and_clear_flags(thread, SVC_STOPPING))
			break;
		thread_test_and_clear_flags(thread, SVC_EVENT);
	}

	thread_set_flags(thread, SVC_STOPPED);
	wake_up(&thread->t_ctl_waitq);

	CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
	       "ldlm_poold", current_pid());

	complete_and_exit(&ldlm_pools_comp, 0);
}

static int ldlm_pools_thread_start(void)
{
	struct l_wait_info lwi = { 0 };
	struct task_struct *task;

	if (ldlm_pools_thread)
		return -EALREADY;

	ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS);
	if (!ldlm_pools_thread)
		return -ENOMEM;

	init_completion(&ldlm_pools_comp);
	init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);

	task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
			   "ldlm_poold");
	if (IS_ERR(task)) {
		CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
		kfree(ldlm_pools_thread);
		ldlm_pools_thread = NULL;
		return PTR_ERR(task);
	}
	l_wait_event(ldlm_pools_thread->t_ctl_waitq,
		     thread_is_running(ldlm_pools_thread), &lwi);
	return 0;
}

static void ldlm_pools_thread_stop(void)
{
	if (!ldlm_pools_thread)
		return;

	thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
	wake_up(&ldlm_pools_thread->t_ctl_waitq);

	/*
	 * Make sure that the pools thread is finished before freeing @thread.
	 * This fixes a possible race and oops due to accessing freed memory
	 * in the pools thread.
	 */
	wait_for_completion(&ldlm_pools_comp);
	kfree(ldlm_pools_thread);
	ldlm_pools_thread = NULL;
}

static struct shrinker ldlm_pools_cli_shrinker = {
	.count_objects = ldlm_pools_cli_count,
	.scan_objects = ldlm_pools_cli_scan,
	.seeks = DEFAULT_SEEKS,
};

int ldlm_pools_init(void)
{
	int rc;

	rc = ldlm_pools_thread_start();
	if (rc == 0)
		register_shrinker(&ldlm_pools_cli_shrinker);

	return rc;
}
EXPORT_SYMBOL(ldlm_pools_init);

void ldlm_pools_fini(void)
{
	if (ldlm_pools_thread)
		unregister_shrinker(&ldlm_pools_cli_shrinker);

	ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);