/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_pool.c
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 */

/*
 * The idea of this code is rather simple. Each second, for each server
 * namespace we have SLV - server lock volume which is calculated on the
 * current number of granted locks, grant speed for the past period, etc -
 * that is, locking load. This SLV number may be thought of as a flow
 * definition for simplicity. It is sent to clients with each occasion to
 * let them know the current load situation on the server. By default, at
 * the beginning, the SLV on the server is set to a maximum value which is
 * calculated as follows: allow one client to hold all locks of limit
 * ->pl_limit for 10h.
 *
 * Next, on clients, the number of cached locks is not limited artificially
 * in any way as it was before. Instead, the client calculates CLV, that is,
 * client lock volume, for each lock and compares it with the last SLV from
 * the server. CLV is calculated as the number of locks in LRU * lock live
 * time in seconds. If CLV > SLV - the lock is canceled.
 *
 * The client has LVF, that is, lock volume factor, which regulates how
 * sensitive the client should be about the last SLV from the server. The
 * higher LVF is, the more locks will be canceled on the client. The default
 * value is 1. Setting LVF to 2 means that the client will cancel locks
 * twice as fast.
 *
 * Locks on a client will be canceled more intensively in these cases:
 * (1) if SLV is smaller, that is, load is higher on the server;
 * (2) client has a lot of locks (the more locks are held by a client, the
 * bigger the chances that some of them should be canceled);
 * (3) client has old locks (taken some time ago);
 *
 * Thus, according to the flow paradigm that we use for better understanding
 * of SLV, CLV is the volume of a particle in the flow described by SLV.
 * According to this, if the flow is getting thinner, more and more particles
 * end up outside of it, and as particles are locks, they should be canceled.
 *
 * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
 * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas like
 * using LVF and many cleanups. The flow definition allowing an easier
 * understanding of the logic belongs to Nikita Danilov (nikita@clusterfs.com)
 * as well as many cleanups and fixes. Design and implementation are done by
 * Yury Umanets (umka@clusterfs.com).
 *
 * Glossary for terms used:
 *
 * pl_limit - Number of allowed locks in pool. Applies to server and client
 * side (tunable);
 *
 * pl_granted - Number of granted locks (calculated);
 * pl_grant_rate - Number of granted locks for last T (calculated);
 * pl_cancel_rate - Number of canceled locks for last T (calculated);
 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
 * pl_grant_plan - Planned number of granted locks for next T (calculated);
 * pl_server_lock_volume - Current server lock volume (calculated);
 *
 * As may be seen from the list above, we have a few tunables which may
 * affect behavior much. They all may be modified via sysfs. However, they
 * also allow constructing a few pre-defined behavior policies. If none of
 * the predefined policies suits the working pattern being used, a new one
 * may be "constructed" via sysfs tunables.
 */
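
/*
 * Worked example (illustrative numbers only, not measured values): suppose
 * a client has 1000 locks in its LRU and considers a lock that has lived
 * for 3600 seconds, so its CLV = 1000 * 3600 = 3,600,000. If the last SLV
 * received from the server is 2,000,000 and LVF is 1, then CLV > SLV and
 * the lock is canceled; under lighter server load (a larger SLV) the same
 * lock would stay cached.
 */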

#define DEBUG_SUBSYSTEM S_LDLM

#include "../include/lustre_dlm.h"
#include "../include/cl_object.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "ldlm_internal.h"

/*
 * 50 ldlm locks for 1MB of RAM.
 */
#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
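
/*
 * For example, assuming 4 KiB pages (PAGE_SHIFT == 12), 20 - PAGE_SHIFT == 8
 * and 2^8 == 256 pages per MiB, so NUM_CACHEPAGES >> 8 is the cache size in
 * MiB; multiplying by 50 gives the 50-locks-per-MiB limit described above.
 */
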
/*
 * Maximal possible grant step plan in %.
 */
#define LDLM_POOL_MAX_GSP (30)

/*
 * Minimal possible grant step plan in %.
 */
#define LDLM_POOL_MIN_GSP (1)

/*
 * This controls the speed of reaching LDLM_POOL_MAX_GSP
 * with increasing thread period.
 */
#define LDLM_POOL_GSP_STEP_SHIFT (2)

/*
 * LDLM_POOL_GSP% of all locks is default GP.
 */
#define LDLM_POOL_GP(L)   (((L) * LDLM_POOL_MAX_GSP) / 100)

/*
 * Max age for locks on clients.
 */
#define LDLM_POOL_MAX_AGE (36000)

/*
 * The granularity of SLV calculation.
 */
#define LDLM_POOL_SLV_SHIFT (10)
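
/*
 * dru() below divides @val by 2^@shift, rounding up when @round_up is set.
 * For example, dru(1536, 10, 1) == (1536 + 1023) >> 10 == 2, while
 * dru(1536, 10, 0) == 1536 >> 10 == 1.
 */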
static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
	return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}

static inline __u64 ldlm_pool_slv_max(__u32 L)
{
	/*
	 * Allow one client to hold all locks for 10 hrs.
	 * Formula is the following: limit * 10h / 1 client.
	 */
	__u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
	return lim;
}
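
/*
 * To illustrate with hypothetical numbers: LDLM_POOL_MAX_AGE == 36000 is
 * ten hours in seconds, so a limit of, say, 100 locks gives a maximum SLV
 * of 100 * 36000 = 3,600,000.
 */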

static inline __u64 ldlm_pool_slv_min(__u32 L)
{
	return 1;
}

enum {
	LDLM_POOL_FIRST_STAT = 0,
	LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
	LDLM_POOL_GRANT_STAT,
	LDLM_POOL_CANCEL_STAT,
	LDLM_POOL_GRANT_RATE_STAT,
	LDLM_POOL_CANCEL_RATE_STAT,
	LDLM_POOL_GRANT_PLAN_STAT,
	LDLM_POOL_SLV_STAT,
	LDLM_POOL_SHRINK_REQTD_STAT,
	LDLM_POOL_SHRINK_FREED_STAT,
	LDLM_POOL_RECALC_STAT,
	LDLM_POOL_TIMING_STAT,
	LDLM_POOL_LAST_STAT
};

/**
 * Calculates suggested grant_step in % of available locks for passed
 * \a period. This is later used in grant_plan calculations.
 */
static inline int ldlm_pool_t2gsp(unsigned int t)
{
	/*
	 * This yields 1% grant step for anything below LDLM_POOL_GSP_STEP
	 * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
	 *
	 * How this will affect execution is the following:
	 *
	 * - for a thread period of 1s we will have a grant_step of 1%, which
	 * is good from the pov of taking some load off the server and pushing
	 * it out to clients. This is so because a 1% grant_step means that
	 * the server will not allow clients to get lots of locks in a short
	 * period of time and keep all old locks in their caches. Clients will
	 * always have to get some locks back if they want to take new ones;
	 *
	 * - for the default thread period of 10s we will have 23%, which
	 * means that clients will have enough room to take some new locks
	 * without giving some back. All locks from this 23% which were not
	 * taken by clients in the current period will contribute to SLV
	 * growing. SLV growing means more locks cached on clients until the
	 * limit or grant plan is reached.
	 */
	return LDLM_POOL_MAX_GSP -
	       ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
		(t >> LDLM_POOL_GSP_STEP_SHIFT));
}
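
/*
 * To check the percentages quoted above: for t == 1, 1 >> 2 == 0 and
 * (30 - 1) >> 0 == 29, so the result is 30 - 29 == 1%. For the default
 * t == 10, 10 >> 2 == 2 and 29 >> 2 == 7, giving 30 - 7 == 23%.
 */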

/**
 * Recalculates next stats on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
{
	int grant_plan = pl->pl_grant_plan;
	__u64 slv = pl->pl_server_lock_volume;
	int granted = atomic_read(&pl->pl_granted);
	int grant_rate = atomic_read(&pl->pl_grant_rate);
	int cancel_rate = atomic_read(&pl->pl_cancel_rate);

	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT, slv);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT, granted);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
			    grant_rate);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
			    grant_plan);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
			    cancel_rate);
}

/**
 * Sets SLV and Limit from container_of(pl, struct ldlm_namespace,
 * ns_pool)->ns_obd to passed \a pl.
 */
static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
{
	struct obd_device *obd;

	/*
	 * Get new SLV and Limit from obd which is updated with coming
	 * RPCs.
	 */
	obd = container_of(pl, struct ldlm_namespace,
			   ns_pool)->ns_obd;
	read_lock(&obd->obd_pool_lock);
	pl->pl_server_lock_volume = obd->obd_pool_slv;
	atomic_set(&pl->pl_limit, obd->obd_pool_limit);
	read_unlock(&obd->obd_pool_lock);
}

/**
 * Recalculates client side pool \a pl according to current SLV and Limit.
 */
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
	time64_t recalc_interval_sec;
	int ret;

	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period)
		return 0;

	spin_lock(&pl->pl_lock);
	/*
	 * Check if we need to recalc lists now.
	 */
	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period) {
		spin_unlock(&pl->pl_lock);
		return 0;
	}

	/*
	 * Make sure that pool knows last SLV and Limit from obd.
	 */
	ldlm_cli_pool_pop_slv(pl);

	spin_unlock(&pl->pl_lock);

	/*
	 * Do not cancel locks in case lru resize is disabled for this ns.
	 */
	if (!ns_connect_lru_resize(container_of(pl, struct ldlm_namespace,
						ns_pool))) {
		ret = 0;
		goto out;
	}

	/*
	 * While canceling locks on the client we do not need to maintain
	 * sharp timing; we only want to cancel locks asap according to the
	 * new SLV. This may be called when SLV has changed much, which is
	 * why we do not take pl->pl_recalc_time into account here.
	 */
	ret = ldlm_cancel_lru(container_of(pl, struct ldlm_namespace, ns_pool),
			      0, LCF_ASYNC, LDLM_CANCEL_LRUR);

out:
	spin_lock(&pl->pl_lock);
	/*
	 * Time of LRU resizing might be longer than period,
	 * so update after LRU resizing rather than before it.
	 */
	pl->pl_recalc_time = ktime_get_real_seconds();
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			    recalc_interval_sec);
	spin_unlock(&pl->pl_lock);
	return ret;
}

/**
 * This function is the main entry point for memory pressure handling on the
 * client side. Its main goal is to cancel some number of locks on the
 * passed \a pl according to \a nr and \a gfp_mask.
 */
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
				int nr, gfp_t gfp_mask)
{
	struct ldlm_namespace *ns;
	int unused;

	ns = container_of(pl, struct ldlm_namespace, ns_pool);

	/*
	 * Do not cancel locks in case lru resize is disabled for this ns.
	 */
	if (!ns_connect_lru_resize(ns))
		return 0;

	/*
	 * Make sure that pool knows last SLV and Limit from obd.
	 */
	ldlm_cli_pool_pop_slv(pl);

	spin_lock(&ns->ns_lock);
	unused = ns->ns_nr_unused;
	spin_unlock(&ns->ns_lock);

	if (nr == 0)
		return (unused / 100) * sysctl_vfs_cache_pressure;
	else
		return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
}
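
/*
 * Note on the nr == 0 branch above: sysctl_vfs_cache_pressure defaults to
 * 100, in which case the reported count is roughly the number of unused
 * locks; tuning the sysctl above or below 100 makes ldlm locks look
 * proportionally more or less reclaimable to the shrinker core.
 */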

static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
	.po_recalc = ldlm_cli_pool_recalc,
	.po_shrink = ldlm_cli_pool_shrink
};

/**
 * Pool recalc wrapper. Will call either client or server pool recalc
 * callback depending on what pool \a pl is used.
 */
static int ldlm_pool_recalc(struct ldlm_pool *pl)
{
	u32 recalc_interval_sec;
	int count;

	recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec <= 0)
		goto recalc;

	spin_lock(&pl->pl_lock);
	if (recalc_interval_sec > 0) {
		/*
		 * Update pool statistics every 1s.
		 */
		ldlm_pool_recalc_stats(pl);

		/*
		 * Zero out all rates and speed for the last period.
		 */
		atomic_set(&pl->pl_grant_rate, 0);
		atomic_set(&pl->pl_cancel_rate, 0);
	}
	spin_unlock(&pl->pl_lock);

recalc:
	if (pl->pl_ops->po_recalc) {
		count = pl->pl_ops->po_recalc(pl);
		lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
				    count);
	}
	recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
			      pl->pl_recalc_period;
	if (recalc_interval_sec <= 0) {
		/* Prevent too frequent recalculation. */
		CDEBUG(D_DLMTRACE,
		       "Negative interval(%d), too short period(%lld)",
		       recalc_interval_sec,
		       (s64)pl->pl_recalc_period);
		recalc_interval_sec = 1;
	}

	return recalc_interval_sec;
}
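
/*
 * The value returned above is the number of seconds until the next recalc
 * is due: e.g. with pl_recalc_period == 10 and the last recalc done 3
 * seconds ago, the pool asks to be revisited in 7 seconds.
 */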

/*
 * Pool shrink wrapper. Will call either client or server pool shrink
 * callback depending on what pool \a pl is used. When nr == 0, just return
 * the number of freeable locks. Otherwise, return the number of canceled
 * locks.
 */
static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
{
	int cancel = 0;

	if (pl->pl_ops->po_shrink) {
		cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
		if (nr > 0) {
			lprocfs_counter_add(pl->pl_stats,
					    LDLM_POOL_SHRINK_REQTD_STAT,
					    nr);
			lprocfs_counter_add(pl->pl_stats,
					    LDLM_POOL_SHRINK_FREED_STAT,
					    cancel);
			CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n",
			       pl->pl_name, nr, cancel);
		}
	}
	return cancel;
}

static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
	int granted, grant_rate, cancel_rate;
	int grant_speed, lvf;
	struct ldlm_pool *pl = m->private;
	__u64 slv, clv;
	__u32 limit;

	spin_lock(&pl->pl_lock);
	slv = pl->pl_server_lock_volume;
	clv = pl->pl_client_lock_volume;
	limit = atomic_read(&pl->pl_limit);
	granted = atomic_read(&pl->pl_granted);
	grant_rate = atomic_read(&pl->pl_grant_rate);
	cancel_rate = atomic_read(&pl->pl_cancel_rate);
	grant_speed = grant_rate - cancel_rate;
	lvf = atomic_read(&pl->pl_lock_volume_factor);
	spin_unlock(&pl->pl_lock);

	seq_printf(m, "LDLM pool state (%s):\n"
		      " SLV: %llu\n"
		      " CLV: %llu\n"
		      " LVF: %d\n",
		   pl->pl_name, slv, clv, lvf);

	seq_printf(m, " GR: %d\n CR: %d\n GS: %d\n"
		      " G: %d\n L: %d\n",
		   grant_rate, cancel_rate, grant_speed,
		   granted, limit);

	return 0;
}

LPROC_SEQ_FOPS_RO(lprocfs_pool_state);

static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
					    pl_kobj);
	int grant_speed;

	spin_lock(&pl->pl_lock);
	/* serialize with ldlm_pool_recalc */
	grant_speed = atomic_read(&pl->pl_grant_rate) -
		      atomic_read(&pl->pl_cancel_rate);
	spin_unlock(&pl->pl_lock);
	return sprintf(buf, "%d\n", grant_speed);
}
LUSTRE_RO_ATTR(grant_speed);

LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int);
LUSTRE_RO_ATTR(grant_plan);

LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int);
LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int);
LUSTRE_RW_ATTR(recalc_period);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64);
LUSTRE_RO_ATTR(server_lock_volume);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
LUSTRE_RW_ATTR(limit);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic);
LUSTRE_RO_ATTR(granted);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic);
LUSTRE_RO_ATTR(cancel_rate);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic);
LUSTRE_RO_ATTR(grant_rate);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(lock_volume_factor, atomic);
LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(lock_volume_factor, atomic);
LUSTRE_RW_ATTR(lock_volume_factor);

#define LDLM_POOL_ADD_VAR(name, var, ops)				\
	do {								\
		snprintf(var_name, MAX_STRING_SIZE, #name);		\
		pool_vars[0].data = var;				\
		pool_vars[0].fops = ops;				\
		ldebugfs_add_vars(pl->pl_debugfs_entry, pool_vars, NULL);\
	} while (0)

/* These are for pools in /sys/fs/lustre/ldlm/namespaces/.../pool */
static struct attribute *ldlm_pl_attrs[] = {
	&lustre_attr_grant_speed.attr,
	&lustre_attr_grant_plan.attr,
	&lustre_attr_recalc_period.attr,
	&lustre_attr_server_lock_volume.attr,
	&lustre_attr_limit.attr,
	&lustre_attr_granted.attr,
	&lustre_attr_cancel_rate.attr,
	&lustre_attr_grant_rate.attr,
	&lustre_attr_lock_volume_factor.attr,
	NULL,
};

static void ldlm_pl_release(struct kobject *kobj)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
					    pl_kobj);
	complete(&pl->pl_kobj_unregister);
}

static struct kobj_type ldlm_pl_ktype = {
	.default_attrs	= ldlm_pl_attrs,
	.sysfs_ops	= &lustre_sysfs_ops,
	.release	= ldlm_pl_release,
};

static int ldlm_pool_sysfs_init(struct ldlm_pool *pl)
{
	struct ldlm_namespace *ns = container_of(pl, struct ldlm_namespace,
						 ns_pool);
	int err;

	init_completion(&pl->pl_kobj_unregister);
	err = kobject_init_and_add(&pl->pl_kobj, &ldlm_pl_ktype, &ns->ns_kobj,
				   "pool");

	return err;
}

static int ldlm_pool_debugfs_init(struct ldlm_pool *pl)
{
	struct ldlm_namespace *ns = container_of(pl, struct ldlm_namespace,
						 ns_pool);
	struct dentry *debugfs_ns_parent;
	struct lprocfs_vars pool_vars[2];
	char *var_name = NULL;
	int rc = 0;

	var_name = kzalloc(MAX_STRING_SIZE + 1, GFP_NOFS);
	if (!var_name)
		return -ENOMEM;

	debugfs_ns_parent = ns->ns_debugfs_entry;
	if (IS_ERR_OR_NULL(debugfs_ns_parent)) {
		CERROR("%s: debugfs entry is not initialized\n",
		       ldlm_ns_name(ns));
		rc = -EINVAL;
		goto out_free_name;
	}
	pl->pl_debugfs_entry = ldebugfs_register("pool", debugfs_ns_parent,
						 NULL, NULL);
	if (IS_ERR(pl->pl_debugfs_entry)) {
		CERROR("LdebugFS failed in ldlm-pool-init\n");
		rc = PTR_ERR(pl->pl_debugfs_entry);
		pl->pl_debugfs_entry = NULL;
		goto out_free_name;
	}

	var_name[MAX_STRING_SIZE] = '\0';
	memset(pool_vars, 0, sizeof(pool_vars));
	pool_vars[0].name = var_name;

	LDLM_POOL_ADD_VAR(state, pl, &lprocfs_pool_state_fops);

	pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
					   LDLM_POOL_FIRST_STAT, 0);
	if (!pl->pl_stats) {
		rc = -ENOMEM;
		goto out_free_name;
	}

	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "granted", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "cancel", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant_rate", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "cancel_rate", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant_plan", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "slv", "slv");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "shrink_request", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "shrink_freed", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "recalc_freed", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "recalc_timing", "sec");
	rc = ldebugfs_register_stats(pl->pl_debugfs_entry, "stats",
				     pl->pl_stats);

out_free_name:
	kfree(var_name);
	return rc;
}

static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl)
{
	kobject_put(&pl->pl_kobj);
	wait_for_completion(&pl->pl_kobj_unregister);
}

static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
{
	if (pl->pl_stats) {
		lprocfs_free_stats(&pl->pl_stats);
		pl->pl_stats = NULL;
	}
	if (pl->pl_debugfs_entry) {
		ldebugfs_remove(&pl->pl_debugfs_entry);
		pl->pl_debugfs_entry = NULL;
	}
}

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
		   int idx, ldlm_side_t client)
{
	int rc;

	spin_lock_init(&pl->pl_lock);
	atomic_set(&pl->pl_granted, 0);
	pl->pl_recalc_time = ktime_get_seconds();
	atomic_set(&pl->pl_lock_volume_factor, 1);

	atomic_set(&pl->pl_grant_rate, 0);
	atomic_set(&pl->pl_cancel_rate, 0);
	pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);

	snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
		 ldlm_ns_name(ns), idx);

	atomic_set(&pl->pl_limit, 1);
	pl->pl_server_lock_volume = 0;
	pl->pl_ops = &ldlm_cli_pool_ops;
	pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
	pl->pl_client_lock_volume = 0;
	rc = ldlm_pool_debugfs_init(pl);
	if (rc)
		return rc;

	rc = ldlm_pool_sysfs_init(pl);
	if (rc)
		return rc;

	CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);

	return rc;
}
EXPORT_SYMBOL(ldlm_pool_init);

void ldlm_pool_fini(struct ldlm_pool *pl)
{
	ldlm_pool_sysfs_fini(pl);
	ldlm_pool_debugfs_fini(pl);

	/*
	 * Pool should not be used after this point. We can't free it here as
	 * it lives in struct ldlm_namespace, but we are still interested in
	 * catching any abnormal uses.
	 */
	POISON(pl, 0x5a, sizeof(*pl));
}
EXPORT_SYMBOL(ldlm_pool_fini);

/**
 * Add new taken ldlm lock \a lock into pool \a pl accounting.
 */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	/*
	 * FLOCK locks are special in the sense that they are almost never
	 * cancelled; instead, a special kind of lock is used to drop them.
	 * Also there is no LRU for flock locks, so no point in tracking
	 * them anyway.
	 */
	if (lock->l_resource->lr_type == LDLM_FLOCK)
		return;

	atomic_inc(&pl->pl_granted);
	atomic_inc(&pl->pl_grant_rate);
	lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
	/*
	 * Do not do pool recalc for client side as all locks which
	 * potentially may be canceled have already been packed into
	 * enqueue/cancel rpc. Also we do not want to run out of stack
	 * with too long call paths.
	 */
}
EXPORT_SYMBOL(ldlm_pool_add);

/**
 * Remove ldlm lock \a lock from pool \a pl accounting.
 */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	/*
	 * Filter out FLOCK locks. Read above comment in ldlm_pool_add().
	 */
	if (lock->l_resource->lr_type == LDLM_FLOCK)
		return;

	LASSERT(atomic_read(&pl->pl_granted) > 0);
	atomic_dec(&pl->pl_granted);
	atomic_inc(&pl->pl_cancel_rate);

	lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
}
EXPORT_SYMBOL(ldlm_pool_del);

/**
 * Returns current \a pl SLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
	__u64 slv;

	spin_lock(&pl->pl_lock);
	slv = pl->pl_server_lock_volume;
	spin_unlock(&pl->pl_lock);
	return slv;
}

/**
 * Sets passed \a clv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
	spin_lock(&pl->pl_lock);
	pl->pl_client_lock_volume = clv;
	spin_unlock(&pl->pl_lock);
}

/**
 * Returns current LVF from \a pl.
 */
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_lock_volume_factor);
}

static int ldlm_pool_granted(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_granted);
}

static struct ptlrpc_thread *ldlm_pools_thread;
static struct completion ldlm_pools_comp;

/*
 * Count locks from all namespaces (if possible). Returns the number of
 * cached locks.
 */
static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
{
	int total = 0, nr_ns;
	struct ldlm_namespace *ns;
	struct ldlm_namespace *ns_old = NULL; /* loop detection */
	void *cookie;

	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
		return 0;

	CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
	       client == LDLM_NAMESPACE_CLIENT ? "client" : "server");

	cookie = cl_env_reenter();

	/*
	 * Find out how many resources we may release.
	 */
	for (nr_ns = ldlm_namespace_nr_read(client);
	     nr_ns > 0; nr_ns--) {
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			cl_env_reexit(cookie);
			return 0;
		}
		ns = ldlm_namespace_first_locked(client);

		if (ns == ns_old) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}

		if (ldlm_ns_empty(ns)) {
			ldlm_namespace_move_to_inactive_locked(ns, client);
			mutex_unlock(ldlm_namespace_lock(client));
			continue;
		}

		if (!ns_old)
			ns_old = ns;

		ldlm_namespace_get(ns);
		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));
		total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
		ldlm_namespace_put(ns);
	}

	cl_env_reexit(cookie);
	return total;
}

static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
{
	unsigned long freed = 0;
	int tmp, nr_ns;
	struct ldlm_namespace *ns;
	void *cookie;

	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
		return -1;

	cookie = cl_env_reenter();

	/*
	 * Shrink at least ldlm_namespace_nr_read(client) namespaces.
	 */
	for (tmp = nr_ns = ldlm_namespace_nr_read(client);
	     tmp > 0; tmp--) {
		int cancel, nr_locks;

		/*
		 * Do not call shrink under ldlm_namespace_lock(client)
		 */
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}
		ns = ldlm_namespace_first_locked(client);
		ldlm_namespace_get(ns);
		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));

		nr_locks = ldlm_pool_granted(&ns->ns_pool);
		/*
		 * We used to shrink proportionally, but with the new
		 * shrinker API we lost the total number of freeable locks.
		 */
		cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
		freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
		ldlm_namespace_put(ns);
	}
	cl_env_reexit(cookie);
	/*
	 * We only decrease the SLV in server pools shrinker, return
	 * SHRINK_STOP to kernel to avoid needless loop. LU-1128
	 */
	return freed;
}
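
/*
 * Sharing the scan target above (hypothetical numbers): with nr == 128
 * locks requested and nr_ns == 4 namespaces, each pool is asked to cancel
 * 1 + min(nr_locks, 128 / 4), i.e. at most 33 locks.
 */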

static unsigned long ldlm_pools_cli_count(struct shrinker *s,
					  struct shrink_control *sc)
{
	return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
					 struct shrink_control *sc)
{
	return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
			       sc->gfp_mask);
}

static int ldlm_pools_recalc(ldlm_side_t client)
{
	struct ldlm_namespace *ns;
	struct ldlm_namespace *ns_old = NULL;
	int nr;
	int time = 50; /* seconds of sleep if no active namespaces */

	/*
	 * Recalc at least ldlm_namespace_nr_read(client) namespaces.
	 */
	for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
		int skip;
		/*
		 * Lock the list, get the first @ns in the list, getref, move
		 * it to the tail, unlock and call pool recalc. This way we
		 * avoid calling recalc under @ns lock, which is really good
		 * as we get rid of a potential deadlock on client nodes when
		 * canceling locks synchronously.
		 */
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}
		ns = ldlm_namespace_first_locked(client);

		if (ns_old == ns) { /* Full pass complete */
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}

		/* We got an empty namespace, need to move it back to inactive
		 * list.
		 * The race with parallel resource creation is fine:
		 * - If they do namespace_get before our check, we fail the
		 *   check and they move this item to the end of the list
		 *   anyway
		 * - If we do the check and then they do namespace_get, then
		 *   we move the namespace to inactive and they will move
		 *   it back to active (synchronised by the lock, so no clash
		 *   there).
		 */
		if (ldlm_ns_empty(ns)) {
			ldlm_namespace_move_to_inactive_locked(ns, client);
			mutex_unlock(ldlm_namespace_lock(client));
			continue;
		}

		if (!ns_old)
			ns_old = ns;

		spin_lock(&ns->ns_lock);
		/*
		 * Skip ns which is being freed, and we don't want to increase
		 * its refcount again, not even temporarily. bz21519 & LU-499.
		 */
		if (ns->ns_stopping) {
			skip = 1;
		} else {
			skip = 0;
			ldlm_namespace_get(ns);
		}
		spin_unlock(&ns->ns_lock);

		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));

		/*
		 * After setup is done - recalc the pool.
		 */
		if (!skip) {
			int ttime = ldlm_pool_recalc(&ns->ns_pool);

			if (ttime < time)
				time = ttime;

			ldlm_namespace_put(ns);
		}
	}
	return time;
}
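
/*
 * The value returned is the smallest interval requested by any pool, so
 * the ldlm_poold thread below sleeps just long enough for the most
 * impatient namespace (capped at 50 s when no namespace is active).
 */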

static int ldlm_pools_thread_main(void *arg)
{
	struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
	int c_time;

	thread_set_flags(thread, SVC_RUNNING);
	wake_up(&thread->t_ctl_waitq);

	CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
	       "ldlm_poold", current_pid());

	while (1) {
		struct l_wait_info lwi;

		/*
		 * Recalc all pools on this tick.
		 */
		c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);

		/*
		 * Wait until the next check time, or until we're
		 * stopped.
		 */
		lwi = LWI_TIMEOUT(cfs_time_seconds(c_time),
				  NULL, NULL);
		l_wait_event(thread->t_ctl_waitq,
			     thread_is_stopping(thread) ||
			     thread_is_event(thread),
			     &lwi);

		if (thread_test_and_clear_flags(thread, SVC_STOPPING))
			break;
		thread_test_and_clear_flags(thread, SVC_EVENT);
	}

	thread_set_flags(thread, SVC_STOPPED);
	wake_up(&thread->t_ctl_waitq);

	CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
	       "ldlm_poold", current_pid());

	complete_and_exit(&ldlm_pools_comp, 0);
}

static int ldlm_pools_thread_start(void)
{
	struct l_wait_info lwi = { 0 };
	struct task_struct *task;

	if (ldlm_pools_thread)
		return -EALREADY;

	ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS);
	if (!ldlm_pools_thread)
		return -ENOMEM;

	init_completion(&ldlm_pools_comp);
	init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);

	task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
			   "ldlm_poold");
	if (IS_ERR(task)) {
		CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
		kfree(ldlm_pools_thread);
		ldlm_pools_thread = NULL;
		return PTR_ERR(task);
	}
	l_wait_event(ldlm_pools_thread->t_ctl_waitq,
		     thread_is_running(ldlm_pools_thread), &lwi);
	return 0;
}

static void ldlm_pools_thread_stop(void)
{
	if (!ldlm_pools_thread)
		return;

	thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
	wake_up(&ldlm_pools_thread->t_ctl_waitq);

	/*
	 * Make sure that pools thread is finished before freeing @thread.
	 * This fixes possible race and oops due to accessing freed memory
	 * in pools thread.
	 */
	wait_for_completion(&ldlm_pools_comp);
	kfree(ldlm_pools_thread);
	ldlm_pools_thread = NULL;
}

static struct shrinker ldlm_pools_cli_shrinker = {
	.count_objects	= ldlm_pools_cli_count,
	.scan_objects	= ldlm_pools_cli_scan,
	.seeks		= DEFAULT_SEEKS,
};
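
/*
 * With the split shrinker API (Linux 3.12 and later), the MM core first
 * calls ->count_objects() to estimate how many freeable objects we hold,
 * then calls ->scan_objects() with sc->nr_to_scan set to its chosen batch
 * size; the scan callback returns the number of objects actually freed.
 */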

int ldlm_pools_init(void)
{
	int rc;

	rc = ldlm_pools_thread_start();
	if (rc == 0)
		register_shrinker(&ldlm_pools_cli_shrinker);

	return rc;
}
EXPORT_SYMBOL(ldlm_pools_init);

void ldlm_pools_fini(void)
{
	if (ldlm_pools_thread)
		unregister_shrinker(&ldlm_pools_cli_shrinker);

	ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);