Commit | Line | Data |
---|---|---|
d7e09d03 PT |
1 | /* |
2 | * GPL HEADER START | |
3 | * | |
4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 only, | |
8 | * as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | * General Public License version 2 for more details (a copy is included | |
14 | * in the LICENSE file that accompanied this code). | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * version 2 along with this program; If not, see | |
18 | * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf | |
19 | * | |
20 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
21 | * CA 95054 USA or visit www.sun.com if you need additional information or | |
22 | * have any questions. | |
23 | * | |
24 | * GPL HEADER END | |
25 | */ | |
26 | /* | |
27 | * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. | |
28 | * Use is subject to license terms. | |
29 | * | |
30 | * Copyright (c) 2010, 2012, Intel Corporation. | |
31 | */ | |
32 | /* | |
33 | * This file is part of Lustre, http://www.lustre.org/ | |
34 | * Lustre is a trademark of Sun Microsystems, Inc. | |
35 | * | |
36 | * lustre/ldlm/ldlm_pool.c | |
37 | * | |
38 | * Author: Yury Umanets <umka@clusterfs.com> | |
39 | */ | |
40 | ||
41 | /* | |
42 | * The idea of this code is rather simple. Each second, for each server | |
43 | * namespace, we calculate SLV - the server lock volume - from the current | |
44 | * number of granted locks, the grant speed for the past period, etc., that | |
45 | * is, the locking load. For simplicity, this SLV number may be thought of | |
46 | * as a flow definition. It is sent to clients at every opportunity so that | |
47 | * they know the current load situation on the server. By default, SLV on | |
48 | * the server starts at a maximum value calculated as follows: allow one | |
49 | * client to hold all ->pl_limit locks for 10h. | |
50 | * | |
51 | * Next, on clients, the number of cached locks is not limited artificially in | |
52 | * any way as it was before. Instead, the client calculates CLV, that is, the | |
53 | * client lock volume, for each lock and compares it with the last SLV from the | |
54 | * server. CLV is calculated as the number of locks in the LRU * lock live time | |
55 | * in seconds. If CLV > SLV, the lock is canceled. | |
56 | * | |
e7ddc48c AR |
57 | * The client has LVF, that is, a lock volume factor which regulates how |
58 | * sensitive the client should be to the last SLV from the server. The higher | |
59 | * LVF is, the more locks will be canceled on the client. Its default value is | |
60 | * 1. Setting LVF to 2 means that the client will cancel locks 2 times faster. | |
d7e09d03 PT |
61 | * |
62 | * Locks on a client will be canceled more intensively in these cases: | |
63 | * (1) if SLV is smaller, that is, load is higher on the server; | |
64 | * (2) the client has a lot of locks (the more locks a client holds, the bigger | |
65 | * the chance that some of them should be canceled); | |
66 | * (3) the client has old locks (taken some time ago); | |
67 | * | |
68 | * Thus, in the flow paradigm that we use for a better understanding of SLV, | |
69 | * CLV is the volume of a particle in the flow described by SLV. Accordingly, | |
70 | * if the flow is getting thinner, more and more particles fall outside of it, | |
71 | * and as the particles are locks, they should be canceled. | |
72 | * | |
e7ddc48c AR |
73 | * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com). |
74 | * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas like using | |
75 | * LVF and many cleanups. The flow definition that allows easier understanding | |
76 | * of the logic belongs to Nikita Danilov (nikita@clusterfs.com), as well as | |
77 | * many cleanups and fixes. And the design and implementation were done by | |
78 | * Yury Umanets (umka@clusterfs.com). | |
d7e09d03 PT |
79 | * |
80 | * Glossary for terms used: | |
81 | * | |
82 | * pl_limit - Number of allowed locks in pool. Applies to server and client | |
83 | * side (tunable); | |
84 | * | |
85 | * pl_granted - Number of granted locks (calculated); | |
86 | * pl_grant_rate - Number of granted locks for last T (calculated); | |
87 | * pl_cancel_rate - Number of canceled locks for last T (calculated); | |
88 | * pl_grant_speed - Grant speed (GR - CR) for last T (calculated); | |
89 | * pl_grant_plan - Planned number of granted locks for next T (calculated); | |
90 | * pl_server_lock_volume - Current server lock volume (calculated); | |
91 | * | |
92 | * As may be seen from the list above, we have a few possible tunables which | |
93 | * may affect behavior a lot. They may all be modified via proc. However, they | |
94 | * also make it possible to construct a few pre-defined behavior policies. If | |
95 | * none of the predefined policies suits the workload pattern being used, a new | |
96 | * one may be "constructed" via the proc tunables. | |
97 | */ | |
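To make the flow description above concrete, here is a minimal userspace sketch of the client-side decision it describes. This is an illustration only, not the actual ldlm_cancel_lru policy; the numbers and the clv_calc() helper are made up for the example.

/* Hypothetical illustration of the CLV-vs-SLV check described above. */
#include <stdio.h>
#include <stdint.h>

/* CLV = locks in LRU * lock live time (seconds), scaled by the lock volume factor */
static uint64_t clv_calc(uint64_t lru_len, uint64_t lock_age_sec, uint32_t lvf)
{
	return lru_len * lock_age_sec * lvf;
}

int main(void)
{
	uint64_t slv = 5000000;		/* last SLV pushed by the server (made up) */
	uint64_t lru_len = 1200;	/* locks cached on this client */
	uint64_t age = 3600;		/* the oldest lock has lived one hour */
	uint32_t lvf = 1;		/* default lock volume factor */
	uint64_t clv = clv_calc(lru_len, age, lvf);

	/*
	 * CLV = 4320000 < SLV, so the cache may keep growing; with LVF = 2 the
	 * same state would exceed SLV and locks would start to be canceled.
	 */
	printf("CLV=%llu SLV=%llu -> %s\n",
	       (unsigned long long)clv, (unsigned long long)slv,
	       clv > slv ? "cancel oldest locks" : "keep caching");
	return 0;
}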
98 | ||
99 | #define DEBUG_SUBSYSTEM S_LDLM | |
100 | ||
e27db149 GKH |
101 | #include "../include/lustre_dlm.h" |
102 | #include "../include/cl_object.h" | |
103 | #include "../include/obd_class.h" | |
104 | #include "../include/obd_support.h" | |
d7e09d03 PT |
105 | #include "ldlm_internal.h" |
106 | ||
107 | ||
108 | /* | |
109 | * 50 ldlm locks for 1MB of RAM. | |
110 | */ | |
111 | #define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50) | |
112 | ||
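As a quick sanity check of this macro (a sketch with assumed values, since NUM_CACHEPAGES and PAGE_CACHE_SHIFT depend on the running kernel): NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT) converts cache pages into megabytes, so with 4 KiB pages one gigabyte of cache pages yields 1024 MB * 50 = 51200 locks.

/* Hypothetical evaluation of LDLM_POOL_HOST_L with assumed page parameters. */
#include <stdio.h>

#define EX_PAGE_CACHE_SHIFT	12		/* 4 KiB pages (assumption) */
#define EX_NUM_CACHEPAGES	262144UL	/* 1 GiB of cache pages (assumption) */
#define EX_HOST_L	((EX_NUM_CACHEPAGES >> (20 - EX_PAGE_CACHE_SHIFT)) * 50)

int main(void)
{
	/* 262144 pages >> 8 = 1024 MB, times 50 locks/MB = 51200 locks */
	printf("host lock limit = %lu\n", EX_HOST_L);
	return 0;
}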
113 | /* | |
114 | * Maximal possible grant step plan in %. | |
115 | */ | |
116 | #define LDLM_POOL_MAX_GSP (30) | |
117 | ||
118 | /* | |
119 | * Minimal possible grant step plan in %. | |
120 | */ | |
121 | #define LDLM_POOL_MIN_GSP (1) | |
122 | ||
123 | /* | |
124 | * This controls the speed of reaching LDLM_POOL_MAX_GSP | |
125 | * with increasing thread period. | |
126 | */ | |
127 | #define LDLM_POOL_GSP_STEP_SHIFT (2) | |
128 | ||
129 | /* | |
130 | * LDLM_POOL_MAX_GSP% of all locks is the default GP. | |
131 | */ | |
132 | #define LDLM_POOL_GP(L) (((L) * LDLM_POOL_MAX_GSP) / 100) | |
133 | ||
134 | /* | |
135 | * Max age (seconds) for locks on clients: 36000s = 10h. | |
136 | */ | |
137 | #define LDLM_POOL_MAX_AGE (36000) | |
138 | ||
139 | /* | |
140 | * The granularity of SLV calculation. | |
141 | */ | |
142 | #define LDLM_POOL_SLV_SHIFT (10) | |
143 | ||
b59fe845 | 144 | extern struct proc_dir_entry *ldlm_ns_proc_dir; |
d7e09d03 PT |
145 | |
146 | static inline __u64 dru(__u64 val, __u32 shift, int round_up) | |
147 | { | |
148 | return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift; | |
149 | } | |
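dru() is simply a power-of-two division that optionally rounds up instead of truncating; a standalone copy (for illustration only) shows the difference:

/* Standalone copy of dru() showing round-up vs truncating division by 2^shift. */
#include <stdio.h>
#include <stdint.h>

static inline uint64_t dru(uint64_t val, uint32_t shift, int round_up)
{
	return (val + (round_up ? (1ULL << shift) - 1 : 0)) >> shift;
}

int main(void)
{
	/* 1000 / 2^10: truncating gives 0, rounding up gives 1 */
	printf("dru(1000, 10, 0) = %llu\n", (unsigned long long)dru(1000, 10, 0));
	printf("dru(1000, 10, 1) = %llu\n", (unsigned long long)dru(1000, 10, 1));
	return 0;
}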
150 | ||
151 | static inline __u64 ldlm_pool_slv_max(__u32 L) | |
152 | { | |
153 | /* | |
154 | * Allow one client to hold all locks for 10 hrs. | |
155 | * The formula is: limit * 10h / 1 client. | |
156 | */ | |
157 | __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1; | |
158 | return lim; | |
159 | } | |
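For example, with an assumed host lock limit of 51200 locks, the initial (maximum) SLV evaluates to 51200 * 36000 = 1843200000, i.e. the volume of a single client holding the whole limit for ten hours:

/* Hypothetical initial SLV: one client holding the whole limit for 10 hours. */
#include <stdio.h>
#include <stdint.h>

#define EX_LDLM_POOL_MAX_AGE	36000	/* 10 hours, in seconds */

int main(void)
{
	uint32_t limit = 51200;		/* assumed host lock limit */
	uint64_t slv_max = (uint64_t)limit * EX_LDLM_POOL_MAX_AGE / 1;

	printf("initial SLV = %llu\n", (unsigned long long)slv_max);	/* 1843200000 */
	return 0;
}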
160 | ||
161 | static inline __u64 ldlm_pool_slv_min(__u32 L) | |
162 | { | |
163 | return 1; | |
164 | } | |
165 | ||
166 | enum { | |
167 | LDLM_POOL_FIRST_STAT = 0, | |
168 | LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT, | |
169 | LDLM_POOL_GRANT_STAT, | |
170 | LDLM_POOL_CANCEL_STAT, | |
171 | LDLM_POOL_GRANT_RATE_STAT, | |
172 | LDLM_POOL_CANCEL_RATE_STAT, | |
173 | LDLM_POOL_GRANT_PLAN_STAT, | |
174 | LDLM_POOL_SLV_STAT, | |
175 | LDLM_POOL_SHRINK_REQTD_STAT, | |
176 | LDLM_POOL_SHRINK_FREED_STAT, | |
177 | LDLM_POOL_RECALC_STAT, | |
178 | LDLM_POOL_TIMING_STAT, | |
179 | LDLM_POOL_LAST_STAT | |
180 | }; | |
181 | ||
182 | static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl) | |
183 | { | |
184 | return container_of(pl, struct ldlm_namespace, ns_pool); | |
185 | } | |
186 | ||
187 | /** | |
188 | * Calculates the suggested grant_step in % of available locks for the | |
189 | * passed period \a t. This is later used in grant_plan calculations. | |
190 | */ | |
191 | static inline int ldlm_pool_t2gsp(unsigned int t) | |
192 | { | |
193 | /* | |
194 | * This yields a 1% grant step for any period shorter than | |
195 | * 2^LDLM_POOL_GSP_STEP_SHIFT seconds and up to 30% for longer periods. | |
196 | * | |
197 | * How this affects execution is the following: | |
198 | * | |
199 | * - for a thread period of 1s we will have a grant_step of 1%, which is | |
200 | * good from the pov of taking some load off the server and pushing it out | |
201 | * to clients. This is because a 1% grant_step means that the server will | |
202 | * not allow clients to get lots of locks in a short period of time while | |
203 | * keeping all their old locks cached. Clients will always have to give | |
204 | * some locks back if they want to take new ones; | |
205 | * | |
206 | * - for a thread period of 10s (the default) we will have 23%, which means | |
207 | * that clients will have enough room to take some new locks without giving | |
208 | * any back. All locks from this 23% which were not taken by clients in the | |
209 | * current period will contribute to SLV growth. More SLV means more locks | |
210 | * cached on clients until the limit or the grant plan is reached, whichever | |
211 | * comes first. | |
212 | */ | |
213 | return LDLM_POOL_MAX_GSP - | |
214 | ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >> | |
215 | (t >> LDLM_POOL_GSP_STEP_SHIFT)); | |
216 | } | |
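A standalone copy of the function, compiled in userspace purely as a check, reproduces the figures quoted in the comment above: 1% for a 1 s period, 23% for the default 10 s period, saturating at 30% for long periods.

/* Userspace copy of ldlm_pool_t2gsp() used only to verify the 1%/23%/30% figures. */
#include <stdio.h>

#define EX_MAX_GSP		30
#define EX_MIN_GSP		1
#define EX_GSP_STEP_SHIFT	2

static int ex_t2gsp(unsigned int t)
{
	return EX_MAX_GSP - ((EX_MAX_GSP - EX_MIN_GSP) >> (t >> EX_GSP_STEP_SHIFT));
}

int main(void)
{
	printf("t=1s  -> %d%%\n", ex_t2gsp(1));		/* 30 - (29 >> 0) = 1  */
	printf("t=10s -> %d%%\n", ex_t2gsp(10));	/* 30 - (29 >> 2) = 23 */
	printf("t=60s -> %d%%\n", ex_t2gsp(60));	/* 30 - (29 >> 15) = 30 */
	return 0;
}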
217 | ||
218 | /** | |
219 | * Recalculates the next grant plan on the passed \a pl. | |
220 | * | |
221 | * \pre ->pl_lock is locked. | |
222 | */ | |
223 | static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl) | |
224 | { | |
225 | int granted, grant_step, limit; | |
226 | ||
227 | limit = ldlm_pool_get_limit(pl); | |
228 | granted = atomic_read(&pl->pl_granted); | |
229 | ||
230 | grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period); | |
231 | grant_step = ((limit - granted) * grant_step) / 100; | |
232 | pl->pl_grant_plan = granted + grant_step; | |
233 | limit = (limit * 5) >> 2; | |
234 | if (pl->pl_grant_plan > limit) | |
235 | pl->pl_grant_plan = limit; | |
236 | } | |
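As a worked example of the calculation above (all numbers invented): with limit = 1000, granted = 800 and the default 10 s recalc period (grant step 23%), the plan becomes 800 + (1000 - 800) * 23 / 100 = 846, well under the cap of limit * 5 / 4 = 1250.

/* Hypothetical numbers run through the grant plan formula above. */
#include <stdio.h>

int main(void)
{
	int limit = 1000, granted = 800, grant_step = 23;	/* assumptions */
	int step = ((limit - granted) * grant_step) / 100;	/* 46 */
	int plan = granted + step;				/* 846 */
	int cap = (limit * 5) >> 2;				/* 1250 */

	printf("grant_plan = %d (capped at %d)\n", plan > cap ? cap : plan, cap);
	return 0;
}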
237 | ||
238 | /** | |
239 | * Recalculates next SLV on passed \a pl. | |
240 | * | |
241 | * \pre ->pl_lock is locked. | |
242 | */ | |
243 | static void ldlm_pool_recalc_slv(struct ldlm_pool *pl) | |
244 | { | |
245 | int granted; | |
246 | int grant_plan; | |
247 | int round_up; | |
248 | __u64 slv; | |
249 | __u64 slv_factor; | |
250 | __u64 grant_usage; | |
251 | __u32 limit; | |
252 | ||
253 | slv = pl->pl_server_lock_volume; | |
254 | grant_plan = pl->pl_grant_plan; | |
255 | limit = ldlm_pool_get_limit(pl); | |
256 | granted = atomic_read(&pl->pl_granted); | |
257 | round_up = granted < limit; | |
258 | ||
259 | grant_usage = max_t(int, limit - (granted - grant_plan), 1); | |
260 | ||
261 | /* | |
262 | * Find out the SLV change factor, which is the ratio of grant | |
263 | * usage to the limit. SLV changes as fast as the ratio of grant | |
264 | * plan consumption. The more locks from the grant plan are left | |
265 | * unconsumed by clients in the last interval (idle time), the | |
266 | * faster SLV grows. Conversely, the more the grant plan is | |
267 | * over-consumed (load time), the faster SLV drops. | |
268 | */ | |
1d06bb4e | 269 | slv_factor = grant_usage << LDLM_POOL_SLV_SHIFT; |
d7e09d03 PT |
270 | do_div(slv_factor, limit); |
271 | slv = slv * slv_factor; | |
272 | slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up); | |
273 | ||
8d2ff65d | 274 | if (slv > ldlm_pool_slv_max(limit)) |
d7e09d03 | 275 | slv = ldlm_pool_slv_max(limit); |
8d2ff65d | 276 | else if (slv < ldlm_pool_slv_min(limit)) |
d7e09d03 | 277 | slv = ldlm_pool_slv_min(limit); |
d7e09d03 PT |
278 | |
279 | pl->pl_server_lock_volume = slv; | |
280 | } | |
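In effect the update above is fixed-point arithmetic for slv_new ~= slv_old * grant_usage / limit, with LDLM_POOL_SLV_SHIFT giving 1/1024 granularity. A userspace sketch with invented pool numbers (do_div() replaced by a plain division) shows SLV dropping to roughly 95% of its old value when the grant plan is over-consumed by 5% of the limit:

/* Fixed-point SLV update from above, with hypothetical pool numbers. */
#include <stdio.h>
#include <stdint.h>

#define EX_SLV_SHIFT	10

int main(void)
{
	uint64_t slv = 1000000;			/* current SLV (assumption) */
	uint32_t limit = 1000;			/* pool limit (assumption) */
	int granted = 900, grant_plan = 850;	/* assumptions */
	int round_up = granted < limit;
	uint64_t grant_usage = limit - (granted - grant_plan);	/* 950 */
	uint64_t slv_factor = (grant_usage << EX_SLV_SHIFT) / limit;

	slv = slv * slv_factor;
	slv = (slv + (round_up ? (1 << EX_SLV_SHIFT) - 1 : 0)) >> EX_SLV_SHIFT;

	/* prints 949219: clients over-consumed the plan, so SLV shrinks by ~5% */
	printf("new SLV = %llu\n", (unsigned long long)slv);
	return 0;
}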
281 | ||
282 | /** | |
283 | * Recalculates next stats on passed \a pl. | |
284 | * | |
285 | * \pre ->pl_lock is locked. | |
286 | */ | |
287 | static void ldlm_pool_recalc_stats(struct ldlm_pool *pl) | |
288 | { | |
289 | int grant_plan = pl->pl_grant_plan; | |
290 | __u64 slv = pl->pl_server_lock_volume; | |
291 | int granted = atomic_read(&pl->pl_granted); | |
292 | int grant_rate = atomic_read(&pl->pl_grant_rate); | |
293 | int cancel_rate = atomic_read(&pl->pl_cancel_rate); | |
294 | ||
295 | lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT, | |
296 | slv); | |
297 | lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT, | |
298 | granted); | |
299 | lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT, | |
300 | grant_rate); | |
301 | lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT, | |
302 | grant_plan); | |
303 | lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT, | |
304 | cancel_rate); | |
305 | } | |
306 | ||
307 | /** | |
308 | * Sets current SLV into obd accessible via ldlm_pl2ns(pl)->ns_obd. | |
309 | */ | |
310 | static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl) | |
311 | { | |
312 | struct obd_device *obd; | |
313 | ||
314 | /* | |
315 | * Set the new SLV in the obd field so that it can be used later without | |
316 | * accessing the pool. This is required to avoid a race between sending a | |
317 | * reply to a client with the new SLV and cleaning up the server stack, in | |
318 | * which case we can't guarantee that the namespace is still alive. We only | |
319 | * know that the obd is alive as long as a valid export is alive. | |
320 | */ | |
321 | obd = ldlm_pl2ns(pl)->ns_obd; | |
322 | LASSERT(obd != NULL); | |
323 | write_lock(&obd->obd_pool_lock); | |
324 | obd->obd_pool_slv = pl->pl_server_lock_volume; | |
325 | write_unlock(&obd->obd_pool_lock); | |
326 | } | |
327 | ||
328 | /** | |
329 | * Recalculates all pool fields on passed \a pl. | |
330 | * | |
331 | * \pre ->pl_lock is not locked. | |
332 | */ | |
333 | static int ldlm_srv_pool_recalc(struct ldlm_pool *pl) | |
334 | { | |
335 | time_t recalc_interval_sec; | |
d7e09d03 | 336 | |
7264b8a5 | 337 | recalc_interval_sec = get_seconds() - pl->pl_recalc_time; |
d7e09d03 | 338 | if (recalc_interval_sec < pl->pl_recalc_period) |
0a3bdb00 | 339 | return 0; |
d7e09d03 PT |
340 | |
341 | spin_lock(&pl->pl_lock); | |
7264b8a5 | 342 | recalc_interval_sec = get_seconds() - pl->pl_recalc_time; |
d7e09d03 PT |
343 | if (recalc_interval_sec < pl->pl_recalc_period) { |
344 | spin_unlock(&pl->pl_lock); | |
0a3bdb00 | 345 | return 0; |
d7e09d03 PT |
346 | } |
347 | /* | |
348 | * Recalc SLV after last period. This should be done | |
349 | * _before_ recalculating new grant plan. | |
350 | */ | |
351 | ldlm_pool_recalc_slv(pl); | |
352 | ||
353 | /* | |
354 | * Make sure that pool informed obd of last SLV changes. | |
355 | */ | |
356 | ldlm_srv_pool_push_slv(pl); | |
357 | ||
358 | /* | |
359 | * Update grant_plan for new period. | |
360 | */ | |
361 | ldlm_pool_recalc_grant_plan(pl); | |
362 | ||
7264b8a5 | 363 | pl->pl_recalc_time = get_seconds(); |
d7e09d03 PT |
364 | lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT, |
365 | recalc_interval_sec); | |
366 | spin_unlock(&pl->pl_lock); | |
0a3bdb00 | 367 | return 0; |
d7e09d03 PT |
368 | } |
369 | ||
370 | /** | |
371 | * This function is used on the server side as the main entry point for | |
372 | * memory pressure handling. It decreases SLV on \a pl according to the | |
373 | * passed \a nr and \a gfp_mask. | |
374 | * | |
375 | * Our goal here is to decrease SLV in such a way that clients hold \a nr | |
376 | * fewer locks over the next 10h. | |
377 | */ | |
378 | static int ldlm_srv_pool_shrink(struct ldlm_pool *pl, | |
5802572e | 379 | int nr, gfp_t gfp_mask) |
d7e09d03 PT |
380 | { |
381 | __u32 limit; | |
382 | ||
383 | /* | |
384 | * VM is asking how many entries may be potentially freed. | |
385 | */ | |
386 | if (nr == 0) | |
387 | return atomic_read(&pl->pl_granted); | |
388 | ||
389 | /* | |
390 | * Clients have already canceled their locks while the server is in the | |
391 | * shrinker and can't cancel anything itself. Let's catch this race. | |
392 | */ | |
393 | if (atomic_read(&pl->pl_granted) == 0) | |
0a3bdb00 | 394 | return 0; |
d7e09d03 PT |
395 | |
396 | spin_lock(&pl->pl_lock); | |
397 | ||
398 | /* | |
399 | * We want the shrinker to possibly cause cancellation of @nr locks from | |
400 | * clients, or to grant approximately @nr fewer locks in the next | |
401 | * intervals. | |
402 | * | |
403 | * This is why we decrease SLV by @nr. The effect only lasts for one | |
404 | * recalc interval (1s these days), which should be enough to pass the | |
405 | * decreased SLV to all clients. On the next recalc interval the pool | |
406 | * will either increase SLV if the lock load is not high, keep it at the | |
407 | * same level, or decrease it again; thus the shrinker-decreased SLV | |
408 | * affects the next recalc intervals and this way lowers the locking load. | |
409 | */ | |
410 | if (nr < pl->pl_server_lock_volume) { | |
411 | pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr; | |
412 | } else { | |
413 | limit = ldlm_pool_get_limit(pl); | |
414 | pl->pl_server_lock_volume = ldlm_pool_slv_min(limit); | |
415 | } | |
416 | ||
417 | /* | |
418 | * Make sure that pool informed obd of last SLV changes. | |
419 | */ | |
420 | ldlm_srv_pool_push_slv(pl); | |
421 | spin_unlock(&pl->pl_lock); | |
422 | ||
423 | /* | |
424 | * We have not really freed any memory here so far; it may only be | |
425 | * freed later. Return 0 so as not to confuse the VM. | |
426 | */ | |
427 | return 0; | |
428 | } | |
429 | ||
430 | /** | |
431 | * Set up the server side pool \a pl with the passed \a limit. | |
432 | */ | |
433 | static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit) | |
434 | { | |
435 | struct obd_device *obd; | |
436 | ||
437 | obd = ldlm_pl2ns(pl)->ns_obd; | |
438 | LASSERT(obd != NULL && obd != LP_POISON); | |
439 | LASSERT(obd->obd_type != LP_POISON); | |
440 | write_lock(&obd->obd_pool_lock); | |
441 | obd->obd_pool_limit = limit; | |
442 | write_unlock(&obd->obd_pool_lock); | |
443 | ||
444 | ldlm_pool_set_limit(pl, limit); | |
445 | return 0; | |
446 | } | |
447 | ||
448 | /** | |
449 | * Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd to the passed \a pl. | |
450 | */ | |
451 | static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl) | |
452 | { | |
453 | struct obd_device *obd; | |
454 | ||
455 | /* | |
456 | * Get the new SLV and Limit from the obd, which is updated by | |
457 | * incoming RPCs. | |
458 | */ | |
459 | obd = ldlm_pl2ns(pl)->ns_obd; | |
460 | LASSERT(obd != NULL); | |
461 | read_lock(&obd->obd_pool_lock); | |
462 | pl->pl_server_lock_volume = obd->obd_pool_slv; | |
463 | ldlm_pool_set_limit(pl, obd->obd_pool_limit); | |
464 | read_unlock(&obd->obd_pool_lock); | |
465 | } | |
466 | ||
467 | /** | |
468 | * Recalculates the client side pool \a pl according to the current SLV and Limit. | |
469 | */ | |
470 | static int ldlm_cli_pool_recalc(struct ldlm_pool *pl) | |
471 | { | |
472 | time_t recalc_interval_sec; | |
d7e09d03 | 473 | |
7264b8a5 | 474 | recalc_interval_sec = get_seconds() - pl->pl_recalc_time; |
d7e09d03 | 475 | if (recalc_interval_sec < pl->pl_recalc_period) |
0a3bdb00 | 476 | return 0; |
d7e09d03 PT |
477 | |
478 | spin_lock(&pl->pl_lock); | |
479 | /* | |
480 | * Check if we need to recalc lists now. | |
481 | */ | |
7264b8a5 | 482 | recalc_interval_sec = get_seconds() - pl->pl_recalc_time; |
d7e09d03 PT |
483 | if (recalc_interval_sec < pl->pl_recalc_period) { |
484 | spin_unlock(&pl->pl_lock); | |
0a3bdb00 | 485 | return 0; |
d7e09d03 PT |
486 | } |
487 | ||
488 | /* | |
489 | * Make sure that pool knows last SLV and Limit from obd. | |
490 | */ | |
491 | ldlm_cli_pool_pop_slv(pl); | |
492 | ||
7264b8a5 | 493 | pl->pl_recalc_time = get_seconds(); |
d7e09d03 PT |
494 | lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT, |
495 | recalc_interval_sec); | |
496 | spin_unlock(&pl->pl_lock); | |
497 | ||
498 | /* | |
499 | * Do not cancel locks in case lru resize is disabled for this ns. | |
500 | */ | |
501 | if (!ns_connect_lru_resize(ldlm_pl2ns(pl))) | |
0a3bdb00 | 502 | return 0; |
d7e09d03 PT |
503 | |
504 | /* | |
505 | * When canceling locks on the client we do not need to maintain sharp | |
506 | * timing, we only want to cancel locks asap according to the new SLV. | |
507 | * This may be called when SLV has changed a lot, which is why we do not | |
508 | * take pl->pl_recalc_time into account here. | |
509 | */ | |
0a3bdb00 | 510 | return ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR); |
d7e09d03 PT |
511 | } |
512 | ||
513 | /** | |
514 | * This function is the main entry point for memory pressure handling on the | |
515 | * client side. Its main goal is to cancel some number of locks on the | |
516 | * passed \a pl according to \a nr and \a gfp_mask. | |
517 | */ | |
518 | static int ldlm_cli_pool_shrink(struct ldlm_pool *pl, | |
5802572e | 519 | int nr, gfp_t gfp_mask) |
d7e09d03 PT |
520 | { |
521 | struct ldlm_namespace *ns; | |
cbc3769e | 522 | int unused; |
d7e09d03 PT |
523 | |
524 | ns = ldlm_pl2ns(pl); | |
525 | ||
526 | /* | |
527 | * Do not cancel locks in case lru resize is disabled for this ns. | |
528 | */ | |
529 | if (!ns_connect_lru_resize(ns)) | |
0a3bdb00 | 530 | return 0; |
d7e09d03 PT |
531 | |
532 | /* | |
533 | * Make sure that pool knows last SLV and Limit from obd. | |
534 | */ | |
535 | ldlm_cli_pool_pop_slv(pl); | |
536 | ||
537 | spin_lock(&ns->ns_lock); | |
538 | unused = ns->ns_nr_unused; | |
539 | spin_unlock(&ns->ns_lock); | |
540 | ||
cbc3769e PT |
541 | if (nr == 0) |
542 | return (unused / 100) * sysctl_vfs_cache_pressure; | |
543 | else | |
544 | return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK); | |
d7e09d03 PT |
545 | } |
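For the nr == 0 case the function only reports an estimate of freeable objects rather than canceling anything; with the kernel's default sysctl_vfs_cache_pressure of 100 that estimate is simply the number of unused locks. A small illustration with assumed numbers:

/* Hypothetical freeable-lock estimate as computed above for nr == 0. */
#include <stdio.h>

int main(void)
{
	int unused = 4200;		/* ns_nr_unused (assumption) */
	int vfs_cache_pressure = 100;	/* kernel default */

	/* (unused / 100) * pressure: with pressure 100 this reports all unused locks */
	printf("freeable estimate = %d\n", (unused / 100) * vfs_cache_pressure);
	return 0;
}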
546 | ||
b9c98cfa | 547 | static const struct ldlm_pool_ops ldlm_srv_pool_ops = { |
d7e09d03 PT |
548 | .po_recalc = ldlm_srv_pool_recalc, |
549 | .po_shrink = ldlm_srv_pool_shrink, | |
550 | .po_setup = ldlm_srv_pool_setup | |
551 | }; | |
552 | ||
b9c98cfa | 553 | static const struct ldlm_pool_ops ldlm_cli_pool_ops = { |
d7e09d03 PT |
554 | .po_recalc = ldlm_cli_pool_recalc, |
555 | .po_shrink = ldlm_cli_pool_shrink | |
556 | }; | |
557 | ||
558 | /** | |
559 | * Pool recalc wrapper. Will call either the client or the server pool recalc | |
560 | * callback depending on which pool \a pl is used. | |
561 | */ | |
562 | int ldlm_pool_recalc(struct ldlm_pool *pl) | |
563 | { | |
564 | time_t recalc_interval_sec; | |
565 | int count; | |
566 | ||
7264b8a5 | 567 | recalc_interval_sec = get_seconds() - pl->pl_recalc_time; |
d7e09d03 PT |
568 | if (recalc_interval_sec <= 0) |
569 | goto recalc; | |
570 | ||
571 | spin_lock(&pl->pl_lock); | |
d7e09d03 PT |
572 | if (recalc_interval_sec > 0) { |
573 | /* | |
574 | * Update pool statistics every 1s. | |
575 | */ | |
576 | ldlm_pool_recalc_stats(pl); | |
577 | ||
578 | /* | |
579 | * Zero out all rates and speed for the last period. | |
580 | */ | |
581 | atomic_set(&pl->pl_grant_rate, 0); | |
582 | atomic_set(&pl->pl_cancel_rate, 0); | |
583 | } | |
584 | spin_unlock(&pl->pl_lock); | |
585 | ||
586 | recalc: | |
587 | if (pl->pl_ops->po_recalc != NULL) { | |
588 | count = pl->pl_ops->po_recalc(pl); | |
589 | lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT, | |
590 | count); | |
d7e09d03 | 591 | } |
7264b8a5 | 592 | recalc_interval_sec = pl->pl_recalc_time - get_seconds() + |
3eface59 | 593 | pl->pl_recalc_period; |
d7e09d03 | 594 | |
3eface59 | 595 | return recalc_interval_sec; |
d7e09d03 | 596 | } |
d7e09d03 | 597 | |
cbc3769e | 598 | /* |
d7e09d03 | 599 | * Pool shrink wrapper. Will call either the client or the server pool shrink |
cbc3769e PT |
600 | * callback depending on which pool pl is used. When nr == 0, just return | |
601 | * the number of freeable locks. Otherwise, return the number of canceled locks. | |
d7e09d03 PT |
602 | */ |
603 | int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, | |
5802572e | 604 | gfp_t gfp_mask) |
d7e09d03 PT |
605 | { |
606 | int cancel = 0; | |
607 | ||
608 | if (pl->pl_ops->po_shrink != NULL) { | |
609 | cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask); | |
610 | if (nr > 0) { | |
611 | lprocfs_counter_add(pl->pl_stats, | |
612 | LDLM_POOL_SHRINK_REQTD_STAT, | |
613 | nr); | |
614 | lprocfs_counter_add(pl->pl_stats, | |
615 | LDLM_POOL_SHRINK_FREED_STAT, | |
616 | cancel); | |
2d00bd17 JP |
617 | CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n", |
618 | pl->pl_name, nr, cancel); | |
d7e09d03 PT |
619 | } |
620 | } | |
621 | return cancel; | |
622 | } | |
623 | EXPORT_SYMBOL(ldlm_pool_shrink); | |
624 | ||
625 | /** | |
626 | * Pool setup wrapper. Will call either the client or the server pool setup | |
627 | * callback depending on which pool \a pl is used. | |
628 | * | |
629 | * Sets passed \a limit into pool \a pl. | |
630 | */ | |
631 | int ldlm_pool_setup(struct ldlm_pool *pl, int limit) | |
632 | { | |
633 | if (pl->pl_ops->po_setup != NULL) | |
e8291974 | 634 | return pl->pl_ops->po_setup(pl, limit); |
d7e09d03 PT |
635 | return 0; |
636 | } | |
637 | EXPORT_SYMBOL(ldlm_pool_setup); | |
638 | ||
4d089bb4 | 639 | #if defined(CONFIG_PROC_FS) |
73bb1da6 | 640 | static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused) |
d7e09d03 PT |
641 | { |
642 | int granted, grant_rate, cancel_rate, grant_step; | |
73bb1da6 PT |
643 | int grant_speed, grant_plan, lvf; |
644 | struct ldlm_pool *pl = m->private; | |
d7e09d03 PT |
645 | __u64 slv, clv; |
646 | __u32 limit; | |
647 | ||
648 | spin_lock(&pl->pl_lock); | |
649 | slv = pl->pl_server_lock_volume; | |
650 | clv = pl->pl_client_lock_volume; | |
651 | limit = ldlm_pool_get_limit(pl); | |
652 | grant_plan = pl->pl_grant_plan; | |
653 | granted = atomic_read(&pl->pl_granted); | |
654 | grant_rate = atomic_read(&pl->pl_grant_rate); | |
655 | cancel_rate = atomic_read(&pl->pl_cancel_rate); | |
656 | grant_speed = grant_rate - cancel_rate; | |
657 | lvf = atomic_read(&pl->pl_lock_volume_factor); | |
658 | grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period); | |
659 | spin_unlock(&pl->pl_lock); | |
660 | ||
73bb1da6 | 661 | seq_printf(m, "LDLM pool state (%s):\n" |
b0f5aad5 GKH |
662 | " SLV: %llu\n" |
663 | " CLV: %llu\n" | |
73bb1da6 PT |
664 | " LVF: %d\n", |
665 | pl->pl_name, slv, clv, lvf); | |
d7e09d03 PT |
666 | |
667 | if (ns_is_server(ldlm_pl2ns(pl))) { | |
73bb1da6 PT |
668 | seq_printf(m, " GSP: %d%%\n" |
669 | " GP: %d\n", | |
670 | grant_step, grant_plan); | |
d7e09d03 | 671 | } |
73bb1da6 PT |
672 | seq_printf(m, " GR: %d\n" " CR: %d\n" " GS: %d\n" |
673 | " G: %d\n" " L: %d\n", | |
674 | grant_rate, cancel_rate, grant_speed, | |
675 | granted, limit); | |
676 | ||
677 | return 0; | |
d7e09d03 | 678 | } |
73bb1da6 | 679 | LPROC_SEQ_FOPS_RO(lprocfs_pool_state); |
d7e09d03 | 680 | |
73bb1da6 | 681 | static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused) |
d7e09d03 | 682 | { |
73bb1da6 | 683 | struct ldlm_pool *pl = m->private; |
d7e09d03 PT |
684 | int grant_speed; |
685 | ||
686 | spin_lock(&pl->pl_lock); | |
687 | /* serialize with ldlm_pool_recalc */ | |
688 | grant_speed = atomic_read(&pl->pl_grant_rate) - | |
689 | atomic_read(&pl->pl_cancel_rate); | |
690 | spin_unlock(&pl->pl_lock); | |
73bb1da6 | 691 | return lprocfs_rd_uint(m, &grant_speed); |
d7e09d03 PT |
692 | } |
693 | ||
73bb1da6 PT |
694 | LDLM_POOL_PROC_READER_SEQ_SHOW(grant_plan, int); |
695 | LPROC_SEQ_FOPS_RO(lprocfs_grant_plan); | |
696 | ||
697 | LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int); | |
d7e09d03 | 698 | LDLM_POOL_PROC_WRITER(recalc_period, int); |
e7ddc48c | 699 | static ssize_t lprocfs_recalc_period_seq_write(struct file *file, |
e84962e3 TL |
700 | const char __user *buf, |
701 | size_t len, loff_t *off) | |
73bb1da6 PT |
702 | { |
703 | struct seq_file *seq = file->private_data; | |
704 | ||
705 | return lprocfs_wr_recalc_period(file, buf, len, seq->private); | |
706 | } | |
707 | LPROC_SEQ_FOPS(lprocfs_recalc_period); | |
708 | ||
709 | LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, u64); | |
710 | LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, atomic); | |
711 | LPROC_SEQ_FOPS_RW_TYPE(ldlm_pool_rw, atomic); | |
712 | ||
713 | LPROC_SEQ_FOPS_RO(lprocfs_grant_speed); | |
714 | ||
715 | #define LDLM_POOL_ADD_VAR(name, var, ops) \ | |
716 | do { \ | |
717 | snprintf(var_name, MAX_STRING_SIZE, #name); \ | |
718 | pool_vars[0].data = var; \ | |
719 | pool_vars[0].fops = ops; \ | |
3eb84460 | 720 | lprocfs_add_vars(pl->pl_proc_dir, pool_vars, NULL);\ |
73bb1da6 | 721 | } while (0) |
d7e09d03 PT |
722 | |
723 | static int ldlm_pool_proc_init(struct ldlm_pool *pl) | |
724 | { | |
725 | struct ldlm_namespace *ns = ldlm_pl2ns(pl); | |
726 | struct proc_dir_entry *parent_ns_proc; | |
727 | struct lprocfs_vars pool_vars[2]; | |
728 | char *var_name = NULL; | |
729 | int rc = 0; | |
d7e09d03 PT |
730 | |
731 | OBD_ALLOC(var_name, MAX_STRING_SIZE + 1); | |
732 | if (!var_name) | |
0a3bdb00 | 733 | return -ENOMEM; |
d7e09d03 | 734 | |
73bb1da6 | 735 | parent_ns_proc = ns->ns_proc_dir_entry; |
d7e09d03 PT |
736 | if (parent_ns_proc == NULL) { |
737 | CERROR("%s: proc entry is not initialized\n", | |
738 | ldlm_ns_name(ns)); | |
d1c0d446 JL |
739 | rc = -EINVAL; |
740 | goto out_free_name; | |
d7e09d03 PT |
741 | } |
742 | pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc, | |
743 | NULL, NULL); | |
744 | if (IS_ERR(pl->pl_proc_dir)) { | |
745 | CERROR("LProcFS failed in ldlm-pool-init\n"); | |
746 | rc = PTR_ERR(pl->pl_proc_dir); | |
5907838a | 747 | pl->pl_proc_dir = NULL; |
d1c0d446 | 748 | goto out_free_name; |
d7e09d03 PT |
749 | } |
750 | ||
751 | var_name[MAX_STRING_SIZE] = '\0'; | |
752 | memset(pool_vars, 0, sizeof(pool_vars)); | |
753 | pool_vars[0].name = var_name; | |
754 | ||
73bb1da6 PT |
755 | LDLM_POOL_ADD_VAR("server_lock_volume", &pl->pl_server_lock_volume, |
756 | &ldlm_pool_u64_fops); | |
757 | LDLM_POOL_ADD_VAR("limit", &pl->pl_limit, &ldlm_pool_rw_atomic_fops); | |
758 | LDLM_POOL_ADD_VAR("granted", &pl->pl_granted, &ldlm_pool_atomic_fops); | |
759 | LDLM_POOL_ADD_VAR("grant_speed", pl, &lprocfs_grant_speed_fops); | |
760 | LDLM_POOL_ADD_VAR("cancel_rate", &pl->pl_cancel_rate, | |
761 | &ldlm_pool_atomic_fops); | |
762 | LDLM_POOL_ADD_VAR("grant_rate", &pl->pl_grant_rate, | |
763 | &ldlm_pool_atomic_fops); | |
764 | LDLM_POOL_ADD_VAR("grant_plan", pl, &lprocfs_grant_plan_fops); | |
765 | LDLM_POOL_ADD_VAR("recalc_period", pl, &lprocfs_recalc_period_fops); | |
766 | LDLM_POOL_ADD_VAR("lock_volume_factor", &pl->pl_lock_volume_factor, | |
767 | &ldlm_pool_rw_atomic_fops); | |
768 | LDLM_POOL_ADD_VAR("state", pl, &lprocfs_pool_state_fops); | |
d7e09d03 PT |
769 | |
770 | pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT - | |
771 | LDLM_POOL_FIRST_STAT, 0); | |
d1c0d446 JL |
772 | if (!pl->pl_stats) { |
773 | rc = -ENOMEM; | |
774 | goto out_free_name; | |
775 | } | |
d7e09d03 PT |
776 | |
777 | lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT, | |
778 | LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, | |
779 | "granted", "locks"); | |
780 | lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT, | |
781 | LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, | |
782 | "grant", "locks"); | |
783 | lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT, | |
784 | LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, | |
785 | "cancel", "locks"); | |
786 | lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT, | |
787 | LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, | |
788 | "grant_rate", "locks/s"); | |
789 | lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT, | |
790 | LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, | |
791 | "cancel_rate", "locks/s"); | |
792 | lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT, | |
793 | LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, | |
794 | "grant_plan", "locks/s"); | |
795 | lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT, | |
796 | LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, | |
797 | "slv", "slv"); | |
798 | lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT, | |
799 | LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, | |
800 | "shrink_request", "locks"); | |
801 | lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT, | |
802 | LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, | |
803 | "shrink_freed", "locks"); | |
804 | lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT, | |
805 | LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, | |
806 | "recalc_freed", "locks"); | |
807 | lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT, | |
808 | LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, | |
809 | "recalc_timing", "sec"); | |
810 | rc = lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats); | |
811 | ||
d7e09d03 PT |
812 | out_free_name: |
813 | OBD_FREE(var_name, MAX_STRING_SIZE + 1); | |
814 | return rc; | |
815 | } | |
816 | ||
817 | static void ldlm_pool_proc_fini(struct ldlm_pool *pl) | |
818 | { | |
819 | if (pl->pl_stats != NULL) { | |
820 | lprocfs_free_stats(&pl->pl_stats); | |
821 | pl->pl_stats = NULL; | |
822 | } | |
823 | if (pl->pl_proc_dir != NULL) { | |
824 | lprocfs_remove(&pl->pl_proc_dir); | |
825 | pl->pl_proc_dir = NULL; | |
826 | } | |
827 | } | |
f267cdb4 | 828 | #else /* !CONFIG_PROC_FS */ |
2c185ffa PT |
829 | static int ldlm_pool_proc_init(struct ldlm_pool *pl) |
830 | { | |
831 | return 0; | |
832 | } | |
833 | ||
834 | static void ldlm_pool_proc_fini(struct ldlm_pool *pl) {} | |
f267cdb4 | 835 | #endif /* CONFIG_PROC_FS */ |
d7e09d03 PT |
836 | |
837 | int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns, | |
838 | int idx, ldlm_side_t client) | |
839 | { | |
840 | int rc; | |
d7e09d03 PT |
841 | |
842 | spin_lock_init(&pl->pl_lock); | |
843 | atomic_set(&pl->pl_granted, 0); | |
7264b8a5 | 844 | pl->pl_recalc_time = get_seconds(); |
d7e09d03 PT |
845 | atomic_set(&pl->pl_lock_volume_factor, 1); |
846 | ||
847 | atomic_set(&pl->pl_grant_rate, 0); | |
848 | atomic_set(&pl->pl_cancel_rate, 0); | |
849 | pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L); | |
850 | ||
851 | snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d", | |
852 | ldlm_ns_name(ns), idx); | |
853 | ||
854 | if (client == LDLM_NAMESPACE_SERVER) { | |
855 | pl->pl_ops = &ldlm_srv_pool_ops; | |
856 | ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L); | |
857 | pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD; | |
858 | pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L); | |
859 | } else { | |
860 | ldlm_pool_set_limit(pl, 1); | |
861 | pl->pl_server_lock_volume = 0; | |
862 | pl->pl_ops = &ldlm_cli_pool_ops; | |
863 | pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD; | |
864 | } | |
865 | pl->pl_client_lock_volume = 0; | |
866 | rc = ldlm_pool_proc_init(pl); | |
867 | if (rc) | |
0a3bdb00 | 868 | return rc; |
d7e09d03 PT |
869 | |
870 | CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name); | |
871 | ||
0a3bdb00 | 872 | return rc; |
d7e09d03 PT |
873 | } |
874 | EXPORT_SYMBOL(ldlm_pool_init); | |
875 | ||
876 | void ldlm_pool_fini(struct ldlm_pool *pl) | |
877 | { | |
d7e09d03 PT |
878 | ldlm_pool_proc_fini(pl); |
879 | ||
880 | /* | |
881 | * The pool should not be used after this point. We can't free it here | |
882 | * as it lives in struct ldlm_namespace, but we are still interested in | |
883 | * catching any abnormal use cases. | |
884 | */ | |
885 | POISON(pl, 0x5a, sizeof(*pl)); | |
d7e09d03 PT |
886 | } |
887 | EXPORT_SYMBOL(ldlm_pool_fini); | |
888 | ||
889 | /** | |
890 | * Add new taken ldlm lock \a lock into pool \a pl accounting. | |
891 | */ | |
892 | void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock) | |
893 | { | |
894 | /* | |
895 | * FLOCK locks are special in the sense that they are almost never | |
896 | * cancelled; instead a special kind of lock is used to drop them. | |
897 | * Also there is no LRU for flock locks, so there is no point in | |
898 | * tracking them anyway. | |
899 | */ | |
900 | if (lock->l_resource->lr_type == LDLM_FLOCK) | |
901 | return; | |
902 | ||
903 | atomic_inc(&pl->pl_granted); | |
904 | atomic_inc(&pl->pl_grant_rate); | |
905 | lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT); | |
906 | /* | |
907 | * Do not do pool recalc for the client side as all locks which may | |
908 | * potentially be canceled have already been packed into the | |
909 | * enqueue/cancel RPC. Also we do not want to run out of stack | |
910 | * with too-long call paths. | |
911 | */ | |
912 | if (ns_is_server(ldlm_pl2ns(pl))) | |
913 | ldlm_pool_recalc(pl); | |
914 | } | |
915 | EXPORT_SYMBOL(ldlm_pool_add); | |
916 | ||
917 | /** | |
918 | * Remove ldlm lock \a lock from pool \a pl accounting. | |
919 | */ | |
920 | void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock) | |
921 | { | |
922 | /* | |
923 | * Filter out FLOCK locks. Read above comment in ldlm_pool_add(). | |
924 | */ | |
925 | if (lock->l_resource->lr_type == LDLM_FLOCK) | |
926 | return; | |
927 | ||
928 | LASSERT(atomic_read(&pl->pl_granted) > 0); | |
929 | atomic_dec(&pl->pl_granted); | |
930 | atomic_inc(&pl->pl_cancel_rate); | |
931 | ||
932 | lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT); | |
933 | ||
934 | if (ns_is_server(ldlm_pl2ns(pl))) | |
935 | ldlm_pool_recalc(pl); | |
936 | } | |
937 | EXPORT_SYMBOL(ldlm_pool_del); | |
938 | ||
939 | /** | |
940 | * Returns current \a pl SLV. | |
941 | * | |
942 | * \pre ->pl_lock is not locked. | |
943 | */ | |
944 | __u64 ldlm_pool_get_slv(struct ldlm_pool *pl) | |
945 | { | |
946 | __u64 slv; | |
902f3bb1 | 947 | |
d7e09d03 PT |
948 | spin_lock(&pl->pl_lock); |
949 | slv = pl->pl_server_lock_volume; | |
950 | spin_unlock(&pl->pl_lock); | |
951 | return slv; | |
952 | } | |
953 | EXPORT_SYMBOL(ldlm_pool_get_slv); | |
954 | ||
955 | /** | |
956 | * Sets passed \a slv to \a pl. | |
957 | * | |
958 | * \pre ->pl_lock is not locked. | |
959 | */ | |
960 | void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv) | |
961 | { | |
962 | spin_lock(&pl->pl_lock); | |
963 | pl->pl_server_lock_volume = slv; | |
964 | spin_unlock(&pl->pl_lock); | |
965 | } | |
966 | EXPORT_SYMBOL(ldlm_pool_set_slv); | |
967 | ||
968 | /** | |
969 | * Returns current \a pl CLV. | |
970 | * | |
971 | * \pre ->pl_lock is not locked. | |
972 | */ | |
973 | __u64 ldlm_pool_get_clv(struct ldlm_pool *pl) | |
974 | { | |
975 | __u64 slv; | |
902f3bb1 | 976 | |
d7e09d03 PT |
977 | spin_lock(&pl->pl_lock); |
978 | slv = pl->pl_client_lock_volume; | |
979 | spin_unlock(&pl->pl_lock); | |
980 | return slv; | |
981 | } | |
982 | EXPORT_SYMBOL(ldlm_pool_get_clv); | |
983 | ||
984 | /** | |
985 | * Sets passed \a clv to \a pl. | |
986 | * | |
987 | * \pre ->pl_lock is not locked. | |
988 | */ | |
989 | void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv) | |
990 | { | |
991 | spin_lock(&pl->pl_lock); | |
992 | pl->pl_client_lock_volume = clv; | |
993 | spin_unlock(&pl->pl_lock); | |
994 | } | |
995 | EXPORT_SYMBOL(ldlm_pool_set_clv); | |
996 | ||
997 | /** | |
998 | * Returns current \a pl limit. | |
999 | */ | |
1000 | __u32 ldlm_pool_get_limit(struct ldlm_pool *pl) | |
1001 | { | |
1002 | return atomic_read(&pl->pl_limit); | |
1003 | } | |
1004 | EXPORT_SYMBOL(ldlm_pool_get_limit); | |
1005 | ||
1006 | /** | |
1007 | * Sets passed \a limit to \a pl. | |
1008 | */ | |
1009 | void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit) | |
1010 | { | |
1011 | atomic_set(&pl->pl_limit, limit); | |
1012 | } | |
1013 | EXPORT_SYMBOL(ldlm_pool_set_limit); | |
1014 | ||
1015 | /** | |
1016 | * Returns current LVF from \a pl. | |
1017 | */ | |
1018 | __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl) | |
1019 | { | |
1020 | return atomic_read(&pl->pl_lock_volume_factor); | |
1021 | } | |
1022 | EXPORT_SYMBOL(ldlm_pool_get_lvf); | |
1023 | ||
1024 | static int ldlm_pool_granted(struct ldlm_pool *pl) | |
1025 | { | |
1026 | return atomic_read(&pl->pl_granted); | |
1027 | } | |
1028 | ||
1029 | static struct ptlrpc_thread *ldlm_pools_thread; | |
d7e09d03 PT |
1030 | static struct completion ldlm_pools_comp; |
1031 | ||
1032 | /* | |
cbc3769e PT |
1033 | * count locks from all namespaces (if possible). Returns number of |
1034 | * cached locks. | |
d7e09d03 | 1035 | */ |
5802572e | 1036 | static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask) |
d7e09d03 | 1037 | { |
cbc3769e | 1038 | int total = 0, nr_ns; |
d7e09d03 | 1039 | struct ldlm_namespace *ns; |
91a50030 | 1040 | struct ldlm_namespace *ns_old = NULL; /* loop detection */ |
d7e09d03 PT |
1041 | void *cookie; |
1042 | ||
cbc3769e PT |
1043 | if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS)) |
1044 | return 0; | |
d7e09d03 | 1045 | |
cbc3769e PT |
1046 | CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n", |
1047 | client == LDLM_NAMESPACE_CLIENT ? "client" : "server"); | |
d7e09d03 PT |
1048 | |
1049 | cookie = cl_env_reenter(); | |
1050 | ||
1051 | /* | |
1052 | * Find out how many resources we may release. | |
1053 | */ | |
91a50030 | 1054 | for (nr_ns = ldlm_namespace_nr_read(client); |
cbc3769e | 1055 | nr_ns > 0; nr_ns--) { |
d7e09d03 PT |
1056 | mutex_lock(ldlm_namespace_lock(client)); |
1057 | if (list_empty(ldlm_namespace_list(client))) { | |
1058 | mutex_unlock(ldlm_namespace_lock(client)); | |
1059 | cl_env_reexit(cookie); | |
1060 | return 0; | |
1061 | } | |
1062 | ns = ldlm_namespace_first_locked(client); | |
91a50030 OD |
1063 | |
1064 | if (ns == ns_old) { | |
1065 | mutex_unlock(ldlm_namespace_lock(client)); | |
1066 | break; | |
1067 | } | |
1068 | ||
1069 | if (ldlm_ns_empty(ns)) { | |
1070 | ldlm_namespace_move_to_inactive_locked(ns, client); | |
1071 | mutex_unlock(ldlm_namespace_lock(client)); | |
1072 | continue; | |
1073 | } | |
1074 | ||
1075 | if (ns_old == NULL) | |
1076 | ns_old = ns; | |
1077 | ||
d7e09d03 | 1078 | ldlm_namespace_get(ns); |
91a50030 | 1079 | ldlm_namespace_move_to_active_locked(ns, client); |
d7e09d03 PT |
1080 | mutex_unlock(ldlm_namespace_lock(client)); |
1081 | total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask); | |
1082 | ldlm_namespace_put(ns); | |
1083 | } | |
1084 | ||
cbc3769e PT |
1085 | cl_env_reexit(cookie); |
1086 | return total; | |
1087 | } | |
1088 | ||
5802572e | 1089 | static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask) |
cbc3769e PT |
1090 | { |
1091 | unsigned long freed = 0; | |
1092 | int tmp, nr_ns; | |
1093 | struct ldlm_namespace *ns; | |
1094 | void *cookie; | |
1095 | ||
1096 | if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS)) | |
1097 | return -1; | |
1098 | ||
1099 | cookie = cl_env_reenter(); | |
d7e09d03 PT |
1100 | |
1101 | /* | |
cbc3769e | 1102 | * Shrink at least ldlm_namespace_nr_read(client) namespaces. |
d7e09d03 | 1103 | */ |
cbc3769e PT |
1104 | for (tmp = nr_ns = ldlm_namespace_nr_read(client); |
1105 | tmp > 0; tmp--) { | |
d7e09d03 PT |
1106 | int cancel, nr_locks; |
1107 | ||
1108 | /* | |
1109 | * Do not call shrink under ldlm_namespace_lock(client) | |
1110 | */ | |
1111 | mutex_lock(ldlm_namespace_lock(client)); | |
1112 | if (list_empty(ldlm_namespace_list(client))) { | |
1113 | mutex_unlock(ldlm_namespace_lock(client)); | |
d7e09d03 PT |
1114 | break; |
1115 | } | |
1116 | ns = ldlm_namespace_first_locked(client); | |
1117 | ldlm_namespace_get(ns); | |
91a50030 | 1118 | ldlm_namespace_move_to_active_locked(ns, client); |
d7e09d03 PT |
1119 | mutex_unlock(ldlm_namespace_lock(client)); |
1120 | ||
1121 | nr_locks = ldlm_pool_granted(&ns->ns_pool); | |
cbc3769e PT |
1122 | /* |
1123 | * We used to shrink proportionally, but with the new shrinker API |
1124 | * we lost the total number of freeable locks. |
1125 | */ | |
1126 | cancel = 1 + min_t(int, nr_locks, nr / nr_ns); | |
1127 | freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask); | |
d7e09d03 PT |
1128 | ldlm_namespace_put(ns); |
1129 | } | |
1130 | cl_env_reexit(cookie); | |
cbc3769e PT |
1131 | /* |
1132 | * We only decrease the SLV in the server pools shrinker; return |
1133 | * SHRINK_STOP to the kernel to avoid a needless loop. LU-1128 |
1134 | */ | |
1135 | return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed; | |
1136 | } | |
1137 | ||
e7ddc48c AR |
1138 | static unsigned long ldlm_pools_srv_count(struct shrinker *s, |
1139 | struct shrink_control *sc) | |
cbc3769e PT |
1140 | { |
1141 | return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask); | |
d7e09d03 PT |
1142 | } |
1143 | ||
e7ddc48c AR |
1144 | static unsigned long ldlm_pools_srv_scan(struct shrinker *s, |
1145 | struct shrink_control *sc) | |
d7e09d03 | 1146 | { |
cbc3769e PT |
1147 | return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan, |
1148 | sc->gfp_mask); | |
d7e09d03 PT |
1149 | } |
1150 | ||
e7ddc48c AR |
1151 | static unsigned long ldlm_pools_cli_count(struct shrinker *s, |
1152 | struct shrink_control *sc) | |
d7e09d03 | 1153 | { |
cbc3769e PT |
1154 | return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask); |
1155 | } | |
1156 | ||
e7ddc48c AR |
1157 | static unsigned long ldlm_pools_cli_scan(struct shrinker *s, |
1158 | struct shrink_control *sc) | |
cbc3769e PT |
1159 | { |
1160 | return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan, | |
1161 | sc->gfp_mask); | |
d7e09d03 PT |
1162 | } |
1163 | ||
3eface59 | 1164 | int ldlm_pools_recalc(ldlm_side_t client) |
d7e09d03 PT |
1165 | { |
1166 | __u32 nr_l = 0, nr_p = 0, l; | |
1167 | struct ldlm_namespace *ns; | |
91a50030 | 1168 | struct ldlm_namespace *ns_old = NULL; |
d7e09d03 | 1169 | int nr, equal = 0; |
3eface59 | 1170 | int time = 50; /* seconds of sleep if no active namespaces */ |
d7e09d03 PT |
1171 | |
1172 | /* | |
1173 | * No need to setup pool limit for client pools. | |
1174 | */ | |
1175 | if (client == LDLM_NAMESPACE_SERVER) { | |
1176 | /* | |
1177 | * Check all modest namespaces first. | |
1178 | */ | |
1179 | mutex_lock(ldlm_namespace_lock(client)); | |
1180 | list_for_each_entry(ns, ldlm_namespace_list(client), | |
9d0b2b7a | 1181 | ns_list_chain) { |
d7e09d03 PT |
1182 | if (ns->ns_appetite != LDLM_NAMESPACE_MODEST) |
1183 | continue; | |
1184 | ||
1185 | l = ldlm_pool_granted(&ns->ns_pool); | |
1186 | if (l == 0) | |
1187 | l = 1; | |
1188 | ||
1189 | /* | |
1190 | * Set the modest pools limit equal to their avg granted | |
1191 | * locks + ~6%. | |
1192 | */ | |
1193 | l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0); | |
1194 | ldlm_pool_setup(&ns->ns_pool, l); | |
1195 | nr_l += l; | |
1196 | nr_p++; | |
1197 | } | |
1198 | ||
1199 | /* | |
1200 | * Make sure that modest namespaces did not eat more than 2/3 | |
1201 | * of the limit. | |
1202 | */ | |
1203 | if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) { | |
2d00bd17 JP |
1204 | CWARN("\"Modest\" pools eat out 2/3 of server locks limit (%d of %lu). This means that you have too many clients for this amount of server RAM. Upgrade server!\n", |
1205 | nr_l, LDLM_POOL_HOST_L); | |
d7e09d03 PT |
1206 | equal = 1; |
1207 | } | |
1208 | ||
1209 | /* | |
1210 | * The rest is given to greedy namespaces. | |
1211 | */ | |
1212 | list_for_each_entry(ns, ldlm_namespace_list(client), | |
e9e2fa69 | 1213 | ns_list_chain) { |
d7e09d03 PT |
1214 | if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY) |
1215 | continue; | |
1216 | ||
1217 | if (equal) { | |
1218 | /* | |
1219 | * In the case where 2/3 of the locks are eaten by | |
1220 | * modest pools, we re-set an equal limit | |
1221 | * for _all_ pools. | |
1222 | */ | |
1223 | l = LDLM_POOL_HOST_L / | |
91a50030 | 1224 | ldlm_namespace_nr_read(client); |
d7e09d03 PT |
1225 | } else { |
1226 | /* | |
1227 | * All the remaining greedy pools get the | |
1228 | * rest of the locks in equal parts. | |
1229 | */ | |
1230 | l = (LDLM_POOL_HOST_L - nr_l) / | |
91a50030 | 1231 | (ldlm_namespace_nr_read(client) - |
d7e09d03 PT |
1232 | nr_p); |
1233 | } | |
1234 | ldlm_pool_setup(&ns->ns_pool, l); | |
1235 | } | |
1236 | mutex_unlock(ldlm_namespace_lock(client)); | |
1237 | } | |
1238 | ||
1239 | /* | |
cbc3769e | 1240 | * Recalc at least ldlm_namespace_nr_read(client) namespaces. |
d7e09d03 | 1241 | */ |
91a50030 | 1242 | for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) { |
d7e09d03 PT |
1243 | int skip; |
1244 | /* | |
1245 | * Lock the list, get the first @ns in the list, getref, move it | |
1246 | * to the tail, unlock and call pool recalc. This way we avoid | |
1247 | * calling recalc under the @ns lock, which is really good as we get | |
1248 | * rid of a potential deadlock on client nodes when canceling | |
1249 | * locks synchronously. | |
1250 | */ | |
1251 | mutex_lock(ldlm_namespace_lock(client)); | |
1252 | if (list_empty(ldlm_namespace_list(client))) { | |
1253 | mutex_unlock(ldlm_namespace_lock(client)); | |
1254 | break; | |
1255 | } | |
1256 | ns = ldlm_namespace_first_locked(client); | |
1257 | ||
91a50030 OD |
1258 | if (ns_old == ns) { /* Full pass complete */ |
1259 | mutex_unlock(ldlm_namespace_lock(client)); | |
1260 | break; | |
1261 | } | |
1262 | ||
1263 | /* We got an empty namespace, need to move it back to inactive | |
1264 | * list. | |
1265 | * The race with parallel resource creation is fine: | |
1266 | * - If they do namespace_get before our check, we fail the | |
1267 | * check and they move this item to the end of the list anyway | |
1268 | * - If we do the check and then they do namespace_get, then | |
1269 | * we move the namespace to inactive and they will move | |
1270 | * it back to active (synchronised by the lock, so no clash | |
1271 | * there). | |
1272 | */ | |
1273 | if (ldlm_ns_empty(ns)) { | |
1274 | ldlm_namespace_move_to_inactive_locked(ns, client); | |
1275 | mutex_unlock(ldlm_namespace_lock(client)); | |
1276 | continue; | |
1277 | } | |
1278 | ||
1279 | if (ns_old == NULL) | |
1280 | ns_old = ns; | |
1281 | ||
d7e09d03 PT |
1282 | spin_lock(&ns->ns_lock); |
1283 | /* | |
1284 | * Skip an ns which is being freed; we don't want to increase | |
1285 | * its refcount again, not even temporarily. bz21519 & LU-499. | |
1286 | */ | |
1287 | if (ns->ns_stopping) { | |
1288 | skip = 1; | |
1289 | } else { | |
1290 | skip = 0; | |
1291 | ldlm_namespace_get(ns); | |
1292 | } | |
1293 | spin_unlock(&ns->ns_lock); | |
1294 | ||
91a50030 | 1295 | ldlm_namespace_move_to_active_locked(ns, client); |
d7e09d03 PT |
1296 | mutex_unlock(ldlm_namespace_lock(client)); |
1297 | ||
1298 | /* | |
1299 | * After setup is done - recalc the pool. | |
1300 | */ | |
1301 | if (!skip) { | |
3eface59 OD |
1302 | int ttime = ldlm_pool_recalc(&ns->ns_pool); |
1303 | ||
1304 | if (ttime < time) | |
1305 | time = ttime; | |
1306 | ||
d7e09d03 PT |
1307 | ldlm_namespace_put(ns); |
1308 | } | |
1309 | } | |
3eface59 | 1310 | return time; |
d7e09d03 PT |
1311 | } |
1312 | EXPORT_SYMBOL(ldlm_pools_recalc); | |
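To put the server-side limit distribution above into numbers, here is a sketch with invented namespaces; the host limit of 51200 and a modest margin shift of 4 (i.e. +1/16, roughly the "+ ~6%" from the comment) are assumptions, since the real constants live elsewhere. Each modest namespace gets its granted count plus the margin, and whatever remains of the host limit is split evenly among the greedy namespaces.

/* Hypothetical split of the host lock limit between modest and greedy pools. */
#include <stdio.h>

#define EX_HOST_L		51200	/* total server lock limit (assumption) */
#define EX_MODEST_MARGIN_SHIFT	4	/* ~6% margin (assumption) */

int main(void)
{
	int modest_granted[] = { 2000, 3000 };	/* invented modest namespaces */
	int greedy_count = 3;			/* invented greedy namespaces */
	int nr_l = 0, i;

	for (i = 0; i < 2; i++) {
		int l = modest_granted[i];

		l += l >> EX_MODEST_MARGIN_SHIFT;	/* granted + ~6% */
		printf("modest pool %d limit = %d\n", i, l);	/* 2125, 3187 */
		nr_l += l;
	}
	/* the rest is shared equally by the greedy pools: (51200 - 5312) / 3 = 15296 */
	printf("greedy pool limit = %d each\n", (EX_HOST_L - nr_l) / greedy_count);
	return 0;
}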
1313 | ||
1314 | static int ldlm_pools_thread_main(void *arg) | |
1315 | { | |
1316 | struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg; | |
3eface59 | 1317 | int s_time, c_time; |
d7e09d03 PT |
1318 | |
1319 | thread_set_flags(thread, SVC_RUNNING); | |
1320 | wake_up(&thread->t_ctl_waitq); | |
1321 | ||
1322 | CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n", | |
1323 | "ldlm_poold", current_pid()); | |
1324 | ||
1325 | while (1) { | |
1326 | struct l_wait_info lwi; | |
1327 | ||
1328 | /* | |
1329 | * Recalc all pools on this tick. | |
1330 | */ | |
3eface59 OD |
1331 | s_time = ldlm_pools_recalc(LDLM_NAMESPACE_SERVER); |
1332 | c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT); | |
d7e09d03 PT |
1333 | |
1334 | /* | |
1335 | * Wait until the next check time, or until we're | |
1336 | * stopped. | |
1337 | */ | |
3eface59 | 1338 | lwi = LWI_TIMEOUT(cfs_time_seconds(min(s_time, c_time)), |
d7e09d03 PT |
1339 | NULL, NULL); |
1340 | l_wait_event(thread->t_ctl_waitq, | |
1341 | thread_is_stopping(thread) || | |
1342 | thread_is_event(thread), | |
1343 | &lwi); | |
1344 | ||
1345 | if (thread_test_and_clear_flags(thread, SVC_STOPPING)) | |
1346 | break; | |
1347 | else | |
1348 | thread_test_and_clear_flags(thread, SVC_EVENT); | |
1349 | } | |
1350 | ||
1351 | thread_set_flags(thread, SVC_STOPPED); | |
1352 | wake_up(&thread->t_ctl_waitq); | |
1353 | ||
1354 | CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n", | |
1355 | "ldlm_poold", current_pid()); | |
1356 | ||
1357 | complete_and_exit(&ldlm_pools_comp, 0); | |
1358 | } | |
1359 | ||
1360 | static int ldlm_pools_thread_start(void) | |
1361 | { | |
1362 | struct l_wait_info lwi = { 0 }; | |
68b636b6 | 1363 | struct task_struct *task; |
d7e09d03 PT |
1364 | |
1365 | if (ldlm_pools_thread != NULL) | |
0a3bdb00 | 1366 | return -EALREADY; |
d7e09d03 PT |
1367 | |
1368 | OBD_ALLOC_PTR(ldlm_pools_thread); | |
1369 | if (ldlm_pools_thread == NULL) | |
0a3bdb00 | 1370 | return -ENOMEM; |
d7e09d03 PT |
1371 | |
1372 | init_completion(&ldlm_pools_comp); | |
1373 | init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq); | |
1374 | ||
1375 | task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread, | |
1376 | "ldlm_poold"); | |
1377 | if (IS_ERR(task)) { | |
1378 | CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task)); | |
1379 | OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread)); | |
1380 | ldlm_pools_thread = NULL; | |
0a3bdb00 | 1381 | return PTR_ERR(task); |
d7e09d03 PT |
1382 | } |
1383 | l_wait_event(ldlm_pools_thread->t_ctl_waitq, | |
1384 | thread_is_running(ldlm_pools_thread), &lwi); | |
0a3bdb00 | 1385 | return 0; |
d7e09d03 PT |
1386 | } |
1387 | ||
1388 | static void ldlm_pools_thread_stop(void) | |
1389 | { | |
8d2ff65d | 1390 | if (ldlm_pools_thread == NULL) |
d7e09d03 | 1391 | return; |
d7e09d03 PT |
1392 | |
1393 | thread_set_flags(ldlm_pools_thread, SVC_STOPPING); | |
1394 | wake_up(&ldlm_pools_thread->t_ctl_waitq); | |
1395 | ||
1396 | /* | |
1397 | * Make sure that pools thread is finished before freeing @thread. | |
1398 | * This fixes possible race and oops due to accessing freed memory | |
1399 | * in pools thread. | |
1400 | */ | |
1401 | wait_for_completion(&ldlm_pools_comp); | |
1402 | OBD_FREE_PTR(ldlm_pools_thread); | |
1403 | ldlm_pools_thread = NULL; | |
d7e09d03 PT |
1404 | } |
1405 | ||
cbc3769e PT |
1406 | static struct shrinker ldlm_pools_srv_shrinker = { |
1407 | .count_objects = ldlm_pools_srv_count, | |
1408 | .scan_objects = ldlm_pools_srv_scan, | |
1409 | .seeks = DEFAULT_SEEKS, | |
1410 | }; | |
1411 | ||
1412 | static struct shrinker ldlm_pools_cli_shrinker = { | |
1413 | .count_objects = ldlm_pools_cli_count, | |
1414 | .scan_objects = ldlm_pools_cli_scan, | |
1415 | .seeks = DEFAULT_SEEKS, | |
1416 | }; | |
1417 | ||
d7e09d03 PT |
1418 | int ldlm_pools_init(void) |
1419 | { | |
1420 | int rc; | |
d7e09d03 PT |
1421 | |
1422 | rc = ldlm_pools_thread_start(); | |
1423 | if (rc == 0) { | |
cbc3769e PT |
1424 | register_shrinker(&ldlm_pools_srv_shrinker); |
1425 | register_shrinker(&ldlm_pools_cli_shrinker); | |
d7e09d03 | 1426 | } |
0a3bdb00 | 1427 | return rc; |
d7e09d03 PT |
1428 | } |
1429 | EXPORT_SYMBOL(ldlm_pools_init); | |
1430 | ||
1431 | void ldlm_pools_fini(void) | |
1432 | { | |
cbc3769e PT |
1433 | unregister_shrinker(&ldlm_pools_srv_shrinker); |
1434 | unregister_shrinker(&ldlm_pools_cli_shrinker); | |
d7e09d03 PT |
1435 | ldlm_pools_thread_stop(); |
1436 | } | |
1437 | EXPORT_SYMBOL(ldlm_pools_fini); |