drivers/staging/lustre/lustre/llite/lproc_llite.c
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2015, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 */
36 #define DEBUG_SUBSYSTEM S_LLITE
37
38 #include "../include/lustre_lite.h"
39 #include "../include/lprocfs_status.h"
40 #include <linux/seq_file.h>
41 #include "../include/obd_support.h"
42
43 #include "llite_internal.h"
44 #include "vvp_internal.h"
45
46 /* debugfs llite mount point registration */
47 static struct file_operations ll_rw_extents_stats_fops;
48 static struct file_operations ll_rw_extents_stats_pp_fops;
49 static struct file_operations ll_rw_offset_stats_fops;
50
51 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
52 char *buf)
53 {
54 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
55 ll_kobj);
56 struct obd_statfs osfs;
57 int rc;
58
59 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
60 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
61 OBD_STATFS_NODELAY);
62 if (!rc)
63 return sprintf(buf, "%u\n", osfs.os_bsize);
64
65 return rc;
66 }
67 LUSTRE_RO_ATTR(blocksize);
68
69 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
70 char *buf)
71 {
72 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
73 ll_kobj);
74 struct obd_statfs osfs;
75 int rc;
76
77 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
78 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
79 OBD_STATFS_NODELAY);
80 if (!rc) {
81 __u32 blk_size = osfs.os_bsize >> 10;
82 __u64 result = osfs.os_blocks;
83
84 while (blk_size >>= 1)
85 result <<= 1;
86
87 rc = sprintf(buf, "%llu\n", result);
88 }
89
90 return rc;
91 }
92 LUSTRE_RO_ATTR(kbytestotal);
93
94 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
95 char *buf)
96 {
97 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
98 ll_kobj);
99 struct obd_statfs osfs;
100 int rc;
101
102 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
103 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
104 OBD_STATFS_NODELAY);
105 if (!rc) {
106 __u32 blk_size = osfs.os_bsize >> 10;
107 __u64 result = osfs.os_bfree;
108
109 while (blk_size >>= 1)
110 result <<= 1;
111
112 rc = sprintf(buf, "%llu\n", result);
113 }
114
115 return rc;
116 }
117 LUSTRE_RO_ATTR(kbytesfree);
118
119 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
120 char *buf)
121 {
122 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
123 ll_kobj);
124 struct obd_statfs osfs;
125 int rc;
126
127 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
128 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
129 OBD_STATFS_NODELAY);
130 if (!rc) {
131 __u32 blk_size = osfs.os_bsize >> 10;
132 __u64 result = osfs.os_bavail;
133
134 while (blk_size >>= 1)
135 result <<= 1;
136
137 rc = sprintf(buf, "%llu\n", result);
138 }
139
140 return rc;
141 }
142 LUSTRE_RO_ATTR(kbytesavail);
143
144 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
145 char *buf)
146 {
147 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
148 ll_kobj);
149 struct obd_statfs osfs;
150 int rc;
151
152 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
153 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
154 OBD_STATFS_NODELAY);
155 if (!rc)
156 return sprintf(buf, "%llu\n", osfs.os_files);
157
158 return rc;
159 }
160 LUSTRE_RO_ATTR(filestotal);
161
162 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
163 char *buf)
164 {
165 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
166 ll_kobj);
167 struct obd_statfs osfs;
168 int rc;
169
170 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
171 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
172 OBD_STATFS_NODELAY);
173 if (!rc)
174 return sprintf(buf, "%llu\n", osfs.os_ffree);
175
176 return rc;
177 }
178 LUSTRE_RO_ATTR(filesfree);
179
180 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
181 char *buf)
182 {
183 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
184 ll_kobj);
185
186 return sprintf(buf, "%s client\n",
187 sbi->ll_flags & LL_SBI_RMT_CLIENT ? "remote" : "local");
188 }
189 LUSTRE_RO_ATTR(client_type);
190
191 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
192 char *buf)
193 {
194 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
195 ll_kobj);
196
197 return sprintf(buf, "%s\n", sbi->ll_sb->s_type->name);
198 }
199 LUSTRE_RO_ATTR(fstype);
200
201 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
202 char *buf)
203 {
204 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
205 ll_kobj);
206
207 return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
208 }
209 LUSTRE_RO_ATTR(uuid);
210
211 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
212 {
213 struct super_block *sb = m->private;
214
215 /*
216 * See description of statistical counters in struct cl_site, and
217 * struct lu_site.
218 */
219 return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
220 }
221
222 LPROC_SEQ_FOPS_RO(ll_site_stats);
223
224 static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
225 struct attribute *attr, char *buf)
226 {
227 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
228 ll_kobj);
229 long pages_number;
230 int mult;
231
232 spin_lock(&sbi->ll_lock);
233 pages_number = sbi->ll_ra_info.ra_max_pages;
234 spin_unlock(&sbi->ll_lock);
235
236 mult = 1 << (20 - PAGE_SHIFT);
237 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
238 }
239
240 static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
241 struct attribute *attr,
242 const char *buffer,
243 size_t count)
244 {
245 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
246 ll_kobj);
247 int rc;
248 unsigned long pages_number;
249
250 rc = kstrtoul(buffer, 10, &pages_number);
251 if (rc)
252 return rc;
253
254 pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
255
256 if (pages_number > totalram_pages / 2) {
257
258 CERROR("can't set file readahead more than %lu MB\n",
259 totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
260 return -ERANGE;
261 }
262
263 spin_lock(&sbi->ll_lock);
264 sbi->ll_ra_info.ra_max_pages = pages_number;
265 spin_unlock(&sbi->ll_lock);
266
267 return count;
268 }
269 LUSTRE_RW_ATTR(max_read_ahead_mb);
270
271 static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
272 struct attribute *attr,
273 char *buf)
274 {
275 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
276 ll_kobj);
277 long pages_number;
278 int mult;
279
280 spin_lock(&sbi->ll_lock);
281 pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
282 spin_unlock(&sbi->ll_lock);
283
284 mult = 1 << (20 - PAGE_SHIFT);
285 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
286 }
287
288 static ssize_t max_read_ahead_per_file_mb_store(struct kobject *kobj,
289 struct attribute *attr,
290 const char *buffer,
291 size_t count)
292 {
293 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
294 ll_kobj);
295 int rc;
296 unsigned long pages_number;
297
298 rc = kstrtoul(buffer, 10, &pages_number);
299 if (rc)
300 return rc;
301
302 if (pages_number > sbi->ll_ra_info.ra_max_pages) {
303 CERROR("can't set file readahead more than max_read_ahead_mb %lu MB\n",
304 sbi->ll_ra_info.ra_max_pages);
305 return -ERANGE;
306 }
307
308 spin_lock(&sbi->ll_lock);
309 sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
310 spin_unlock(&sbi->ll_lock);
311
312 return count;
313 }
314 LUSTRE_RW_ATTR(max_read_ahead_per_file_mb);
315
316 static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
317 struct attribute *attr,
318 char *buf)
319 {
320 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
321 ll_kobj);
322 long pages_number;
323 int mult;
324
325 spin_lock(&sbi->ll_lock);
326 pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
327 spin_unlock(&sbi->ll_lock);
328
329 mult = 1 << (20 - PAGE_SHIFT);
330 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
331 }
332
333 static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
334 struct attribute *attr,
335 const char *buffer,
336 size_t count)
337 {
338 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
339 ll_kobj);
340 int rc;
341 unsigned long pages_number;
342
343 rc = kstrtoul(buffer, 10, &pages_number);
344 if (rc)
345 return rc;
346
347 /* Cap this at the current max readahead window size, the readahead
348 * algorithm does this anyway so it's pointless to set it larger.
349 */
350 if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
351 CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
352 sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
353 return -ERANGE;
354 }
355
356 spin_lock(&sbi->ll_lock);
357 sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
358 spin_unlock(&sbi->ll_lock);
359
360 return count;
361 }
362 LUSTRE_RW_ATTR(max_read_ahead_whole_mb);
363
364 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
365 {
366 struct super_block *sb = m->private;
367 struct ll_sb_info *sbi = ll_s2sbi(sb);
368 struct cl_client_cache *cache = &sbi->ll_cache;
369 int shift = 20 - PAGE_SHIFT;
370 int max_cached_mb;
371 int unused_mb;
372
373 max_cached_mb = cache->ccc_lru_max >> shift;
374 unused_mb = atomic_read(&cache->ccc_lru_left) >> shift;
375 seq_printf(m,
376 "users: %d\n"
377 "max_cached_mb: %d\n"
378 "used_mb: %d\n"
379 "unused_mb: %d\n"
380 "reclaim_count: %u\n",
381 atomic_read(&cache->ccc_users),
382 max_cached_mb,
383 max_cached_mb - unused_mb,
384 unused_mb,
385 cache->ccc_lru_shrinkers);
386 return 0;
387 }
388
389 static ssize_t ll_max_cached_mb_seq_write(struct file *file,
390 const char __user *buffer,
391 size_t count, loff_t *off)
392 {
393 struct super_block *sb = ((struct seq_file *)file->private_data)->private;
394 struct ll_sb_info *sbi = ll_s2sbi(sb);
395 struct cl_client_cache *cache = &sbi->ll_cache;
396 int mult, rc, pages_number;
397 int diff = 0;
398 int nrpages = 0;
399 char kernbuf[128];
400
401 if (count >= sizeof(kernbuf))
402 return -EINVAL;
403
404 if (copy_from_user(kernbuf, buffer, count))
405 return -EFAULT;
406 kernbuf[count] = 0;
407
408 mult = 1 << (20 - PAGE_SHIFT);
409 buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
410 kernbuf;
411 rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
412 if (rc)
413 return rc;
414
415 if (pages_number < 0 || pages_number > totalram_pages) {
416 CERROR("%s: can't set max cache more than %lu MB\n",
417 ll_get_fsname(sb, NULL, 0),
418 totalram_pages >> (20 - PAGE_SHIFT));
419 return -ERANGE;
420 }
421
422 spin_lock(&sbi->ll_lock);
423 diff = pages_number - cache->ccc_lru_max;
424 spin_unlock(&sbi->ll_lock);
425
426 /* easy - add more LRU slots. */
427 if (diff >= 0) {
428 atomic_add(diff, &cache->ccc_lru_left);
429 rc = 0;
430 goto out;
431 }
432
433 diff = -diff;
434 while (diff > 0) {
435 int tmp;
436
437 /* reduce LRU budget from free slots. */
438 do {
439 int ov, nv;
440
441 ov = atomic_read(&cache->ccc_lru_left);
442 if (ov == 0)
443 break;
444
445 nv = ov > diff ? ov - diff : 0;
446 rc = atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
447 if (likely(ov == rc)) {
448 diff -= ov - nv;
449 nrpages += ov - nv;
450 break;
451 }
452 } while (1);
453
454 if (diff <= 0)
455 break;
456
457 if (!sbi->ll_dt_exp) { /* being initialized */
458 rc = -ENODEV;
459 break;
460 }
461
462 /* difficult - have to ask OSCs to drop LRU slots. */
463 tmp = diff << 1;
464 rc = obd_set_info_async(NULL, sbi->ll_dt_exp,
465 sizeof(KEY_CACHE_LRU_SHRINK),
466 KEY_CACHE_LRU_SHRINK,
467 sizeof(tmp), &tmp, NULL);
468 if (rc < 0)
469 break;
470 }
471
472 out:
473 if (rc >= 0) {
474 spin_lock(&sbi->ll_lock);
475 cache->ccc_lru_max = pages_number;
476 spin_unlock(&sbi->ll_lock);
477 rc = count;
478 } else {
479 atomic_add(nrpages, &cache->ccc_lru_left);
480 }
481 return rc;
482 }
483
484 LPROC_SEQ_FOPS(ll_max_cached_mb);
485
486 static ssize_t checksum_pages_show(struct kobject *kobj, struct attribute *attr,
487 char *buf)
488 {
489 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
490 ll_kobj);
491
492 return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
493 }
494
495 static ssize_t checksum_pages_store(struct kobject *kobj,
496 struct attribute *attr,
497 const char *buffer,
498 size_t count)
499 {
500 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
501 ll_kobj);
502 int rc;
503 unsigned long val;
504
505 if (!sbi->ll_dt_exp)
506 /* Not set up yet */
507 return -EAGAIN;
508
509 rc = kstrtoul(buffer, 10, &val);
510 if (rc)
511 return rc;
512 if (val)
513 sbi->ll_flags |= LL_SBI_CHECKSUM;
514 else
515 sbi->ll_flags &= ~LL_SBI_CHECKSUM;
516
517 rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
518 KEY_CHECKSUM, sizeof(val), &val, NULL);
519 if (rc)
520 CWARN("Failed to set OSC checksum flags: %d\n", rc);
521
522 return count;
523 }
524 LUSTRE_RW_ATTR(checksum_pages);
525
526 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
527 enum stats_track_type type)
528 {
529 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
530 ll_kobj);
531
532 if (sbi->ll_stats_track_type == type)
533 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
534 else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
535 return sprintf(buf, "0 (all)\n");
536 else
537 return sprintf(buf, "untracked\n");
538 }
539
540 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
541 size_t count,
542 enum stats_track_type type)
543 {
544 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
545 ll_kobj);
546 int rc;
547 unsigned long pid;
548
549 rc = kstrtoul(buffer, 10, &pid);
550 if (rc)
551 return rc;
552 sbi->ll_stats_track_id = pid;
553 if (pid == 0)
554 sbi->ll_stats_track_type = STATS_TRACK_ALL;
555 else
556 sbi->ll_stats_track_type = type;
557 lprocfs_clear_stats(sbi->ll_stats);
558 return count;
559 }
560
561 static ssize_t stats_track_pid_show(struct kobject *kobj,
562 struct attribute *attr,
563 char *buf)
564 {
565 return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
566 }
567
568 static ssize_t stats_track_pid_store(struct kobject *kobj,
569 struct attribute *attr,
570 const char *buffer,
571 size_t count)
572 {
573 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
574 }
575 LUSTRE_RW_ATTR(stats_track_pid);
576
577 static ssize_t stats_track_ppid_show(struct kobject *kobj,
578 struct attribute *attr,
579 char *buf)
580 {
581 return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
582 }
583
584 static ssize_t stats_track_ppid_store(struct kobject *kobj,
585 struct attribute *attr,
586 const char *buffer,
587 size_t count)
588 {
589 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
590 }
591 LUSTRE_RW_ATTR(stats_track_ppid);
592
593 static ssize_t stats_track_gid_show(struct kobject *kobj,
594 struct attribute *attr,
595 char *buf)
596 {
597 return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
598 }
599
600 static ssize_t stats_track_gid_store(struct kobject *kobj,
601 struct attribute *attr,
602 const char *buffer,
603 size_t count)
604 {
605 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
606 }
607 LUSTRE_RW_ATTR(stats_track_gid);
608
609 static ssize_t statahead_max_show(struct kobject *kobj,
610 struct attribute *attr,
611 char *buf)
612 {
613 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
614 ll_kobj);
615
616 return sprintf(buf, "%u\n", sbi->ll_sa_max);
617 }
618
619 static ssize_t statahead_max_store(struct kobject *kobj,
620 struct attribute *attr,
621 const char *buffer,
622 size_t count)
623 {
624 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
625 ll_kobj);
626 int rc;
627 unsigned long val;
628
629 rc = kstrtoul(buffer, 10, &val);
630 if (rc)
631 return rc;
632
633 if (val <= LL_SA_RPC_MAX)
634 sbi->ll_sa_max = val;
635 else
636 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
637 val, LL_SA_RPC_MAX);
638
639 return count;
640 }
641 LUSTRE_RW_ATTR(statahead_max);
642
643 static ssize_t statahead_agl_show(struct kobject *kobj,
644 struct attribute *attr,
645 char *buf)
646 {
647 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
648 ll_kobj);
649
650 return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
651 }
652
653 static ssize_t statahead_agl_store(struct kobject *kobj,
654 struct attribute *attr,
655 const char *buffer,
656 size_t count)
657 {
658 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
659 ll_kobj);
660 int rc;
661 unsigned long val;
662
663 rc = kstrtoul(buffer, 10, &val);
664 if (rc)
665 return rc;
666
667 if (val)
668 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
669 else
670 sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
671
672 return count;
673 }
674 LUSTRE_RW_ATTR(statahead_agl);
675
676 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
677 {
678 struct super_block *sb = m->private;
679 struct ll_sb_info *sbi = ll_s2sbi(sb);
680
681 seq_printf(m,
682 "statahead total: %u\n"
683 "statahead wrong: %u\n"
684 "agl total: %u\n",
685 atomic_read(&sbi->ll_sa_total),
686 atomic_read(&sbi->ll_sa_wrong),
687 atomic_read(&sbi->ll_agl_total));
688 return 0;
689 }
690
691 LPROC_SEQ_FOPS_RO(ll_statahead_stats);
692
693 static ssize_t lazystatfs_show(struct kobject *kobj,
694 struct attribute *attr,
695 char *buf)
696 {
697 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
698 ll_kobj);
699
700 return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_LAZYSTATFS ? 1 : 0);
701 }
702
703 static ssize_t lazystatfs_store(struct kobject *kobj,
704 struct attribute *attr,
705 const char *buffer,
706 size_t count)
707 {
708 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
709 ll_kobj);
710 int rc;
711 unsigned long val;
712
713 rc = kstrtoul(buffer, 10, &val);
714 if (rc)
715 return rc;
716
717 if (val)
718 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
719 else
720 sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
721
722 return count;
723 }
724 LUSTRE_RW_ATTR(lazystatfs);
725
726 static ssize_t max_easize_show(struct kobject *kobj,
727 struct attribute *attr,
728 char *buf)
729 {
730 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
731 ll_kobj);
732 unsigned int ealen;
733 int rc;
734
735 rc = ll_get_max_mdsize(sbi, &ealen);
736 if (rc)
737 return rc;
738
739 return sprintf(buf, "%u\n", ealen);
740 }
741 LUSTRE_RO_ATTR(max_easize);
742
743 static ssize_t default_easize_show(struct kobject *kobj,
744 struct attribute *attr,
745 char *buf)
746 {
747 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
748 ll_kobj);
749 unsigned int ealen;
750 int rc;
751
752 rc = ll_get_default_mdsize(sbi, &ealen);
753 if (rc)
754 return rc;
755
756 return sprintf(buf, "%u\n", ealen);
757 }
758 LUSTRE_RO_ATTR(default_easize);
759
760 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
761 {
762 const char *str[] = LL_SBI_FLAGS;
763 struct super_block *sb = m->private;
764 int flags = ll_s2sbi(sb)->ll_flags;
765 int i = 0;
766
767 while (flags != 0) {
768 if (ARRAY_SIZE(str) <= i) {
769 CERROR("%s: Revise array LL_SBI_FLAGS to match sbi flags please.\n",
770 ll_get_fsname(sb, NULL, 0));
771 return -EINVAL;
772 }
773
774 if (flags & 0x1)
775 seq_printf(m, "%s ", str[i]);
776 flags >>= 1;
777 ++i;
778 }
779 seq_printf(m, "\b\n");
780 return 0;
781 }
782
783 LPROC_SEQ_FOPS_RO(ll_sbi_flags);
784
785 static ssize_t xattr_cache_show(struct kobject *kobj,
786 struct attribute *attr,
787 char *buf)
788 {
789 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
790 ll_kobj);
791
792 return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
793 }
794
795 static ssize_t xattr_cache_store(struct kobject *kobj,
796 struct attribute *attr,
797 const char *buffer,
798 size_t count)
799 {
800 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
801 ll_kobj);
802 int rc;
803 unsigned long val;
804
805 rc = kstrtoul(buffer, 10, &val);
806 if (rc)
807 return rc;
808
809 if (val != 0 && val != 1)
810 return -ERANGE;
811
812 if (val == 1 && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
813 return -ENOTSUPP;
814
815 sbi->ll_xattr_cache_enabled = val;
816
817 return count;
818 }
819 LUSTRE_RW_ATTR(xattr_cache);
820
821 static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
822 /* { "mntpt_path", ll_rd_path, 0, 0 }, */
823 { "site", &ll_site_stats_fops, NULL, 0 },
824 /* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */
825 { "max_cached_mb", &ll_max_cached_mb_fops, NULL },
826 { "statahead_stats", &ll_statahead_stats_fops, NULL, 0 },
827 { "sbi_flags", &ll_sbi_flags_fops, NULL, 0 },
828 { NULL }
829 };
830
831 #define MAX_STRING_SIZE 128
832
833 static struct attribute *llite_attrs[] = {
834 &lustre_attr_blocksize.attr,
835 &lustre_attr_kbytestotal.attr,
836 &lustre_attr_kbytesfree.attr,
837 &lustre_attr_kbytesavail.attr,
838 &lustre_attr_filestotal.attr,
839 &lustre_attr_filesfree.attr,
840 &lustre_attr_client_type.attr,
841 &lustre_attr_fstype.attr,
842 &lustre_attr_uuid.attr,
843 &lustre_attr_max_read_ahead_mb.attr,
844 &lustre_attr_max_read_ahead_per_file_mb.attr,
845 &lustre_attr_max_read_ahead_whole_mb.attr,
846 &lustre_attr_checksum_pages.attr,
847 &lustre_attr_stats_track_pid.attr,
848 &lustre_attr_stats_track_ppid.attr,
849 &lustre_attr_stats_track_gid.attr,
850 &lustre_attr_statahead_max.attr,
851 &lustre_attr_statahead_agl.attr,
852 &lustre_attr_lazystatfs.attr,
853 &lustre_attr_max_easize.attr,
854 &lustre_attr_default_easize.attr,
855 &lustre_attr_xattr_cache.attr,
856 NULL,
857 };
858
859 static void llite_sb_release(struct kobject *kobj)
860 {
861 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
862 ll_kobj);
863 complete(&sbi->ll_kobj_unregister);
864 }
865
866 static struct kobj_type llite_ktype = {
867 .default_attrs = llite_attrs,
868 .sysfs_ops = &lustre_sysfs_ops,
869 .release = llite_sb_release,
870 };
871
872 static const struct llite_file_opcode {
873 __u32 opcode;
874 __u32 type;
875 const char *opname;
876 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
877 /* file operation */
878 { LPROC_LL_DIRTY_HITS, LPROCFS_TYPE_REGS, "dirty_pages_hits" },
879 { LPROC_LL_DIRTY_MISSES, LPROCFS_TYPE_REGS, "dirty_pages_misses" },
880 { LPROC_LL_READ_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
881 "read_bytes" },
882 { LPROC_LL_WRITE_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
883 "write_bytes" },
884 { LPROC_LL_BRW_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
885 "brw_read" },
886 { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
887 "brw_write" },
888 { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
889 "osc_read" },
890 { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
891 "osc_write" },
892 { LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" },
893 { LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" },
894 { LPROC_LL_RELEASE, LPROCFS_TYPE_REGS, "close" },
895 { LPROC_LL_MAP, LPROCFS_TYPE_REGS, "mmap" },
896 { LPROC_LL_LLSEEK, LPROCFS_TYPE_REGS, "seek" },
897 { LPROC_LL_FSYNC, LPROCFS_TYPE_REGS, "fsync" },
898 { LPROC_LL_READDIR, LPROCFS_TYPE_REGS, "readdir" },
899 /* inode operation */
900 { LPROC_LL_SETATTR, LPROCFS_TYPE_REGS, "setattr" },
901 { LPROC_LL_TRUNC, LPROCFS_TYPE_REGS, "truncate" },
902 { LPROC_LL_FLOCK, LPROCFS_TYPE_REGS, "flock" },
903 { LPROC_LL_GETATTR, LPROCFS_TYPE_REGS, "getattr" },
904 /* dir inode operation */
905 { LPROC_LL_CREATE, LPROCFS_TYPE_REGS, "create" },
906 { LPROC_LL_LINK, LPROCFS_TYPE_REGS, "link" },
907 { LPROC_LL_UNLINK, LPROCFS_TYPE_REGS, "unlink" },
908 { LPROC_LL_SYMLINK, LPROCFS_TYPE_REGS, "symlink" },
909 { LPROC_LL_MKDIR, LPROCFS_TYPE_REGS, "mkdir" },
910 { LPROC_LL_RMDIR, LPROCFS_TYPE_REGS, "rmdir" },
911 { LPROC_LL_MKNOD, LPROCFS_TYPE_REGS, "mknod" },
912 { LPROC_LL_RENAME, LPROCFS_TYPE_REGS, "rename" },
913 /* special inode operation */
914 { LPROC_LL_STAFS, LPROCFS_TYPE_REGS, "statfs" },
915 { LPROC_LL_ALLOC_INODE, LPROCFS_TYPE_REGS, "alloc_inode" },
916 { LPROC_LL_SETXATTR, LPROCFS_TYPE_REGS, "setxattr" },
917 { LPROC_LL_GETXATTR, LPROCFS_TYPE_REGS, "getxattr" },
918 { LPROC_LL_GETXATTR_HITS, LPROCFS_TYPE_REGS, "getxattr_hits" },
919 { LPROC_LL_LISTXATTR, LPROCFS_TYPE_REGS, "listxattr" },
920 { LPROC_LL_REMOVEXATTR, LPROCFS_TYPE_REGS, "removexattr" },
921 { LPROC_LL_INODE_PERM, LPROCFS_TYPE_REGS, "inode_permission" },
922 };
923
924 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
925 {
926 if (!sbi->ll_stats)
927 return;
928 if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
929 lprocfs_counter_add(sbi->ll_stats, op, count);
930 else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
931 sbi->ll_stats_track_id == current->pid)
932 lprocfs_counter_add(sbi->ll_stats, op, count);
933 else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
934 sbi->ll_stats_track_id == current->real_parent->pid)
935 lprocfs_counter_add(sbi->ll_stats, op, count);
936 else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
937 sbi->ll_stats_track_id ==
938 from_kgid(&init_user_ns, current_gid()))
939 lprocfs_counter_add(sbi->ll_stats, op, count);
940 }
941 EXPORT_SYMBOL(ll_stats_ops_tally);
942
943 static const char *ra_stat_string[] = {
944 [RA_STAT_HIT] = "hits",
945 [RA_STAT_MISS] = "misses",
946 [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
947 [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
948 [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
949 [RA_STAT_FAILED_MATCH] = "failed lock match",
950 [RA_STAT_DISCARDED] = "read but discarded",
951 [RA_STAT_ZERO_LEN] = "zero length file",
952 [RA_STAT_ZERO_WINDOW] = "zero size window",
953 [RA_STAT_EOF] = "read-ahead to EOF",
954 [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
955 [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
956 };
957
958 int ldebugfs_register_mountpoint(struct dentry *parent,
959 struct super_block *sb, char *osc, char *mdc)
960 {
961 struct lustre_sb_info *lsi = s2lsi(sb);
962 struct ll_sb_info *sbi = ll_s2sbi(sb);
963 struct obd_device *obd;
964 struct dentry *dir;
965 char name[MAX_STRING_SIZE + 1], *ptr;
966 int err, id, len, rc;
967
968 name[MAX_STRING_SIZE] = '\0';
969
970 LASSERT(sbi);
971 LASSERT(mdc);
972 LASSERT(osc);
973
974 /* Get fsname */
975 len = strlen(lsi->lsi_lmd->lmd_profile);
976 ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
977 if (ptr && (strcmp(ptr, "-client") == 0))
978 len -= 7;
979
980 /* Mount info */
981 snprintf(name, MAX_STRING_SIZE, "%.*s-%p", len,
982 lsi->lsi_lmd->lmd_profile, sb);
983
984 dir = ldebugfs_register(name, parent, NULL, NULL);
985 if (IS_ERR_OR_NULL(dir)) {
986 err = dir ? PTR_ERR(dir) : -ENOMEM;
987 sbi->ll_debugfs_entry = NULL;
988 return err;
989 }
990 sbi->ll_debugfs_entry = dir;
991
992 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "dump_page_cache", 0444,
993 &vvp_dump_pgcache_file_ops, sbi);
994 if (rc)
995 CWARN("Error adding the dump_page_cache file\n");
996
997 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "extents_stats", 0644,
998 &ll_rw_extents_stats_fops, sbi);
999 if (rc)
1000 CWARN("Error adding the extent_stats file\n");
1001
1002 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
1003 "extents_stats_per_process",
1004 0644, &ll_rw_extents_stats_pp_fops, sbi);
1005 if (rc)
1006 CWARN("Error adding the extents_stats_per_process file\n");
1007
1008 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "offset_stats", 0644,
1009 &ll_rw_offset_stats_fops, sbi);
1010 if (rc)
1011 CWARN("Error adding the offset_stats file\n");
1012
1013 /* File operations stats */
1014 sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1015 LPROCFS_STATS_FLAG_NONE);
1016 if (!sbi->ll_stats) {
1017 err = -ENOMEM;
1018 goto out;
1019 }
1020 /* do counter init */
1021 for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1022 __u32 type = llite_opcode_table[id].type;
1023 void *ptr = NULL;
1024
1025 if (type & LPROCFS_TYPE_REGS)
1026 ptr = "regs";
1027 else if (type & LPROCFS_TYPE_BYTES)
1028 ptr = "bytes";
1029 else if (type & LPROCFS_TYPE_PAGES)
1030 ptr = "pages";
1031 lprocfs_counter_init(sbi->ll_stats,
1032 llite_opcode_table[id].opcode,
1033 (type & LPROCFS_CNTR_AVGMINMAX),
1034 llite_opcode_table[id].opname, ptr);
1035 }
1036 err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
1037 sbi->ll_stats);
1038 if (err)
1039 goto out;
1040
1041 sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1042 LPROCFS_STATS_FLAG_NONE);
1043 if (!sbi->ll_ra_stats) {
1044 err = -ENOMEM;
1045 goto out;
1046 }
1047
1048 for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1049 lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1050 ra_stat_string[id], "pages");
1051
1052 err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
1053 sbi->ll_ra_stats);
1054 if (err)
1055 goto out;
1056
1057 err = ldebugfs_add_vars(sbi->ll_debugfs_entry,
1058 lprocfs_llite_obd_vars, sb);
1059 if (err)
1060 goto out;
1061
1062 sbi->ll_kobj.kset = llite_kset;
1063 init_completion(&sbi->ll_kobj_unregister);
1064 err = kobject_init_and_add(&sbi->ll_kobj, &llite_ktype, NULL,
1065 "%s", name);
1066 if (err)
1067 goto out;
1068
1069 /* MDC info */
1070 obd = class_name2obd(mdc);
1071
1072 err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
1073 obd->obd_type->typ_name);
1074 if (err)
1075 goto out;
1076
1077 /* OSC */
1078 obd = class_name2obd(osc);
1079
1080 err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
1081 obd->obd_type->typ_name);
1082 out:
1083 if (err) {
1084 ldebugfs_remove(&sbi->ll_debugfs_entry);
1085 lprocfs_free_stats(&sbi->ll_ra_stats);
1086 lprocfs_free_stats(&sbi->ll_stats);
1087 }
1088 return err;
1089 }
1090
1091 void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi)
1092 {
1093 if (sbi->ll_debugfs_entry) {
1094 ldebugfs_remove(&sbi->ll_debugfs_entry);
1095 kobject_put(&sbi->ll_kobj);
1096 wait_for_completion(&sbi->ll_kobj_unregister);
1097 lprocfs_free_stats(&sbi->ll_ra_stats);
1098 lprocfs_free_stats(&sbi->ll_stats);
1099 }
1100 }
1101
1102 #undef MAX_STRING_SIZE
1103
1104 #define pct(a, b) ((b) ? (a) * 100 / (b) : 0)
1105
1106 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1107 struct seq_file *seq, int which)
1108 {
1109 unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1110 unsigned long start, end, r, w;
1111 char *unitp = "KMGTPEZY";
1112 int i, units = 10;
1113 struct per_process_info *pp_info = &io_extents->pp_extents[which];
1114
1115 read_cum = 0;
1116 write_cum = 0;
1117 start = 0;
1118
1119 for (i = 0; i < LL_HIST_MAX; i++) {
1120 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1121 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1122 }
1123
1124 for (i = 0; i < LL_HIST_MAX; i++) {
1125 r = pp_info->pp_r_hist.oh_buckets[i];
1126 w = pp_info->pp_w_hist.oh_buckets[i];
1127 read_cum += r;
1128 write_cum += w;
1129 end = 1 << (i + LL_HIST_START - units);
1130 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu | %14lu %4lu %4lu\n",
1131 start, *unitp, end, *unitp,
1132 (i == LL_HIST_MAX - 1) ? '+' : ' ',
1133 r, pct(r, read_tot), pct(read_cum, read_tot),
1134 w, pct(w, write_tot), pct(write_cum, write_tot));
1135 start = end;
1136 if (start == 1<<10) {
1137 start = 1;
1138 units += 10;
1139 unitp++;
1140 }
1141 if (read_cum == read_tot && write_cum == write_tot)
1142 break;
1143 }
1144 }
1145
1146 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1147 {
1148 struct timespec64 now;
1149 struct ll_sb_info *sbi = seq->private;
1150 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1151 int k;
1152
1153 ktime_get_real_ts64(&now);
1154
1155 if (!sbi->ll_rw_stats_on) {
1156 seq_printf(seq, "disabled\n"
1157 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1158 return 0;
1159 }
1160 seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n",
1161 (s64)now.tv_sec, (unsigned long)now.tv_nsec);
1162 seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
1163 seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
1164 "extents", "calls", "%", "cum%",
1165 "calls", "%", "cum%");
1166 spin_lock(&sbi->ll_pp_extent_lock);
1167 for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1168 if (io_extents->pp_extents[k].pid != 0) {
1169 seq_printf(seq, "\nPID: %d\n",
1170 io_extents->pp_extents[k].pid);
1171 ll_display_extents_info(io_extents, seq, k);
1172 }
1173 }
1174 spin_unlock(&sbi->ll_pp_extent_lock);
1175 return 0;
1176 }
1177
1178 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1179 const char __user *buf,
1180 size_t len,
1181 loff_t *off)
1182 {
1183 struct seq_file *seq = file->private_data;
1184 struct ll_sb_info *sbi = seq->private;
1185 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1186 int i;
1187 int value = 1, rc = 0;
1188
1189 if (len == 0)
1190 return -EINVAL;
1191
1192 rc = lprocfs_write_helper(buf, len, &value);
1193 if (rc < 0 && len < 16) {
1194 char kernbuf[16];
1195
1196 if (copy_from_user(kernbuf, buf, len))
1197 return -EFAULT;
1198 kernbuf[len] = 0;
1199
1200 if (kernbuf[len - 1] == '\n')
1201 kernbuf[len - 1] = 0;
1202
1203 if (strcmp(kernbuf, "disabled") == 0 ||
1204 strcmp(kernbuf, "Disabled") == 0)
1205 value = 0;
1206 }
1207
1208 if (value == 0)
1209 sbi->ll_rw_stats_on = 0;
1210 else
1211 sbi->ll_rw_stats_on = 1;
1212
1213 spin_lock(&sbi->ll_pp_extent_lock);
1214 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1215 io_extents->pp_extents[i].pid = 0;
1216 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1217 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1218 }
1219 spin_unlock(&sbi->ll_pp_extent_lock);
1220 return len;
1221 }
1222
1223 LPROC_SEQ_FOPS(ll_rw_extents_stats_pp);
1224
1225 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1226 {
1227 struct timespec64 now;
1228 struct ll_sb_info *sbi = seq->private;
1229 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1230
1231 ktime_get_real_ts64(&now);
1232
1233 if (!sbi->ll_rw_stats_on) {
1234 seq_printf(seq, "disabled\n"
1235 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1236 return 0;
1237 }
1238 seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n",
1239 (u64)now.tv_sec, (unsigned long)now.tv_nsec);
1240
1241 seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
1242 seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
1243 "extents", "calls", "%", "cum%",
1244 "calls", "%", "cum%");
1245 spin_lock(&sbi->ll_lock);
1246 ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1247 spin_unlock(&sbi->ll_lock);
1248
1249 return 0;
1250 }
1251
1252 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1253 const char __user *buf,
1254 size_t len, loff_t *off)
1255 {
1256 struct seq_file *seq = file->private_data;
1257 struct ll_sb_info *sbi = seq->private;
1258 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1259 int i;
1260 int value = 1, rc = 0;
1261
1262 if (len == 0)
1263 return -EINVAL;
1264
1265 rc = lprocfs_write_helper(buf, len, &value);
1266 if (rc < 0 && len < 16) {
1267 char kernbuf[16];
1268
1269 if (copy_from_user(kernbuf, buf, len))
1270 return -EFAULT;
1271 kernbuf[len] = 0;
1272
1273 if (kernbuf[len - 1] == '\n')
1274 kernbuf[len - 1] = 0;
1275
1276 if (strcmp(kernbuf, "disabled") == 0 ||
1277 strcmp(kernbuf, "Disabled") == 0)
1278 value = 0;
1279 }
1280
1281 if (value == 0)
1282 sbi->ll_rw_stats_on = 0;
1283 else
1284 sbi->ll_rw_stats_on = 1;
1285
1286 spin_lock(&sbi->ll_pp_extent_lock);
1287 for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1288 io_extents->pp_extents[i].pid = 0;
1289 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1290 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1291 }
1292 spin_unlock(&sbi->ll_pp_extent_lock);
1293
1294 return len;
1295 }
1296
1297 LPROC_SEQ_FOPS(ll_rw_extents_stats);
1298
1299 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1300 struct ll_file_data *file, loff_t pos,
1301 size_t count, int rw)
1302 {
1303 int i, cur = -1;
1304 struct ll_rw_process_info *process;
1305 struct ll_rw_process_info *offset;
1306 int *off_count = &sbi->ll_rw_offset_entry_count;
1307 int *process_count = &sbi->ll_offset_process_count;
1308 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1309
1310 if (!sbi->ll_rw_stats_on)
1311 return;
1312 process = sbi->ll_rw_process_info;
1313 offset = sbi->ll_rw_offset_info;
1314
1315 spin_lock(&sbi->ll_pp_extent_lock);
1316 /* Extent statistics */
1317 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1318 if (io_extents->pp_extents[i].pid == pid) {
1319 cur = i;
1320 break;
1321 }
1322 }
1323
1324 if (cur == -1) {
1325 /* new process */
1326 sbi->ll_extent_process_count =
1327 (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
1328 cur = sbi->ll_extent_process_count;
1329 io_extents->pp_extents[cur].pid = pid;
1330 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
1331 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
1332 }
1333
1334 for (i = 0; (count >= (1 << LL_HIST_START << i)) &&
1335 (i < (LL_HIST_MAX - 1)); i++)
1336 ;
1337 if (rw == 0) {
1338 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
1339 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
1340 } else {
1341 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
1342 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
1343 }
1344 spin_unlock(&sbi->ll_pp_extent_lock);
1345
1346 spin_lock(&sbi->ll_process_lock);
1347 /* Offset statistics */
1348 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1349 if (process[i].rw_pid == pid) {
1350 if (process[i].rw_last_file != file) {
1351 process[i].rw_range_start = pos;
1352 process[i].rw_last_file_pos = pos + count;
1353 process[i].rw_smallest_extent = count;
1354 process[i].rw_largest_extent = count;
1355 process[i].rw_offset = 0;
1356 process[i].rw_last_file = file;
1357 spin_unlock(&sbi->ll_process_lock);
1358 return;
1359 }
1360 if (process[i].rw_last_file_pos != pos) {
1361 *off_count =
1362 (*off_count + 1) % LL_OFFSET_HIST_MAX;
1363 offset[*off_count].rw_op = process[i].rw_op;
1364 offset[*off_count].rw_pid = pid;
1365 offset[*off_count].rw_range_start =
1366 process[i].rw_range_start;
1367 offset[*off_count].rw_range_end =
1368 process[i].rw_last_file_pos;
1369 offset[*off_count].rw_smallest_extent =
1370 process[i].rw_smallest_extent;
1371 offset[*off_count].rw_largest_extent =
1372 process[i].rw_largest_extent;
1373 offset[*off_count].rw_offset =
1374 process[i].rw_offset;
1375 process[i].rw_op = rw;
1376 process[i].rw_range_start = pos;
1377 process[i].rw_smallest_extent = count;
1378 process[i].rw_largest_extent = count;
1379 process[i].rw_offset = pos -
1380 process[i].rw_last_file_pos;
1381 }
1382 if (process[i].rw_smallest_extent > count)
1383 process[i].rw_smallest_extent = count;
1384 if (process[i].rw_largest_extent < count)
1385 process[i].rw_largest_extent = count;
1386 process[i].rw_last_file_pos = pos + count;
1387 spin_unlock(&sbi->ll_process_lock);
1388 return;
1389 }
1390 }
1391 *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
1392 process[*process_count].rw_pid = pid;
1393 process[*process_count].rw_op = rw;
1394 process[*process_count].rw_range_start = pos;
1395 process[*process_count].rw_last_file_pos = pos + count;
1396 process[*process_count].rw_smallest_extent = count;
1397 process[*process_count].rw_largest_extent = count;
1398 process[*process_count].rw_offset = 0;
1399 process[*process_count].rw_last_file = file;
1400 spin_unlock(&sbi->ll_process_lock);
1401 }
1402
1403 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
1404 {
1405 struct timespec64 now;
1406 struct ll_sb_info *sbi = seq->private;
1407 struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
1408 struct ll_rw_process_info *process = sbi->ll_rw_process_info;
1409 int i;
1410
1411 ktime_get_real_ts64(&now);
1412
1413 if (!sbi->ll_rw_stats_on) {
1414 seq_printf(seq, "disabled\n"
1415 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1416 return 0;
1417 }
1418 spin_lock(&sbi->ll_process_lock);
1419
1420 seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n",
1421 (s64)now.tv_sec, (unsigned long)now.tv_nsec);
1422 seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
1423 "R/W", "PID", "RANGE START", "RANGE END",
1424 "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
1425 /* We stored the discontiguous offsets here; print them first */
1426 for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
1427 if (offset[i].rw_pid != 0)
1428 seq_printf(seq,
1429 "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
1430 offset[i].rw_op == READ ? 'R' : 'W',
1431 offset[i].rw_pid,
1432 offset[i].rw_range_start,
1433 offset[i].rw_range_end,
1434 (unsigned long)offset[i].rw_smallest_extent,
1435 (unsigned long)offset[i].rw_largest_extent,
1436 offset[i].rw_offset);
1437 }
1438 /* Then print the current offsets for each process */
1439 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1440 if (process[i].rw_pid != 0)
1441 seq_printf(seq,
1442 "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
1443 process[i].rw_op == READ ? 'R' : 'W',
1444 process[i].rw_pid,
1445 process[i].rw_range_start,
1446 process[i].rw_last_file_pos,
1447 (unsigned long)process[i].rw_smallest_extent,
1448 (unsigned long)process[i].rw_largest_extent,
1449 process[i].rw_offset);
1450 }
1451 spin_unlock(&sbi->ll_process_lock);
1452
1453 return 0;
1454 }
1455
1456 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
1457 const char __user *buf,
1458 size_t len, loff_t *off)
1459 {
1460 struct seq_file *seq = file->private_data;
1461 struct ll_sb_info *sbi = seq->private;
1462 struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
1463 struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
1464 int value = 1, rc = 0;
1465
1466 if (len == 0)
1467 return -EINVAL;
1468
1469 rc = lprocfs_write_helper(buf, len, &value);
1470
1471 if (rc < 0 && len < 16) {
1472 char kernbuf[16];
1473
1474 if (copy_from_user(kernbuf, buf, len))
1475 return -EFAULT;
1476 kernbuf[len] = 0;
1477
1478 if (kernbuf[len - 1] == '\n')
1479 kernbuf[len - 1] = 0;
1480
1481 if (strcmp(kernbuf, "disabled") == 0 ||
1482 strcmp(kernbuf, "Disabled") == 0)
1483 value = 0;
1484 }
1485
1486 if (value == 0)
1487 sbi->ll_rw_stats_on = 0;
1488 else
1489 sbi->ll_rw_stats_on = 1;
1490
1491 spin_lock(&sbi->ll_process_lock);
1492 sbi->ll_offset_process_count = 0;
1493 sbi->ll_rw_offset_entry_count = 0;
1494 memset(process_info, 0, sizeof(struct ll_rw_process_info) *
1495 LL_PROCESS_HIST_MAX);
1496 memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
1497 LL_OFFSET_HIST_MAX);
1498 spin_unlock(&sbi->ll_process_lock);
1499
1500 return len;
1501 }
1502
1503 LPROC_SEQ_FOPS(ll_rw_offset_stats);
1504
1505 void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
1506 {
1507 lvars->obd_vars = lprocfs_llite_obd_vars;
1508 }