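/*
 * tools/perf/builtin-kmem.c - `perf kmem` support: analyze the kernel
 * slab allocator tracepoints (kmem:kmalloc, kmem:kfree, ...) recorded
 * in a perf.data file and report per-callsite / per-pointer allocation
 * statistics, internal fragmentation and cross-CPU frees ("ping-pong").
 *
 * Typical usage, as implemented by the subcommands below:
 *
 *	perf kmem record [<command>]	# record the kmem tracepoints
 *	perf kmem stat --caller		# per-callsite statistics
 *	perf kmem stat --alloc -l 20	# top 20 allocation pointers
 */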
#include "builtin.h"
#include "perf.h"

#include "util/evlist.h"
#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/data.h"
#include "util/cpumap.h"

#include "util/debug.h"

#include <linux/rbtree.h>
#include <linux/string.h>

struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

/* set by --alloc/--caller; the larger one selects which view -s/-l apply to */
static int alloc_flag;
static int caller_flag;

/* number of lines to print per table; -1 means print everything */
static int alloc_lines = -1;
static int caller_lines = -1;

/* --raw-ip: print raw addresses instead of resolved kernel symbols */
static bool raw_ip;
/* one node per unique pointer (alloc view) or call site (caller view) */
struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;	/* sum of bytes requested by the callers */
	u64	bytes_alloc;	/* sum of bytes actually allocated */
	u32	hit;		/* number of allocations aggregated here */
	u32	pingpong;	/* frees observed on a different CPU */

	short	alloc_cpu;	/* CPU of the last allocation, -1 once freed */

	struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;

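/*
 * Record one allocation in the pointer-keyed tree: walk the rbtree,
 * bump the counters if the pointer is already present, otherwise link
 * in a fresh node.  call_site/alloc_cpu always track the latest event
 * so that the free handler can detect cross-CPU frees.
 */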
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			     int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
	return 0;
}

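/* Same aggregation as insert_alloc_stat(), but keyed by call site. */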
static int insert_caller_stat(unsigned long call_site,
			      int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}

	return 0;
}

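/*
 * Handler for kmem:kmalloc and kmem:kmem_cache_alloc: pull the
 * tracepoint fields out of the sample and feed both stat trees.
 */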
static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
		      call_site = perf_evsel__intval(evsel, sample, "call_site");
	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");

	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
		return -1;

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	nr_allocs++;
	return 0;
}

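/*
 * kmalloc_node/kmem_cache_alloc_node variant: additionally count
 * allocations whose "node" tracepoint field differs from the NUMA node
 * of the CPU that performed them.
 */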
static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
						struct perf_sample *sample)
{
	int ret = perf_evsel__process_alloc_event(evsel, sample);

	if (!ret) {
		int node1 = cpu__get_node(sample->cpu),
		    node2 = perf_evsel__intval(evsel, sample, "node");

		if (node1 != node2)
			nr_cross_allocs++;
	}

	return ret;
}

static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);

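/*
 * Look up a node in one of the stat trees.  The tree must be ordered
 * by the comparator that is passed in (ptr_cmp for the alloc tree,
 * callsite_cmp for the caller tree); the unused key field is 0.
 */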
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}

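/*
 * Handler for kmem:kfree and kmem:kmem_cache_free: if the free runs on
 * a CPU other than the one that allocated the object, charge a
 * "ping-pong" to both the pointer entry and its call site.
 */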
static int perf_evsel__process_free_event(struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
	struct alloc_stat *s_alloc, *s_caller;

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return 0;

	if ((short)sample->cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		if (!s_caller)
			return -1;
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);

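/*
 * Per-sample entry point: resolve the thread for bookkeeping, then
 * dispatch to the tracepoint handler installed on the evsel.
 */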
static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		return f(evsel, sample);
	}

	return 0;
}

static struct perf_tool perf_kmem = {
	.sample		 = process_sample_event,
	.comm		 = perf_event__process_comm,
	.ordered_samples = true,
};

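/*
 * Internal fragmentation as a percentage of the bytes allocated.
 * E.g. 100 bytes requested out of 128 allocated gives
 * 100.0 - (100.0 * 100 / 128) = 21.875%.
 */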
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}

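/*
 * Print one result table (caller or alloc view): resolve the call-site
 * symbol unless --raw-ip was given, then emit one row per tree node up
 * to the -l line limit.
 */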
static void __print_result(struct rb_root *root, struct perf_session *session,
			   int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine = &session->machines.host;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per   | Hit      | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine, addr, &map, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64, sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64, addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ...                                | ...             | ...             | ...      | ...       | ...   \n");

	printf("%.102s\n", graph_dotted_line);
}

static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}

static void print_result(struct perf_session *session)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_summary();
}

/* one --sort key; keys are chained in the order given on the command line */
struct sort_dimension {
	const char	name[20];
	sort_fn_t	cmp;
	struct list_head list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);

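/*
 * Insert a node into the sorted output tree, comparing with each sort
 * dimension in turn until one of them differentiates the two entries.
 * Note the inverted test: bigger entries go left so that rb_first()
 * yields them first, i.e. the tables print in descending order.
 */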
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
			  struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_insert(root_sorted, data, sort_list);
	}
}

static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}

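/*
 * `perf kmem stat`: open the perf.data file, wire up one handler per
 * kmem tracepoint, process all events, then sort and print the result.
 */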
static int __cmd_kmem(void)
{
	int err = -EINVAL;
	struct perf_session *session;
	const struct perf_evsel_str_handler kmem_tracepoints[] = {
		{ "kmem:kmalloc",		perf_evsel__process_alloc_event, },
		{ "kmem:kmem_cache_alloc",	perf_evsel__process_alloc_event, },
		{ "kmem:kmalloc_node",		perf_evsel__process_alloc_node_event, },
		{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
		{ "kmem:kfree",			perf_evsel__process_free_event, },
		{ "kmem:kmem_cache_free",	perf_evsel__process_free_event, },
	};
	struct perf_data_file file = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
	};

	session = perf_session__new(&file, false, &perf_kmem);
	if (session == NULL)
		return -ENOMEM;

	if (perf_session__create_kernel_maps(session) < 0)
		goto out_delete;

	if (!perf_session__has_traces(session, "kmem record"))
		goto out_delete;

	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		/* don't leak the session on the error path */
		goto out_delete;
	}

	setup_pager();
	err = perf_session__process_events(session, &perf_kmem);
	if (err != 0)
		goto out_delete;
	sort_result();
	print_result(session);
out_delete:
	perf_session__delete(session);
	return err;
}

static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= callsite_cmp,
};

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double x, y;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS	((int)ARRAY_SIZE(avail_sorts))

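/*
 * Resolve one --sort token against avail_sorts[] and append a copy of
 * the matching dimension to the given sort list.
 */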
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = memdup(avail_sorts[i], sizeof(*avail_sorts[i]));
			if (!sort) {
				pr_err("%s: memdup failed\n", __func__);
				return -1;
			}
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

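/*
 * Split a comma-separated --sort string into keys.  strsep() advances
 * its cursor through the copy, so keep the original pointer around and
 * free that, not the (possibly NULL or advanced) cursor.
 */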
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

static int parse_sort_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	if (!arg)
		return -1;

	/* the option given last (--caller or --alloc) owns the sort keys */
	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);
}

static int parse_caller_opt(const struct option *opt __maybe_unused,
			    const char *arg __maybe_unused,
			    int unset __maybe_unused)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __maybe_unused,
			   const char *arg __maybe_unused,
			   int unset __maybe_unused)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}

static int parse_line_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}

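/*
 * `perf kmem record ...`: build an argv for the generic record command
 * with all six kmem tracepoints enabled system-wide, forwarding any
 * extra user arguments.
 */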
static int __cmd_record(int argc, const char **argv)
{
	const char * const record_args[] = {
		"record", "-a", "-R", "-c", "1",
		"-e", "kmem:kmalloc",
		"-e", "kmem:kmalloc_node",
		"-e", "kmem:kfree",
		"-e", "kmem:kmem_cache_alloc",
		"-e", "kmem:kmem_cache_alloc_node",
		"-e", "kmem:kmem_cache_free",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

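/*
 * Command entry point: dispatch to `record` or `stat`; for `stat`,
 * build the CPU->node map and fall back to the default sort order
 * when no --sort was given.
 */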
int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const default_sort_order = "frag,hit,bytes";
	const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics", parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics", parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_END()
	};
	const char *const kmem_subcommands[] = { "record", "stat", NULL };
	const char *kmem_usage[] = {
		NULL,
		NULL
	};
	argc = parse_options_subcommand(argc, argv, kmem_options,
					kmem_subcommands, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	symbol__init();

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strcmp(argv[0], "stat")) {
		if (cpu__setup_cpunode_map())
			return -1;

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		return __cmd_kmem();
	} else
		usage_with_options(kmem_usage, kmem_options);

	return 0;
}