tools/perf/util/sort.c
1 #include <sys/mman.h>
2 #include "sort.h"
3 #include "hist.h"
4 #include "comm.h"
5 #include "symbol.h"
6 #include "evsel.h"
7 #include "evlist.h"
8 #include <traceevent/event-parse.h>
9 #include "mem-events.h"
10
11 regex_t parent_regex;
12 const char default_parent_pattern[] = "^sys_|^do_page_fault";
13 const char *parent_pattern = default_parent_pattern;
14 const char default_sort_order[] = "comm,dso,symbol";
15 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
16 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
17 const char default_top_sort_order[] = "dso,symbol";
18 const char default_diff_sort_order[] = "dso,symbol";
19 const char default_tracepoint_sort_order[] = "trace";
20 const char *sort_order;
21 const char *field_order;
22 regex_t ignore_callees_regex;
23 int have_ignore_callees = 0;
24 int sort__has_dso = 0;
25 int sort__has_socket = 0;
26 int sort__has_thread = 0;
27 int sort__has_comm = 0;
28 enum sort_mode sort__mode = SORT_MODE__NORMAL;
29
30 /*
31 * Replaces all occurrences of the character given to the:
32 *
33 * -t, --field-separator
34 *
35 * option, which selects a special separator character and disables padding
36 * with spaces: every occurrence of that separator in symbol names (and other
37 * output) is replaced with a '.', so the separator only ever delimits fields.
38 */
39 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
40 {
41 int n;
42 va_list ap;
43
44 va_start(ap, fmt);
45 n = vsnprintf(bf, size, fmt, ap);
46 if (symbol_conf.field_sep && n > 0) {
47 char *sep = bf;
48
49 while (1) {
50 sep = strchr(sep, *symbol_conf.field_sep);
51 if (sep == NULL)
52 break;
53 *sep = '.';
54 }
55 }
56 va_end(ap);
57
58 if (n >= (int)size)
59 return size - 1;
60 return n;
61 }
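/*
 * For example, hist_entry__thread_snprintf() below prints "%5d:%-*.*s";
 * when perf is run with "-t :" the ':' between pid and comm would collide
 * with the separator, so repsep_snprintf() emits it as '.', turning
 * "1234:bash" into "1234.bash" and keeping ':' unambiguous as the column
 * delimiter.
 */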
62
63 static int64_t cmp_null(const void *l, const void *r)
64 {
65 if (!l && !r)
66 return 0;
67 else if (!l)
68 return -1;
69 else
70 return 1;
71 }
72
73 /* --sort pid */
74
75 static int64_t
76 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
77 {
78 return right->thread->tid - left->thread->tid;
79 }
80
81 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
82 size_t size, unsigned int width)
83 {
84 const char *comm = thread__comm_str(he->thread);
85
86 width = max(7U, width) - 6;
87 return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
88 width, width, comm ?: "");
89 }
90
91 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
92 {
93 const struct thread *th = arg;
94
95 if (type != HIST_FILTER__THREAD)
96 return -1;
97
98 return th && he->thread != th;
99 }
100
101 struct sort_entry sort_thread = {
102 .se_header = " Pid:Command",
103 .se_cmp = sort__thread_cmp,
104 .se_snprintf = hist_entry__thread_snprintf,
105 .se_filter = hist_entry__thread_filter,
106 .se_width_idx = HISTC_THREAD,
107 };
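/*
 * A sort_entry bundles everything one sort key needs: ->se_cmp orders and
 * groups hist entries (entries that compare equal are merged), ->se_snprintf
 * renders the key into its column, ->se_filter (optional) returns nonzero
 * when the entry should be filtered for a matching filter type and -1 when
 * the type doesn't apply, and ->se_width_idx selects the column-width slot
 * tracked per hists. Keys are exposed to --sort by name via the
 * sort_dimension tables below, e.g. "pid" maps to this sort_thread entry.
 */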
108
109 /* --sort comm */
110
111 static int64_t
112 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
113 {
114 /* Compare the comm strings (comm__str() returns the thread's command name) */
115 return strcmp(comm__str(right->comm), comm__str(left->comm));
116 }
117
118 static int64_t
119 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
120 {
121 /* Compare the comm strings */
122 return strcmp(comm__str(right->comm), comm__str(left->comm));
123 }
124
125 static int64_t
126 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
127 {
128 return strcmp(comm__str(right->comm), comm__str(left->comm));
129 }
130
131 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
132 size_t size, unsigned int width)
133 {
134 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
135 }
136
137 struct sort_entry sort_comm = {
138 .se_header = "Command",
139 .se_cmp = sort__comm_cmp,
140 .se_collapse = sort__comm_collapse,
141 .se_sort = sort__comm_sort,
142 .se_snprintf = hist_entry__comm_snprintf,
143 .se_filter = hist_entry__thread_filter,
144 .se_width_idx = HISTC_COMM,
145 };
146
147 /* --sort dso */
148
149 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
150 {
151 struct dso *dso_l = map_l ? map_l->dso : NULL;
152 struct dso *dso_r = map_r ? map_r->dso : NULL;
153 const char *dso_name_l, *dso_name_r;
154
155 if (!dso_l || !dso_r)
156 return cmp_null(dso_r, dso_l);
157
158 if (verbose) {
159 dso_name_l = dso_l->long_name;
160 dso_name_r = dso_r->long_name;
161 } else {
162 dso_name_l = dso_l->short_name;
163 dso_name_r = dso_r->short_name;
164 }
165
166 return strcmp(dso_name_l, dso_name_r);
167 }
168
169 static int64_t
170 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
171 {
172 return _sort__dso_cmp(right->ms.map, left->ms.map);
173 }
174
175 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
176 size_t size, unsigned int width)
177 {
178 if (map && map->dso) {
179 const char *dso_name = !verbose ? map->dso->short_name :
180 map->dso->long_name;
181 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
182 }
183
184 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
185 }
186
187 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
188 size_t size, unsigned int width)
189 {
190 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
191 }
192
193 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
194 {
195 const struct dso *dso = arg;
196
197 if (type != HIST_FILTER__DSO)
198 return -1;
199
200 return dso && (!he->ms.map || he->ms.map->dso != dso);
201 }
202
203 struct sort_entry sort_dso = {
204 .se_header = "Shared Object",
205 .se_cmp = sort__dso_cmp,
206 .se_snprintf = hist_entry__dso_snprintf,
207 .se_filter = hist_entry__dso_filter,
208 .se_width_idx = HISTC_DSO,
209 };
210
211 /* --sort symbol */
212
213 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
214 {
215 return (int64_t)(right_ip - left_ip);
216 }
217
218 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
219 {
220 if (!sym_l || !sym_r)
221 return cmp_null(sym_l, sym_r);
222
223 if (sym_l == sym_r)
224 return 0;
225
226 if (sym_l->start != sym_r->start)
227 return (int64_t)(sym_r->start - sym_l->start);
228
229 return (int64_t)(sym_r->end - sym_l->end);
230 }
231
232 static int64_t
233 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
234 {
235 int64_t ret;
236
237 if (!left->ms.sym && !right->ms.sym)
238 return _sort__addr_cmp(left->ip, right->ip);
239
240 /*
241 * comparing symbol address alone is not enough since it's a
242 * relative address within a dso.
243 */
244 if (!sort__has_dso) {
245 ret = sort__dso_cmp(left, right);
246 if (ret != 0)
247 return ret;
248 }
249
250 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
251 }
252
253 static int64_t
254 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
255 {
256 if (!left->ms.sym || !right->ms.sym)
257 return cmp_null(left->ms.sym, right->ms.sym);
258
259 return strcmp(right->ms.sym->name, left->ms.sym->name);
260 }
261
262 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
263 u64 ip, char level, char *bf, size_t size,
264 unsigned int width)
265 {
266 size_t ret = 0;
267
268 if (verbose) {
269 char o = map ? dso__symtab_origin(map->dso) : '!';
270 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
271 BITS_PER_LONG / 4 + 2, ip, o);
272 }
273
274 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
275 if (sym && map) {
276 if (map->type == MAP__VARIABLE) {
277 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
278 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
279 ip - map->unmap_ip(map, sym->start));
280 } else {
281 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
282 width - ret,
283 sym->name);
284 }
285 } else {
286 size_t len = BITS_PER_LONG / 4;
287 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
288 len, ip);
289 }
290
291 return ret;
292 }
293
294 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
295 size_t size, unsigned int width)
296 {
297 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
298 he->level, bf, size, width);
299 }
300
301 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
302 {
303 const char *sym = arg;
304
305 if (type != HIST_FILTER__SYMBOL)
306 return -1;
307
308 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
309 }
310
311 struct sort_entry sort_sym = {
312 .se_header = "Symbol",
313 .se_cmp = sort__sym_cmp,
314 .se_sort = sort__sym_sort,
315 .se_snprintf = hist_entry__sym_snprintf,
316 .se_filter = hist_entry__sym_filter,
317 .se_width_idx = HISTC_SYMBOL,
318 };
319
320 /* --sort srcline */
321
322 static char *hist_entry__get_srcline(struct hist_entry *he)
323 {
324 struct map *map = he->ms.map;
325
326 if (!map)
327 return SRCLINE_UNKNOWN;
328
329 return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
330 he->ms.sym, true);
331 }
332
333 static int64_t
334 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
335 {
336 if (!left->srcline)
337 left->srcline = hist_entry__get_srcline(left);
338 if (!right->srcline)
339 right->srcline = hist_entry__get_srcline(right);
340
341 return strcmp(right->srcline, left->srcline);
342 }
343
344 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
345 size_t size, unsigned int width)
346 {
347 if (!he->srcline)
348 he->srcline = hist_entry__get_srcline(he);
349
350 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
351 }
352
353 struct sort_entry sort_srcline = {
354 .se_header = "Source:Line",
355 .se_cmp = sort__srcline_cmp,
356 .se_snprintf = hist_entry__srcline_snprintf,
357 .se_width_idx = HISTC_SRCLINE,
358 };
359
360 /* --sort srcfile */
361
362 static char no_srcfile[1];
363
364 static char *hist_entry__get_srcfile(struct hist_entry *e)
365 {
366 char *sf, *p;
367 struct map *map = e->ms.map;
368
369 if (!map)
370 return no_srcfile;
371
372 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
373 e->ms.sym, false, true);
374 if (!strcmp(sf, SRCLINE_UNKNOWN))
375 return no_srcfile;
376 p = strchr(sf, ':');
377 if (p && *sf) {
378 *p = 0;
379 return sf;
380 }
381 free(sf);
382 return no_srcfile;
383 }
384
385 static int64_t
386 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
387 {
388 if (!left->srcfile)
389 left->srcfile = hist_entry__get_srcfile(left);
390 if (!right->srcfile)
391 right->srcfile = hist_entry__get_srcfile(right);
392
393 return strcmp(right->srcfile, left->srcfile);
394 }
395
396 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
397 size_t size, unsigned int width)
398 {
399 if (!he->srcfile)
400 he->srcfile = hist_entry__get_srcfile(he);
401
402 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
403 }
404
405 struct sort_entry sort_srcfile = {
406 .se_header = "Source File",
407 .se_cmp = sort__srcfile_cmp,
408 .se_snprintf = hist_entry__srcfile_snprintf,
409 .se_width_idx = HISTC_SRCFILE,
410 };
411
412 /* --sort parent */
413
414 static int64_t
415 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
416 {
417 struct symbol *sym_l = left->parent;
418 struct symbol *sym_r = right->parent;
419
420 if (!sym_l || !sym_r)
421 return cmp_null(sym_l, sym_r);
422
423 return strcmp(sym_r->name, sym_l->name);
424 }
425
426 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
427 size_t size, unsigned int width)
428 {
429 return repsep_snprintf(bf, size, "%-*.*s", width, width,
430 he->parent ? he->parent->name : "[other]");
431 }
432
433 struct sort_entry sort_parent = {
434 .se_header = "Parent symbol",
435 .se_cmp = sort__parent_cmp,
436 .se_snprintf = hist_entry__parent_snprintf,
437 .se_width_idx = HISTC_PARENT,
438 };
439
440 /* --sort cpu */
441
442 static int64_t
443 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
444 {
445 return right->cpu - left->cpu;
446 }
447
448 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
449 size_t size, unsigned int width)
450 {
451 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
452 }
453
454 struct sort_entry sort_cpu = {
455 .se_header = "CPU",
456 .se_cmp = sort__cpu_cmp,
457 .se_snprintf = hist_entry__cpu_snprintf,
458 .se_width_idx = HISTC_CPU,
459 };
460
461 /* --sort socket */
462
463 static int64_t
464 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
465 {
466 return right->socket - left->socket;
467 }
468
469 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
470 size_t size, unsigned int width)
471 {
472 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
473 }
474
475 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
476 {
477 int sk = *(const int *)arg;
478
479 if (type != HIST_FILTER__SOCKET)
480 return -1;
481
482 return sk >= 0 && he->socket != sk;
483 }
484
485 struct sort_entry sort_socket = {
486 .se_header = "Socket",
487 .se_cmp = sort__socket_cmp,
488 .se_snprintf = hist_entry__socket_snprintf,
489 .se_filter = hist_entry__socket_filter,
490 .se_width_idx = HISTC_SOCKET,
491 };
492
493 /* --sort trace */
494
495 static char *get_trace_output(struct hist_entry *he)
496 {
497 struct trace_seq seq;
498 struct perf_evsel *evsel;
499 struct pevent_record rec = {
500 .data = he->raw_data,
501 .size = he->raw_size,
502 };
503
504 evsel = hists_to_evsel(he->hists);
505
506 trace_seq_init(&seq);
507 if (symbol_conf.raw_trace) {
508 pevent_print_fields(&seq, he->raw_data, he->raw_size,
509 evsel->tp_format);
510 } else {
511 pevent_event_info(&seq, evsel->tp_format, &rec);
512 }
513 return seq.buffer;
514 }
515
516 static int64_t
517 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
518 {
519 struct perf_evsel *evsel;
520
521 evsel = hists_to_evsel(left->hists);
522 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
523 return 0;
524
525 if (left->trace_output == NULL)
526 left->trace_output = get_trace_output(left);
527 if (right->trace_output == NULL)
528 right->trace_output = get_trace_output(right);
529
530 return strcmp(right->trace_output, left->trace_output);
531 }
532
533 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
534 size_t size, unsigned int width)
535 {
536 struct perf_evsel *evsel;
537
538 evsel = hists_to_evsel(he->hists);
539 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
540 return scnprintf(bf, size, "%-.*s", width, "N/A");
541
542 if (he->trace_output == NULL)
543 he->trace_output = get_trace_output(he);
544 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
545 }
546
547 struct sort_entry sort_trace = {
548 .se_header = "Trace output",
549 .se_cmp = sort__trace_cmp,
550 .se_snprintf = hist_entry__trace_snprintf,
551 .se_width_idx = HISTC_TRACE,
552 };
553
554 /* sort keys for branch stacks */
555
556 static int64_t
557 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
558 {
559 if (!left->branch_info || !right->branch_info)
560 return cmp_null(left->branch_info, right->branch_info);
561
562 return _sort__dso_cmp(left->branch_info->from.map,
563 right->branch_info->from.map);
564 }
565
566 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
567 size_t size, unsigned int width)
568 {
569 if (he->branch_info)
570 return _hist_entry__dso_snprintf(he->branch_info->from.map,
571 bf, size, width);
572 else
573 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
574 }
575
576 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
577 const void *arg)
578 {
579 const struct dso *dso = arg;
580
581 if (type != HIST_FILTER__DSO)
582 return -1;
583
584 return dso && (!he->branch_info || !he->branch_info->from.map ||
585 he->branch_info->from.map->dso != dso);
586 }
587
588 static int64_t
589 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
590 {
591 if (!left->branch_info || !right->branch_info)
592 return cmp_null(left->branch_info, right->branch_info);
593
594 return _sort__dso_cmp(left->branch_info->to.map,
595 right->branch_info->to.map);
596 }
597
598 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
599 size_t size, unsigned int width)
600 {
601 if (he->branch_info)
602 return _hist_entry__dso_snprintf(he->branch_info->to.map,
603 bf, size, width);
604 else
605 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
606 }
607
608 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
609 const void *arg)
610 {
611 const struct dso *dso = arg;
612
613 if (type != HIST_FILTER__DSO)
614 return -1;
615
616 return dso && (!he->branch_info || !he->branch_info->to.map ||
617 he->branch_info->to.map->dso != dso);
618 }
619
620 static int64_t
621 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
622 {
623 struct addr_map_symbol *from_l, *from_r;
625
626 if (!left->branch_info || !right->branch_info)
627 return cmp_null(left->branch_info, right->branch_info);
628
629 from_l = &left->branch_info->from;
630 from_r = &right->branch_info->from;
631
632 if (!from_l->sym && !from_r->sym)
633 return _sort__addr_cmp(from_l->addr, from_r->addr);
634
635 return _sort__sym_cmp(from_l->sym, from_r->sym);
636 }
637
638 static int64_t
639 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
640 {
641 struct addr_map_symbol *to_l, *to_r;
642
643 if (!left->branch_info || !right->branch_info)
644 return cmp_null(left->branch_info, right->branch_info);
645
646 to_l = &left->branch_info->to;
647 to_r = &right->branch_info->to;
648
649 if (!to_l->sym && !to_r->sym)
650 return _sort__addr_cmp(to_l->addr, to_r->addr);
651
652 return _sort__sym_cmp(to_l->sym, to_r->sym);
653 }
654
655 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
656 size_t size, unsigned int width)
657 {
658 if (he->branch_info) {
659 struct addr_map_symbol *from = &he->branch_info->from;
660
661 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
662 he->level, bf, size, width);
663 }
664
665 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
666 }
667
668 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
669 size_t size, unsigned int width)
670 {
671 if (he->branch_info) {
672 struct addr_map_symbol *to = &he->branch_info->to;
673
674 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
675 he->level, bf, size, width);
676 }
677
678 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
679 }
680
681 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
682 const void *arg)
683 {
684 const char *sym = arg;
685
686 if (type != HIST_FILTER__SYMBOL)
687 return -1;
688
689 return sym && !(he->branch_info && he->branch_info->from.sym &&
690 strstr(he->branch_info->from.sym->name, sym));
691 }
692
693 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
694 const void *arg)
695 {
696 const char *sym = arg;
697
698 if (type != HIST_FILTER__SYMBOL)
699 return -1;
700
701 return sym && !(he->branch_info && he->branch_info->to.sym &&
702 strstr(he->branch_info->to.sym->name, sym));
703 }
704
705 struct sort_entry sort_dso_from = {
706 .se_header = "Source Shared Object",
707 .se_cmp = sort__dso_from_cmp,
708 .se_snprintf = hist_entry__dso_from_snprintf,
709 .se_filter = hist_entry__dso_from_filter,
710 .se_width_idx = HISTC_DSO_FROM,
711 };
712
713 struct sort_entry sort_dso_to = {
714 .se_header = "Target Shared Object",
715 .se_cmp = sort__dso_to_cmp,
716 .se_snprintf = hist_entry__dso_to_snprintf,
717 .se_filter = hist_entry__dso_to_filter,
718 .se_width_idx = HISTC_DSO_TO,
719 };
720
721 struct sort_entry sort_sym_from = {
722 .se_header = "Source Symbol",
723 .se_cmp = sort__sym_from_cmp,
724 .se_snprintf = hist_entry__sym_from_snprintf,
725 .se_filter = hist_entry__sym_from_filter,
726 .se_width_idx = HISTC_SYMBOL_FROM,
727 };
728
729 struct sort_entry sort_sym_to = {
730 .se_header = "Target Symbol",
731 .se_cmp = sort__sym_to_cmp,
732 .se_snprintf = hist_entry__sym_to_snprintf,
733 .se_filter = hist_entry__sym_to_filter,
734 .se_width_idx = HISTC_SYMBOL_TO,
735 };
736
737 static int64_t
738 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
739 {
740 unsigned char mp, p;
741
742 if (!left->branch_info || !right->branch_info)
743 return cmp_null(left->branch_info, right->branch_info);
744
745 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
746 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
747 return mp || p;
748 }
749
750 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
751 size_t size, unsigned int width)
{
752 static const char *out = "N/A";
753
754 if (he->branch_info) {
755 if (he->branch_info->flags.predicted)
756 out = "N";
757 else if (he->branch_info->flags.mispred)
758 out = "Y";
759 }
760
761 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
762 }
763
764 static int64_t
765 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
766 {
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);

767 return left->branch_info->flags.cycles -
768 right->branch_info->flags.cycles;
769 }
770
771 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
772 size_t size, unsigned int width)
773 {
if (!he->branch_info)
return repsep_snprintf(bf, size, "%-*s", width, "N/A");
774 if (he->branch_info->flags.cycles == 0)
775 return repsep_snprintf(bf, size, "%-*s", width, "-");
776 return repsep_snprintf(bf, size, "%-*hd", width,
777 he->branch_info->flags.cycles);
778 }
779
780 struct sort_entry sort_cycles = {
781 .se_header = "Basic Block Cycles",
782 .se_cmp = sort__cycles_cmp,
783 .se_snprintf = hist_entry__cycles_snprintf,
784 .se_width_idx = HISTC_CYCLES,
785 };
786
787 /* --sort daddr_sym */
788 static int64_t
789 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
790 {
791 uint64_t l = 0, r = 0;
792
793 if (left->mem_info)
794 l = left->mem_info->daddr.addr;
795 if (right->mem_info)
796 r = right->mem_info->daddr.addr;
797
798 return (int64_t)(r - l);
799 }
800
801 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
802 size_t size, unsigned int width)
803 {
804 uint64_t addr = 0;
805 struct map *map = NULL;
806 struct symbol *sym = NULL;
807
808 if (he->mem_info) {
809 addr = he->mem_info->daddr.addr;
810 map = he->mem_info->daddr.map;
811 sym = he->mem_info->daddr.sym;
812 }
813 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
814 width);
815 }
816
817 static int64_t
818 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
819 {
820 uint64_t l = 0, r = 0;
821
822 if (left->mem_info)
823 l = left->mem_info->iaddr.addr;
824 if (right->mem_info)
825 r = right->mem_info->iaddr.addr;
826
827 return (int64_t)(r - l);
828 }
829
830 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
831 size_t size, unsigned int width)
832 {
833 uint64_t addr = 0;
834 struct map *map = NULL;
835 struct symbol *sym = NULL;
836
837 if (he->mem_info) {
838 addr = he->mem_info->iaddr.addr;
839 map = he->mem_info->iaddr.map;
840 sym = he->mem_info->iaddr.sym;
841 }
842 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
843 width);
844 }
845
846 static int64_t
847 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
848 {
849 struct map *map_l = NULL;
850 struct map *map_r = NULL;
851
852 if (left->mem_info)
853 map_l = left->mem_info->daddr.map;
854 if (right->mem_info)
855 map_r = right->mem_info->daddr.map;
856
857 return _sort__dso_cmp(map_l, map_r);
858 }
859
860 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
861 size_t size, unsigned int width)
862 {
863 struct map *map = NULL;
864
865 if (he->mem_info)
866 map = he->mem_info->daddr.map;
867
868 return _hist_entry__dso_snprintf(map, bf, size, width);
869 }
870
871 static int64_t
872 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
873 {
874 union perf_mem_data_src data_src_l;
875 union perf_mem_data_src data_src_r;
876
877 if (left->mem_info)
878 data_src_l = left->mem_info->data_src;
879 else
880 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
881
882 if (right->mem_info)
883 data_src_r = right->mem_info->data_src;
884 else
885 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
886
887 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
888 }
889
890 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
891 size_t size, unsigned int width)
892 {
893 char out[10];
894
895 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
896 return repsep_snprintf(bf, size, "%.*s", width, out);
897 }
898
899 static int64_t
900 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
901 {
902 union perf_mem_data_src data_src_l;
903 union perf_mem_data_src data_src_r;
904
905 if (left->mem_info)
906 data_src_l = left->mem_info->data_src;
907 else
908 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
909
910 if (right->mem_info)
911 data_src_r = right->mem_info->data_src;
912 else
913 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
914
915 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
916 }
917
918 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
919 size_t size, unsigned int width)
920 {
921 char out[64];
922
923 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
924 return repsep_snprintf(bf, size, "%-*s", width, out);
925 }
926
927 static int64_t
928 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
929 {
930 union perf_mem_data_src data_src_l;
931 union perf_mem_data_src data_src_r;
932
933 if (left->mem_info)
934 data_src_l = left->mem_info->data_src;
935 else
936 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
937
938 if (right->mem_info)
939 data_src_r = right->mem_info->data_src;
940 else
941 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
942
943 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
944 }
945
946 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
947 size_t size, unsigned int width)
948 {
949 char out[64];
950
951 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
952 return repsep_snprintf(bf, size, "%-*s", width, out);
953 }
954
955 static int64_t
956 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
957 {
958 union perf_mem_data_src data_src_l;
959 union perf_mem_data_src data_src_r;
960
961 if (left->mem_info)
962 data_src_l = left->mem_info->data_src;
963 else
964 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
965
966 if (right->mem_info)
967 data_src_r = right->mem_info->data_src;
968 else
969 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
970
971 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
972 }
973
974 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
975 size_t size, unsigned int width)
976 {
977 char out[64];
978
979 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
980 return repsep_snprintf(bf, size, "%-*s", width, out);
981 }
982
983 static int64_t
984 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
985 {
986 u64 l, r;
987 struct map *l_map, *r_map;
988
989 if (!left->mem_info) return -1;
990 if (!right->mem_info) return 1;
991
992 /* group event types together */
993 if (left->cpumode > right->cpumode) return -1;
994 if (left->cpumode < right->cpumode) return 1;
995
996 l_map = left->mem_info->daddr.map;
997 r_map = right->mem_info->daddr.map;
998
999 /* if both are NULL, jump to sort on al_addr instead */
1000 if (!l_map && !r_map)
1001 goto addr;
1002
1003 if (!l_map) return -1;
1004 if (!r_map) return 1;
1005
1006 if (l_map->maj > r_map->maj) return -1;
1007 if (l_map->maj < r_map->maj) return 1;
1008
1009 if (l_map->min > r_map->min) return -1;
1010 if (l_map->min < r_map->min) return 1;
1011
1012 if (l_map->ino > r_map->ino) return -1;
1013 if (l_map->ino < r_map->ino) return 1;
1014
1015 if (l_map->ino_generation > r_map->ino_generation) return -1;
1016 if (l_map->ino_generation < r_map->ino_generation) return 1;
1017
1018 /*
1019 * Addresses with no major/minor numbers are assumed to be
1020 * anonymous in userspace. Sort those on pid then address.
1021 *
1022 * The kernel and non-zero major/minor mapped areas are
1023 * assumed to be unity mapped. Sort those on address.
1024 */
1025
1026 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1027 (!(l_map->flags & MAP_SHARED)) &&
1028 !l_map->maj && !l_map->min && !l_map->ino &&
1029 !l_map->ino_generation) {
1030 /* userspace anonymous */
1031
1032 if (left->thread->pid_ > right->thread->pid_) return -1;
1033 if (left->thread->pid_ < right->thread->pid_) return 1;
1034 }
1035
1036 addr:
1037 /* al_addr does all the right addr - start + offset calculations */
1038 l = cl_address(left->mem_info->daddr.al_addr);
1039 r = cl_address(right->mem_info->daddr.al_addr);
1040
1041 if (l > r) return -1;
1042 if (l < r) return 1;
1043
1044 return 0;
1045 }
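/*
 * Example: assuming cl_address() rounds an address down to its cache-line
 * boundary, with 64-byte lines two loads resolving to al_addr 0x7f2a10 and
 * 0x7f2a38 both fall in the line at 0x7f2a00 and compare equal here, so
 * they collapse into one "Data Cacheline" entry, while a load at 0x7f2a40
 * lands in the next line and stays separate.
 */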
1046
1047 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1048 size_t size, unsigned int width)
1049 {
1050
1051 uint64_t addr = 0;
1052 struct map *map = NULL;
1053 struct symbol *sym = NULL;
1054 char level = he->level;
1055
1056 if (he->mem_info) {
1057 addr = cl_address(he->mem_info->daddr.al_addr);
1058 map = he->mem_info->daddr.map;
1059 sym = he->mem_info->daddr.sym;
1060
1061 /* print [s] for shared data mmaps */
1062 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1063 map && (map->type == MAP__VARIABLE) &&
1064 (map->flags & MAP_SHARED) &&
1065 (map->maj || map->min || map->ino ||
1066 map->ino_generation))
1067 level = 's';
1068 else if (!map)
1069 level = 'X';
1070 }
1071 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1072 width);
1073 }
1074
1075 struct sort_entry sort_mispredict = {
1076 .se_header = "Branch Mispredicted",
1077 .se_cmp = sort__mispredict_cmp,
1078 .se_snprintf = hist_entry__mispredict_snprintf,
1079 .se_width_idx = HISTC_MISPREDICT,
1080 };
1081
1082 static u64 he_weight(struct hist_entry *he)
1083 {
1084 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1085 }
1086
1087 static int64_t
1088 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1089 {
1090 return he_weight(left) - he_weight(right);
1091 }
1092
1093 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1094 size_t size, unsigned int width)
1095 {
1096 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1097 }
1098
1099 struct sort_entry sort_local_weight = {
1100 .se_header = "Local Weight",
1101 .se_cmp = sort__local_weight_cmp,
1102 .se_snprintf = hist_entry__local_weight_snprintf,
1103 .se_width_idx = HISTC_LOCAL_WEIGHT,
1104 };
1105
1106 static int64_t
1107 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1108 {
1109 return left->stat.weight - right->stat.weight;
1110 }
1111
1112 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1113 size_t size, unsigned int width)
1114 {
1115 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1116 }
1117
1118 struct sort_entry sort_global_weight = {
1119 .se_header = "Weight",
1120 .se_cmp = sort__global_weight_cmp,
1121 .se_snprintf = hist_entry__global_weight_snprintf,
1122 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1123 };
1124
1125 struct sort_entry sort_mem_daddr_sym = {
1126 .se_header = "Data Symbol",
1127 .se_cmp = sort__daddr_cmp,
1128 .se_snprintf = hist_entry__daddr_snprintf,
1129 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1130 };
1131
1132 struct sort_entry sort_mem_iaddr_sym = {
1133 .se_header = "Code Symbol",
1134 .se_cmp = sort__iaddr_cmp,
1135 .se_snprintf = hist_entry__iaddr_snprintf,
1136 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1137 };
1138
1139 struct sort_entry sort_mem_daddr_dso = {
1140 .se_header = "Data Object",
1141 .se_cmp = sort__dso_daddr_cmp,
1142 .se_snprintf = hist_entry__dso_daddr_snprintf,
1143 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1144 };
1145
1146 struct sort_entry sort_mem_locked = {
1147 .se_header = "Locked",
1148 .se_cmp = sort__locked_cmp,
1149 .se_snprintf = hist_entry__locked_snprintf,
1150 .se_width_idx = HISTC_MEM_LOCKED,
1151 };
1152
1153 struct sort_entry sort_mem_tlb = {
1154 .se_header = "TLB access",
1155 .se_cmp = sort__tlb_cmp,
1156 .se_snprintf = hist_entry__tlb_snprintf,
1157 .se_width_idx = HISTC_MEM_TLB,
1158 };
1159
1160 struct sort_entry sort_mem_lvl = {
1161 .se_header = "Memory access",
1162 .se_cmp = sort__lvl_cmp,
1163 .se_snprintf = hist_entry__lvl_snprintf,
1164 .se_width_idx = HISTC_MEM_LVL,
1165 };
1166
1167 struct sort_entry sort_mem_snoop = {
1168 .se_header = "Snoop",
1169 .se_cmp = sort__snoop_cmp,
1170 .se_snprintf = hist_entry__snoop_snprintf,
1171 .se_width_idx = HISTC_MEM_SNOOP,
1172 };
1173
1174 struct sort_entry sort_mem_dcacheline = {
1175 .se_header = "Data Cacheline",
1176 .se_cmp = sort__dcacheline_cmp,
1177 .se_snprintf = hist_entry__dcacheline_snprintf,
1178 .se_width_idx = HISTC_MEM_DCACHELINE,
1179 };
1180
1181 static int64_t
1182 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1183 {
1184 if (!left->branch_info || !right->branch_info)
1185 return cmp_null(left->branch_info, right->branch_info);
1186
1187 return left->branch_info->flags.abort !=
1188 right->branch_info->flags.abort;
1189 }
1190
1191 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1192 size_t size, unsigned int width)
1193 {
1194 static const char *out = "N/A";
1195
1196 if (he->branch_info) {
1197 if (he->branch_info->flags.abort)
1198 out = "A";
1199 else
1200 out = ".";
1201 }
1202
1203 return repsep_snprintf(bf, size, "%-*s", width, out);
1204 }
1205
1206 struct sort_entry sort_abort = {
1207 .se_header = "Transaction abort",
1208 .se_cmp = sort__abort_cmp,
1209 .se_snprintf = hist_entry__abort_snprintf,
1210 .se_width_idx = HISTC_ABORT,
1211 };
1212
1213 static int64_t
1214 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1215 {
1216 if (!left->branch_info || !right->branch_info)
1217 return cmp_null(left->branch_info, right->branch_info);
1218
1219 return left->branch_info->flags.in_tx !=
1220 right->branch_info->flags.in_tx;
1221 }
1222
1223 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1224 size_t size, unsigned int width)
1225 {
1226 static const char *out = "N/A";
1227
1228 if (he->branch_info) {
1229 if (he->branch_info->flags.in_tx)
1230 out = "T";
1231 else
1232 out = ".";
1233 }
1234
1235 return repsep_snprintf(bf, size, "%-*s", width, out);
1236 }
1237
1238 struct sort_entry sort_in_tx = {
1239 .se_header = "Branch in transaction",
1240 .se_cmp = sort__in_tx_cmp,
1241 .se_snprintf = hist_entry__in_tx_snprintf,
1242 .se_width_idx = HISTC_IN_TX,
1243 };
1244
1245 static int64_t
1246 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1247 {
1248 return left->transaction - right->transaction;
1249 }
1250
1251 static inline char *add_str(char *p, const char *str)
1252 {
1253 strcpy(p, str);
1254 return p + strlen(str);
1255 }
1256
1257 static struct txbit {
1258 unsigned flag;
1259 const char *name;
1260 int skip_for_len;
1261 } txbits[] = {
1262 { PERF_TXN_ELISION, "EL ", 0 },
1263 { PERF_TXN_TRANSACTION, "TX ", 1 },
1264 { PERF_TXN_SYNC, "SYNC ", 1 },
1265 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1266 { PERF_TXN_RETRY, "RETRY ", 0 },
1267 { PERF_TXN_CONFLICT, "CON ", 0 },
1268 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1269 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1270 { 0, NULL, 0 }
1271 };
1272
1273 int hist_entry__transaction_len(void)
1274 {
1275 int i;
1276 int len = 0;
1277
1278 for (i = 0; txbits[i].name; i++) {
1279 if (!txbits[i].skip_for_len)
1280 len += strlen(txbits[i].name);
1281 }
1282 len += 4; /* :XX<space> */
1283 return len;
1284 }
1285
1286 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1287 size_t size, unsigned int width)
1288 {
1289 u64 t = he->transaction;
1290 char buf[128];
1291 char *p = buf;
1292 int i;
1293
1294 buf[0] = 0;
1295 for (i = 0; txbits[i].name; i++)
1296 if (txbits[i].flag & t)
1297 p = add_str(p, txbits[i].name);
1298 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1299 p = add_str(p, "NEITHER ");
1300 if (t & PERF_TXN_ABORT_MASK) {
1301 sprintf(p, ":%" PRIx64,
1302 (t & PERF_TXN_ABORT_MASK) >>
1303 PERF_TXN_ABORT_SHIFT);
1304 p += strlen(p);
1305 }
1306
1307 return repsep_snprintf(bf, size, "%-*s", width, buf);
1308 }
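/*
 * Example: a transaction word with PERF_TXN_TRANSACTION and PERF_TXN_SYNC
 * set and a zero abort code renders as "TX SYNC "; if the bits under
 * PERF_TXN_ABORT_MASK held 2, the column would read "TX SYNC :2". A nonzero
 * word with neither SYNC nor ASYNC set gets "NEITHER " appended instead.
 */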
1309
1310 struct sort_entry sort_transaction = {
1311 .se_header = "Transaction ",
1312 .se_cmp = sort__transaction_cmp,
1313 .se_snprintf = hist_entry__transaction_snprintf,
1314 .se_width_idx = HISTC_TRANSACTION,
1315 };
1316
1317 struct sort_dimension {
1318 const char *name;
1319 struct sort_entry *entry;
1320 int taken;
1321 };
1322
1323 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1324
1325 static struct sort_dimension common_sort_dimensions[] = {
1326 DIM(SORT_PID, "pid", sort_thread),
1327 DIM(SORT_COMM, "comm", sort_comm),
1328 DIM(SORT_DSO, "dso", sort_dso),
1329 DIM(SORT_SYM, "symbol", sort_sym),
1330 DIM(SORT_PARENT, "parent", sort_parent),
1331 DIM(SORT_CPU, "cpu", sort_cpu),
1332 DIM(SORT_SOCKET, "socket", sort_socket),
1333 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1334 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1335 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1336 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1337 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1338 DIM(SORT_TRACE, "trace", sort_trace),
1339 };
1340
1341 #undef DIM
1342
1343 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1344
1345 static struct sort_dimension bstack_sort_dimensions[] = {
1346 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1347 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1348 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1349 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1350 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1351 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1352 DIM(SORT_ABORT, "abort", sort_abort),
1353 DIM(SORT_CYCLES, "cycles", sort_cycles),
1354 };
1355
1356 #undef DIM
1357
1358 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1359
1360 static struct sort_dimension memory_sort_dimensions[] = {
1361 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1362 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1363 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1364 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1365 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1366 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1367 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1368 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1369 };
1370
1371 #undef DIM
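/*
 * These tables are what --sort/--fields strings are matched against: each
 * comma-separated token is looked up by ->name and the corresponding
 * sort_entry is added to the hpp list by the sort-order parser elsewhere in
 * sort.c. For instance default_sort_order "comm,dso,symbol" resolves to
 * sort_comm, sort_dso and sort_sym from common_sort_dimensions, while the
 * branch default also pulls dso_from/symbol_from/symbol_to/cycles from
 * bstack_sort_dimensions.
 */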
1372
1373 struct hpp_dimension {
1374 const char *name;
1375 struct perf_hpp_fmt *fmt;
1376 int taken;
1377 };
1378
1379 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1380
1381 static struct hpp_dimension hpp_sort_dimensions[] = {
1382 DIM(PERF_HPP__OVERHEAD, "overhead"),
1383 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1384 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1385 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1386 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1387 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1388 DIM(PERF_HPP__SAMPLES, "sample"),
1389 DIM(PERF_HPP__PERIOD, "period"),
1390 };
1391
1392 #undef DIM
1393
1394 struct hpp_sort_entry {
1395 struct perf_hpp_fmt hpp;
1396 struct sort_entry *se;
1397 };
1398
1399 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1400 {
1401 struct hpp_sort_entry *hse;
1402
1403 if (!perf_hpp__is_sort_entry(fmt))
1404 return;
1405
1406 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1407 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1408 }
1409
1410 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1411 struct perf_evsel *evsel)
1412 {
1413 struct hpp_sort_entry *hse;
1414 size_t len = fmt->user_len;
1415
1416 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1417
1418 if (!len)
1419 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1420
1421 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1422 }
1423
1424 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1425 struct perf_hpp *hpp __maybe_unused,
1426 struct perf_evsel *evsel)
1427 {
1428 struct hpp_sort_entry *hse;
1429 size_t len = fmt->user_len;
1430
1431 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1432
1433 if (!len)
1434 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1435
1436 return len;
1437 }
1438
1439 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1440 struct hist_entry *he)
1441 {
1442 struct hpp_sort_entry *hse;
1443 size_t len = fmt->user_len;
1444
1445 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1446
1447 if (!len)
1448 len = hists__col_len(he->hists, hse->se->se_width_idx);
1449
1450 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1451 }
1452
1453 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1454 struct hist_entry *a, struct hist_entry *b)
1455 {
1456 struct hpp_sort_entry *hse;
1457
1458 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1459 return hse->se->se_cmp(a, b);
1460 }
1461
1462 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1463 struct hist_entry *a, struct hist_entry *b)
1464 {
1465 struct hpp_sort_entry *hse;
1466 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1467
1468 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1469 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1470 return collapse_fn(a, b);
1471 }
1472
1473 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1474 struct hist_entry *a, struct hist_entry *b)
1475 {
1476 struct hpp_sort_entry *hse;
1477 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1478
1479 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1480 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1481 return sort_fn(a, b);
1482 }
1483
1484 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1485 {
1486 return format->header == __sort__hpp_header;
1487 }
1488
1489 #define MK_SORT_ENTRY_CHK(key) \
1490 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
1491 { \
1492 struct hpp_sort_entry *hse; \
1493 \
1494 if (!perf_hpp__is_sort_entry(fmt)) \
1495 return false; \
1496 \
1497 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
1498 return hse->se == &sort_ ## key ; \
1499 }
1500
1501 MK_SORT_ENTRY_CHK(trace)
1502 MK_SORT_ENTRY_CHK(srcline)
1503 MK_SORT_ENTRY_CHK(srcfile)
1504 MK_SORT_ENTRY_CHK(thread)
1505 MK_SORT_ENTRY_CHK(comm)
1506 MK_SORT_ENTRY_CHK(dso)
1507 MK_SORT_ENTRY_CHK(sym)
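/*
 * Each invocation above generates a predicate of the form
 * perf_hpp__is_<key>_entry(fmt), e.g. perf_hpp__is_trace_entry() returns
 * true only when fmt wraps a hpp_sort_entry whose ->se is &sort_trace;
 * callers use these to recognize specific sort columns.
 */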
1508
1509
1510 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1511 {
1512 struct hpp_sort_entry *hse_a;
1513 struct hpp_sort_entry *hse_b;
1514
1515 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1516 return false;
1517
1518 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1519 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1520
1521 return hse_a->se == hse_b->se;
1522 }
1523
1524 static void hse_free(struct perf_hpp_fmt *fmt)
1525 {
1526 struct hpp_sort_entry *hse;
1527
1528 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1529 free(hse);
1530 }
1531
1532 static struct hpp_sort_entry *
1533 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
1534 {
1535 struct hpp_sort_entry *hse;
1536
1537 hse = malloc(sizeof(*hse));
1538 if (hse == NULL) {
1539 pr_err("Memory allocation failed\n");
1540 return NULL;
1541 }
1542
1543 hse->se = sd->entry;
1544 hse->hpp.name = sd->entry->se_header;
1545 hse->hpp.header = __sort__hpp_header;
1546 hse->hpp.width = __sort__hpp_width;
1547 hse->hpp.entry = __sort__hpp_entry;
1548 hse->hpp.color = NULL;
1549
1550 hse->hpp.cmp = __sort__hpp_cmp;
1551 hse->hpp.collapse = __sort__hpp_collapse;
1552 hse->hpp.sort = __sort__hpp_sort;
1553 hse->hpp.equal = __sort__hpp_equal;
1554 hse->hpp.free = hse_free;
1555
1556 INIT_LIST_HEAD(&hse->hpp.list);
1557 INIT_LIST_HEAD(&hse->hpp.sort_list);
1558 hse->hpp.elide = false;
1559 hse->hpp.len = 0;
1560 hse->hpp.user_len = 0;
1561 hse->hpp.level = level;
1562
1563 return hse;
1564 }
1565
1566 static void hpp_free(struct perf_hpp_fmt *fmt)
1567 {
1568 free(fmt);
1569 }
1570
1571 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
1572 int level)
1573 {
1574 struct perf_hpp_fmt *fmt;
1575
1576 fmt = memdup(hd->fmt, sizeof(*fmt));
1577 if (fmt) {
1578 INIT_LIST_HEAD(&fmt->list);
1579 INIT_LIST_HEAD(&fmt->sort_list);
1580 fmt->free = hpp_free;
1581 fmt->level = level;
1582 }
1583
1584 return fmt;
1585 }
1586
1587 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
1588 {
1589 struct perf_hpp_fmt *fmt;
1590 struct hpp_sort_entry *hse;
1591 int ret = -1;
1592 int r;
1593
1594 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1595 if (!perf_hpp__is_sort_entry(fmt))
1596 continue;
1597
1598 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1599 if (hse->se->se_filter == NULL)
1600 continue;
1601
1602 /*
1603 * A hist entry is filtered if any of the sort keys in the hpp list
1604 * applies its filter; filter types that don't match a key are skipped.
1605 */
1606 r = hse->se->se_filter(he, type, arg);
1607 if (r >= 0) {
1608 if (ret < 0)
1609 ret = 0;
1610 ret |= r;
1611 }
1612 }
1613
1614 return ret;
1615 }
1616
1617 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
1618 struct perf_hpp_list *list,
1619 int level)
1620 {
1621 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
1622
1623 if (hse == NULL)
1624 return -1;
1625
1626 perf_hpp_list__register_sort_field(list, &hse->hpp);
1627 return 0;
1628 }
1629
1630 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
1631 struct perf_hpp_list *list)
1632 {
1633 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
1634
1635 if (hse == NULL)
1636 return -1;
1637
1638 perf_hpp_list__column_register(list, &hse->hpp);
1639 return 0;
1640 }
1641
1642 struct hpp_dynamic_entry {
1643 struct perf_hpp_fmt hpp;
1644 struct perf_evsel *evsel;
1645 struct format_field *field;
1646 unsigned dynamic_len;
1647 bool raw_trace;
1648 };
1649
1650 static int hde_width(struct hpp_dynamic_entry *hde)
1651 {
1652 if (!hde->hpp.len) {
1653 int len = hde->dynamic_len;
1654 int namelen = strlen(hde->field->name);
1655 int fieldlen = hde->field->size;
1656
1657 if (namelen > len)
1658 len = namelen;
1659
1660 if (!(hde->field->flags & FIELD_IS_STRING)) {
1661 /* length needed to print the value as hex */
1662 fieldlen = hde->field->size * 2 + 2;
1663 }
1664 if (fieldlen > len)
1665 len = fieldlen;
1666
1667 hde->hpp.len = len;
1668 }
1669 return hde->hpp.len;
1670 }
1671
1672 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1673 struct hist_entry *he)
1674 {
1675 char *str, *pos;
1676 struct format_field *field = hde->field;
1677 size_t namelen;
1678 bool last = false;
1679
1680 if (hde->raw_trace)
1681 return;
1682
1683 /* parse pretty print result and update max length */
1684 if (!he->trace_output)
1685 he->trace_output = get_trace_output(he);
1686
1687 namelen = strlen(field->name);
1688 str = he->trace_output;
1689
1690 while (str) {
1691 pos = strchr(str, ' ');
1692 if (pos == NULL) {
1693 last = true;
1694 pos = str + strlen(str);
1695 }
1696
1697 if (!strncmp(str, field->name, namelen)) {
1698 size_t len;
1699
1700 str += namelen + 1;
1701 len = pos - str;
1702
1703 if (len > hde->dynamic_len)
1704 hde->dynamic_len = len;
1705 break;
1706 }
1707
1708 if (last)
1709 str = NULL;
1710 else
1711 str = pos + 1;
1712 }
1713 }
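/*
 * Example: given the pretty-printed output "prev_comm=perf prev_pid=1234
 * next_comm=swapper/0 ...", a column for the "prev_pid" field finds the
 * token starting with "prev_pid", skips the name and the '=', and measures
 * up to the next space, so dynamic_len grows to 4 (the width of "1234").
 */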
1714
1715 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1716 struct perf_evsel *evsel __maybe_unused)
1717 {
1718 struct hpp_dynamic_entry *hde;
1719 size_t len = fmt->user_len;
1720
1721 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1722
1723 if (!len)
1724 len = hde_width(hde);
1725
1726 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1727 }
1728
1729 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1730 struct perf_hpp *hpp __maybe_unused,
1731 struct perf_evsel *evsel __maybe_unused)
1732 {
1733 struct hpp_dynamic_entry *hde;
1734 size_t len = fmt->user_len;
1735
1736 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1737
1738 if (!len)
1739 len = hde_width(hde);
1740
1741 return len;
1742 }
1743
1744 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1745 {
1746 struct hpp_dynamic_entry *hde;
1747
1748 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1749
1750 return hists_to_evsel(hists) == hde->evsel;
1751 }
1752
1753 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1754 struct hist_entry *he)
1755 {
1756 struct hpp_dynamic_entry *hde;
1757 size_t len = fmt->user_len;
1758 char *str, *pos;
1759 struct format_field *field;
1760 size_t namelen;
1761 bool last = false;
1762 int ret;
1763
1764 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1765
1766 if (!len)
1767 len = hde_width(hde);
1768
1769 if (hde->raw_trace)
1770 goto raw_field;
1771
1772 if (!he->trace_output)
1773 he->trace_output = get_trace_output(he);
1774
1775 field = hde->field;
1776 namelen = strlen(field->name);
1777 str = he->trace_output;
1778
1779 while (str) {
1780 pos = strchr(str, ' ');
1781 if (pos == NULL) {
1782 last = true;
1783 pos = str + strlen(str);
1784 }
1785
1786 if (!strncmp(str, field->name, namelen)) {
1787 str += namelen + 1;
1788 str = strndup(str, pos - str);
1789
1790 if (str == NULL)
1791 return scnprintf(hpp->buf, hpp->size,
1792 "%*.*s", len, len, "ERROR");
1793 break;
1794 }
1795
1796 if (last)
1797 str = NULL;
1798 else
1799 str = pos + 1;
1800 }
1801
1802 if (str == NULL) {
1803 struct trace_seq seq;
1804 raw_field:
1805 trace_seq_init(&seq);
1806 pevent_print_field(&seq, he->raw_data, hde->field);
1807 str = seq.buffer;
1808 }
1809
1810 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1811 free(str);
1812 return ret;
1813 }
1814
1815 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1816 struct hist_entry *a, struct hist_entry *b)
1817 {
1818 struct hpp_dynamic_entry *hde;
1819 struct format_field *field;
1820 unsigned offset, size;
1821
1822 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1823
1824 if (b == NULL) {
1825 update_dynamic_len(hde, a);
1826 return 0;
1827 }
1828
1829 field = hde->field;
1830 if (field->flags & FIELD_IS_DYNAMIC) {
1831 unsigned long long dyn;
1832
1833 pevent_read_number_field(field, a->raw_data, &dyn);
1834 offset = dyn & 0xffff;
1835 size = (dyn >> 16) & 0xffff;
1836
1837 /* record max width for output */
1838 if (size > hde->dynamic_len)
1839 hde->dynamic_len = size;
1840 } else {
1841 offset = field->offset;
1842 size = field->size;
1843 }
1844
1845 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1846 }
1847
1848 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1849 {
1850 return fmt->cmp == __sort__hde_cmp;
1851 }
1852
1853 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1854 {
1855 struct hpp_dynamic_entry *hde_a;
1856 struct hpp_dynamic_entry *hde_b;
1857
1858 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
1859 return false;
1860
1861 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
1862 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
1863
1864 return hde_a->field == hde_b->field;
1865 }
1866
1867 static void hde_free(struct perf_hpp_fmt *fmt)
1868 {
1869 struct hpp_dynamic_entry *hde;
1870
1871 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1872 free(hde);
1873 }
1874
1875 static struct hpp_dynamic_entry *
1876 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
1877 int level)
1878 {
1879 struct hpp_dynamic_entry *hde;
1880
1881 hde = malloc(sizeof(*hde));
1882 if (hde == NULL) {
1883 pr_debug("Memory allocation failed\n");
1884 return NULL;
1885 }
1886
1887 hde->evsel = evsel;
1888 hde->field = field;
1889 hde->dynamic_len = 0;
1890
1891 hde->hpp.name = field->name;
1892 hde->hpp.header = __sort__hde_header;
1893 hde->hpp.width = __sort__hde_width;
1894 hde->hpp.entry = __sort__hde_entry;
1895 hde->hpp.color = NULL;
1896
1897 hde->hpp.cmp = __sort__hde_cmp;
1898 hde->hpp.collapse = __sort__hde_cmp;
1899 hde->hpp.sort = __sort__hde_cmp;
1900 hde->hpp.equal = __sort__hde_equal;
1901 hde->hpp.free = hde_free;
1902
1903 INIT_LIST_HEAD(&hde->hpp.list);
1904 INIT_LIST_HEAD(&hde->hpp.sort_list);
1905 hde->hpp.elide = false;
1906 hde->hpp.len = 0;
1907 hde->hpp.user_len = 0;
1908 hde->hpp.level = level;
1909
1910 return hde;
1911 }
1912
1913 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
1914 {
1915 struct perf_hpp_fmt *new_fmt = NULL;
1916
1917 if (perf_hpp__is_sort_entry(fmt)) {
1918 struct hpp_sort_entry *hse, *new_hse;
1919
1920 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1921 new_hse = memdup(hse, sizeof(*hse));
1922 if (new_hse)
1923 new_fmt = &new_hse->hpp;
1924 } else if (perf_hpp__is_dynamic_entry(fmt)) {
1925 struct hpp_dynamic_entry *hde, *new_hde;
1926
1927 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1928 new_hde = memdup(hde, sizeof(*hde));
1929 if (new_hde)
1930 new_fmt = &new_hde->hpp;
1931 } else {
1932 new_fmt = memdup(fmt, sizeof(*fmt));
1933 }
1934
1935 INIT_LIST_HEAD(&new_fmt->list);
1936 INIT_LIST_HEAD(&new_fmt->sort_list);
1937
1938 return new_fmt;
1939 }
1940
1941 static int parse_field_name(char *str, char **event, char **field, char **opt)
1942 {
1943 char *event_name, *field_name, *opt_name;
1944
1945 event_name = str;
1946 field_name = strchr(str, '.');
1947
1948 if (field_name) {
1949 *field_name++ = '\0';
1950 } else {
1951 event_name = NULL;
1952 field_name = str;
1953 }
1954
1955 opt_name = strchr(field_name, '/');
1956 if (opt_name)
1957 *opt_name++ = '\0';
1958
1959 *event = event_name;
1960 *field = field_name;
1961 *opt = opt_name;
1962
1963 return 0;
1964 }
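/*
 * Example: the --sort token "sched:sched_switch.prev_comm/raw" is split
 * here into event = "sched:sched_switch", field = "prev_comm" and
 * opt = "raw"; a bare "prev_comm" yields a NULL event name and is later
 * matched against the fields of every tracepoint event.
 */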
1965
1966 /* Find the matching evsel for a given event name. The event name can be:
1967 * 1. '%' + event index (e.g. '%1' for first event)
1968 * 2. full event name (e.g. sched:sched_switch)
1969 * 3. partial event name (should not contain ':')
1970 */
1971 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1972 {
1973 struct perf_evsel *evsel = NULL;
1974 struct perf_evsel *pos;
1975 bool full_name;
1976
1977 /* case 1 */
1978 if (event_name[0] == '%') {
1979 int nr = strtol(event_name+1, NULL, 0);
1980
1981 if (nr > evlist->nr_entries)
1982 return NULL;
1983
1984 evsel = perf_evlist__first(evlist);
1985 while (--nr > 0)
1986 evsel = perf_evsel__next(evsel);
1987
1988 return evsel;
1989 }
1990
1991 full_name = !!strchr(event_name, ':');
1992 evlist__for_each(evlist, pos) {
1993 /* case 2 */
1994 if (full_name && !strcmp(pos->name, event_name))
1995 return pos;
1996 /* case 3 */
1997 if (!full_name && strstr(pos->name, event_name)) {
1998 if (evsel) {
1999 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2000 event_name, evsel->name, pos->name);
2001 return NULL;
2002 }
2003 evsel = pos;
2004 }
2005 }
2006
2007 return evsel;
2008 }
2009
2010 static int __dynamic_dimension__add(struct perf_evsel *evsel,
2011 struct format_field *field,
2012 bool raw_trace, int level)
2013 {
2014 struct hpp_dynamic_entry *hde;
2015
2016 hde = __alloc_dynamic_entry(evsel, field, level);
2017 if (hde == NULL)
2018 return -ENOMEM;
2019
2020 hde->raw_trace = raw_trace;
2021
2022 perf_hpp__register_sort_field(&hde->hpp);
2023 return 0;
2024 }
2025
2026 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
2027 {
2028 int ret;
2029 struct format_field *field;
2030
2031 field = evsel->tp_format->format.fields;
2032 while (field) {
2033 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2034 if (ret < 0)
2035 return ret;
2036
2037 field = field->next;
2038 }
2039 return 0;
2040 }
2041
2042 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
2043 int level)
2044 {
2045 int ret;
2046 struct perf_evsel *evsel;
2047
2048 evlist__for_each(evlist, evsel) {
2049 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2050 continue;
2051
2052 ret = add_evsel_fields(evsel, raw_trace, level);
2053 if (ret < 0)
2054 return ret;
2055 }
2056 return 0;
2057 }
2058
2059 static int add_all_matching_fields(struct perf_evlist *evlist,
2060 char *field_name, bool raw_trace, int level)
2061 {
2062 int ret = -ESRCH;
2063 struct perf_evsel *evsel;
2064 struct format_field *field;
2065
2066 evlist__for_each(evlist, evsel) {
2067 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2068 continue;
2069
2070 field = pevent_find_any_field(evsel->tp_format, field_name);
2071 if (field == NULL)
2072 continue;
2073
2074 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2075 if (ret < 0)
2076 break;
2077 }
2078 return ret;
2079 }
2080
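/*
 * Handle a dynamic (tracepoint) sort key.  The token may be "trace_fields"
 * (all fields of all tracepoint events), "<field>" (that field from every
 * event that has it), "<event>.<field>" or "<event>.*" (all fields of one
 * event).  The event part follows the find_evsel() rules above, and an
 * optional "/raw" suffix selects the unformatted field value.
 */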
2081 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
2082 int level)
2083 {
2084 char *str, *event_name, *field_name, *opt_name;
2085 struct perf_evsel *evsel;
2086 struct format_field *field;
2087 bool raw_trace = symbol_conf.raw_trace;
2088 int ret = 0;
2089
2090 if (evlist == NULL)
2091 return -ENOENT;
2092
2093 str = strdup(tok);
2094 if (str == NULL)
2095 return -ENOMEM;
2096
2097 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2098 ret = -EINVAL;
2099 goto out;
2100 }
2101
2102 if (opt_name) {
2103 if (strcmp(opt_name, "raw")) {
2104 pr_debug("unsupported field option %s\n", opt_name);
2105 ret = -EINVAL;
2106 goto out;
2107 }
2108 raw_trace = true;
2109 }
2110
2111 if (!strcmp(field_name, "trace_fields")) {
2112 ret = add_all_dynamic_fields(evlist, raw_trace, level);
2113 goto out;
2114 }
2115
2116 if (event_name == NULL) {
2117 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2118 goto out;
2119 }
2120
2121 evsel = find_evsel(evlist, event_name);
2122 if (evsel == NULL) {
2123 pr_debug("Cannot find event: %s\n", event_name);
2124 ret = -ENOENT;
2125 goto out;
2126 }
2127
2128 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2129 pr_debug("%s is not a tracepoint event\n", event_name);
2130 ret = -EINVAL;
2131 goto out;
2132 }
2133
2134 if (!strcmp(field_name, "*")) {
2135 ret = add_evsel_fields(evsel, raw_trace, level);
2136 } else {
2137 field = pevent_find_any_field(evsel->tp_format, field_name);
2138 if (field == NULL) {
2139 pr_debug("Cannot find event field for %s.%s\n",
2140 event_name, field_name);
2141 ret = -ENOENT;
goto out;
2142 }
2143
2144 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2145 }
2146
2147 out:
2148 free(str);
2149 return ret;
2150 }
2151
2152 static int __sort_dimension__add(struct sort_dimension *sd,
2153 struct perf_hpp_list *list,
2154 int level)
2155 {
2156 if (sd->taken)
2157 return 0;
2158
2159 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2160 return -1;
2161
2162 if (sd->entry->se_collapse)
2163 list->need_collapse = 1;
2164
2165 sd->taken = 1;
2166
2167 return 0;
2168 }
2169
2170 static int __hpp_dimension__add(struct hpp_dimension *hd,
2171 struct perf_hpp_list *list,
2172 int level)
2173 {
2174 struct perf_hpp_fmt *fmt;
2175
2176 if (hd->taken)
2177 return 0;
2178
2179 fmt = __hpp_dimension__alloc_hpp(hd, level);
2180 if (!fmt)
2181 return -1;
2182
2183 hd->taken = 1;
2184 perf_hpp_list__register_sort_field(list, fmt);
2185 return 0;
2186 }
2187
2188 static int __sort_dimension__add_output(struct perf_hpp_list *list,
2189 struct sort_dimension *sd)
2190 {
2191 if (sd->taken)
2192 return 0;
2193
2194 if (__sort_dimension__add_hpp_output(sd, list) < 0)
2195 return -1;
2196
2197 sd->taken = 1;
2198 return 0;
2199 }
2200
2201 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2202 struct hpp_dimension *hd)
2203 {
2204 struct perf_hpp_fmt *fmt;
2205
2206 if (hd->taken)
2207 return 0;
2208
2209 fmt = __hpp_dimension__alloc_hpp(hd, 0);
2210 if (!fmt)
2211 return -1;
2212
2213 hd->taken = 1;
2214 perf_hpp_list__column_register(list, fmt);
2215 return 0;
2216 }
2217
2218 int hpp_dimension__add_output(unsigned col)
2219 {
2220 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2221 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2222 }
2223
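/*
 * Resolve one --sort token: try the common, hpp, branch-stack and memory
 * dimension tables in turn (the latter two only in their respective sort
 * modes), then fall back to a dynamic tracepoint field.  Returns -ESRCH
 * when the key is unknown.
 */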
2224 static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2225 struct perf_evlist *evlist,
2226 int level)
2227 {
2228 unsigned int i;
2229
2230 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2231 struct sort_dimension *sd = &common_sort_dimensions[i];
2232
2233 if (strncasecmp(tok, sd->name, strlen(tok)))
2234 continue;
2235
2236 if (sd->entry == &sort_parent) {
2237 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2238 if (ret) {
2239 char err[BUFSIZ];
2240
2241 regerror(ret, &parent_regex, err, sizeof(err));
2242 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2243 return -EINVAL;
2244 }
2245 list->parent = 1;
2246 } else if (sd->entry == &sort_sym) {
2247 list->sym = 1;
2248 /*
2249 * perf diff displays the performance difference between
2250 * two or more perf.data files. Those files could come
2251 * from different binaries, so we should not compare
2252 * their ips but rather their symbol names.
2253 */
2254 if (sort__mode == SORT_MODE__DIFF)
2255 sd->entry->se_collapse = sort__sym_sort;
2256
2257 } else if (sd->entry == &sort_dso) {
2258 sort__has_dso = 1;
2259 } else if (sd->entry == &sort_socket) {
2260 sort__has_socket = 1;
2261 } else if (sd->entry == &sort_thread) {
2262 sort__has_thread = 1;
2263 } else if (sd->entry == &sort_comm) {
2264 sort__has_comm = 1;
2265 }
2266
2267 return __sort_dimension__add(sd, list, level);
2268 }
2269
2270 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2271 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2272
2273 if (strncasecmp(tok, hd->name, strlen(tok)))
2274 continue;
2275
2276 return __hpp_dimension__add(hd, list, level);
2277 }
2278
2279 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2280 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2281
2282 if (strncasecmp(tok, sd->name, strlen(tok)))
2283 continue;
2284
2285 if (sort__mode != SORT_MODE__BRANCH)
2286 return -EINVAL;
2287
2288 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2289 list->sym = 1;
2290
2291 __sort_dimension__add(sd, list, level);
2292 return 0;
2293 }
2294
2295 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2296 struct sort_dimension *sd = &memory_sort_dimensions[i];
2297
2298 if (strncasecmp(tok, sd->name, strlen(tok)))
2299 continue;
2300
2301 if (sort__mode != SORT_MODE__MEMORY)
2302 return -EINVAL;
2303
2304 if (sd->entry == &sort_mem_daddr_sym)
2305 list->sym = 1;
2306
2307 __sort_dimension__add(sd, list, level);
2308 return 0;
2309 }
2310
2311 if (!add_dynamic_entry(evlist, tok, level))
2312 return 0;
2313
2314 return -ESRCH;
2315 }
2316
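/*
 * Split the sort order string on "{}, " and add each key.  Keys grouped
 * inside '{}' share one hierarchy level; otherwise every key is placed on
 * the next level.
 */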
2317 static int setup_sort_list(struct perf_hpp_list *list, char *str,
2318 struct perf_evlist *evlist)
2319 {
2320 char *tmp, *tok;
2321 int ret = 0;
2322 int level = 0;
2323 int next_level = 1;
2324 bool in_group = false;
2325
2326 do {
2327 tok = str;
2328 tmp = strpbrk(str, "{}, ");
2329 if (tmp) {
2330 if (in_group)
2331 next_level = level;
2332 else
2333 next_level = level + 1;
2334
2335 if (*tmp == '{')
2336 in_group = true;
2337 else if (*tmp == '}')
2338 in_group = false;
2339
2340 *tmp = '\0';
2341 str = tmp + 1;
2342 }
2343
2344 if (*tok) {
2345 ret = sort_dimension__add(list, tok, evlist, level);
2346 if (ret == -EINVAL) {
2347 error("Invalid --sort key: `%s'", tok);
2348 break;
2349 } else if (ret == -ESRCH) {
2350 error("Unknown --sort key: `%s'", tok);
2351 break;
2352 }
2353 }
2354
2355 level = next_level;
2356 } while (tmp);
2357
2358 return ret;
2359 }
2360
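/*
 * Pick the default sort order for the current sort mode.  If every event
 * in the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT (and use
 * "trace_fields" when raw trace output was requested).
 */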
2361 static const char *get_default_sort_order(struct perf_evlist *evlist)
2362 {
2363 const char *default_sort_orders[] = {
2364 default_sort_order,
2365 default_branch_sort_order,
2366 default_mem_sort_order,
2367 default_top_sort_order,
2368 default_diff_sort_order,
2369 default_tracepoint_sort_order,
2370 };
2371 bool use_trace = true;
2372 struct perf_evsel *evsel;
2373
2374 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2375
2376 if (evlist == NULL)
2377 goto out_no_evlist;
2378
2379 evlist__for_each(evlist, evsel) {
2380 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2381 use_trace = false;
2382 break;
2383 }
2384 }
2385
2386 if (use_trace) {
2387 sort__mode = SORT_MODE__TRACEPOINT;
2388 if (symbol_conf.raw_trace)
2389 return "trace_fields";
2390 }
2391 out_no_evlist:
2392 return default_sort_orders[sort__mode];
2393 }
2394
2395 static int setup_sort_order(struct perf_evlist *evlist)
2396 {
2397 char *new_sort_order;
2398
2399 /*
2400 * Append '+'-prefixed sort order to the default sort
2401 * order string.
2402 */
2403 if (!sort_order || is_strict_order(sort_order))
2404 return 0;
2405
2406 if (sort_order[1] == '\0') {
2407 error("Invalid --sort key: `+'");
2408 return -EINVAL;
2409 }
2410
2411 /*
2412 * We allocate a new sort_order string, but we never free it,
2413 * because it is referenced throughout the rest of the code.
2414 */
2415 if (asprintf(&new_sort_order, "%s,%s",
2416 get_default_sort_order(evlist), sort_order + 1) < 0) {
2417 error("Not enough memory to set up --sort");
2418 return -ENOMEM;
2419 }
2420
2421 sort_order = new_sort_order;
2422 return 0;
2423 }
2424
2425 /*
2426 * Adds the 'pre,' prefix to 'str' if 'pre' is
2427 * not already part of 'str'.
2428 */
2429 static char *prefix_if_not_in(const char *pre, char *str)
2430 {
2431 char *n;
2432
2433 if (!str || strstr(str, pre))
2434 return str;
2435
2436 if (asprintf(&n, "%s,%s", pre, str) < 0)
2437 n = NULL;
2438
2439 free(str);
2440 return n;
2441 }
2442
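/*
 * Make sure the overhead key (and overhead_children when cumulating
 * callchains) is present at the front of the sort keys.
 */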
2443 static char *setup_overhead(char *keys)
2444 {
2445 keys = prefix_if_not_in("overhead", keys);
2446
2447 if (symbol_conf.cumulate_callchain)
2448 keys = prefix_if_not_in("overhead_children", keys);
2449
2450 return keys;
2451 }
2452
2453 static int __setup_sorting(struct perf_evlist *evlist)
2454 {
2455 char *str;
2456 const char *sort_keys;
2457 int ret = 0;
2458
2459 ret = setup_sort_order(evlist);
2460 if (ret)
2461 return ret;
2462
2463 sort_keys = sort_order;
2464 if (sort_keys == NULL) {
2465 if (is_strict_order(field_order)) {
2466 /*
2467 * If the user specified a field order but no sort order,
2468 * honor it and do not add the default sort keys.
2469 */
2470 return 0;
2471 }
2472
2473 sort_keys = get_default_sort_order(evlist);
2474 }
2475
2476 str = strdup(sort_keys);
2477 if (str == NULL) {
2478 error("Not enough memory to setup sort keys");
2479 return -ENOMEM;
2480 }
2481
2482 /*
2483 * Prepend overhead fields for backward compatibility.
2484 */
2485 if (!is_strict_order(field_order)) {
2486 str = setup_overhead(str);
2487 if (str == NULL) {
2488 error("Not enough memory to setup overhead keys");
2489 return -ENOMEM;
2490 }
2491 }
2492
2493 ret = setup_sort_list(&perf_hpp_list, str, evlist);
2494
2495 free(str);
2496 return ret;
2497 }
2498
2499 void perf_hpp__set_elide(int idx, bool elide)
2500 {
2501 struct perf_hpp_fmt *fmt;
2502 struct hpp_sort_entry *hse;
2503
2504 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2505 if (!perf_hpp__is_sort_entry(fmt))
2506 continue;
2507
2508 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2509 if (hse->se->se_width_idx == idx) {
2510 fmt->elide = elide;
2511 break;
2512 }
2513 }
2514 }
2515
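/*
 * A column can be elided when its filter list contains exactly one entry:
 * the value is the same for every displayed hist entry, so print it once
 * as a "# <name>: <value>" comment instead of repeating it in each row.
 */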
2516 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2517 {
2518 if (list && strlist__nr_entries(list) == 1) {
2519 if (fp != NULL)
2520 fprintf(fp, "# %s: %s\n", list_name,
2521 strlist__entry(list, 0)->s);
2522 return true;
2523 }
2524 return false;
2525 }
2526
2527 static bool get_elide(int idx, FILE *output)
2528 {
2529 switch (idx) {
2530 case HISTC_SYMBOL:
2531 return __get_elide(symbol_conf.sym_list, "symbol", output);
2532 case HISTC_DSO:
2533 return __get_elide(symbol_conf.dso_list, "dso", output);
2534 case HISTC_COMM:
2535 return __get_elide(symbol_conf.comm_list, "comm", output);
2536 default:
2537 break;
2538 }
2539
2540 if (sort__mode != SORT_MODE__BRANCH)
2541 return false;
2542
2543 switch (idx) {
2544 case HISTC_SYMBOL_FROM:
2545 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2546 case HISTC_SYMBOL_TO:
2547 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2548 case HISTC_DSO_FROM:
2549 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2550 case HISTC_DSO_TO:
2551 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2552 default:
2553 break;
2554 }
2555
2556 return false;
2557 }
2558
2559 void sort__setup_elide(FILE *output)
2560 {
2561 struct perf_hpp_fmt *fmt;
2562 struct hpp_sort_entry *hse;
2563
2564 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2565 if (!perf_hpp__is_sort_entry(fmt))
2566 continue;
2567
2568 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2569 fmt->elide = get_elide(hse->se->se_width_idx, output);
2570 }
2571
2572 /*
2573 * It makes no sense to elide all of the sort entries.
2574 * Just revert them so they show up again.
2575 */
2576 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2577 if (!perf_hpp__is_sort_entry(fmt))
2578 continue;
2579
2580 if (!fmt->elide)
2581 return;
2582 }
2583
2584 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2585 if (!perf_hpp__is_sort_entry(fmt))
2586 continue;
2587
2588 fmt->elide = false;
2589 }
2590 }
2591
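/*
 * Resolve one --fields token against the dimension tables and register it
 * as an output column.
 */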
2592 static int output_field_add(struct perf_hpp_list *list, char *tok)
2593 {
2594 unsigned int i;
2595
2596 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2597 struct sort_dimension *sd = &common_sort_dimensions[i];
2598
2599 if (strncasecmp(tok, sd->name, strlen(tok)))
2600 continue;
2601
2602 return __sort_dimension__add_output(list, sd);
2603 }
2604
2605 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2606 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2607
2608 if (strncasecmp(tok, hd->name, strlen(tok)))
2609 continue;
2610
2611 return __hpp_dimension__add_output(list, hd);
2612 }
2613
2614 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2615 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2616
2617 if (strncasecmp(tok, sd->name, strlen(tok)))
2618 continue;
2619
2620 return __sort_dimension__add_output(list, sd);
2621 }
2622
2623 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2624 struct sort_dimension *sd = &memory_sort_dimensions[i];
2625
2626 if (strncasecmp(tok, sd->name, strlen(tok)))
2627 continue;
2628
2629 return __sort_dimension__add_output(list, sd);
2630 }
2631
2632 return -ESRCH;
2633 }
2634
2635 static int setup_output_list(struct perf_hpp_list *list, char *str)
2636 {
2637 char *tmp, *tok;
2638 int ret = 0;
2639
2640 for (tok = strtok_r(str, ", ", &tmp);
2641 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2642 ret = output_field_add(list, tok);
2643 if (ret == -EINVAL) {
2644 error("Invalid --fields key: `%s'", tok);
2645 break;
2646 } else if (ret == -ESRCH) {
2647 error("Unknown --fields key: `%s'", tok);
2648 break;
2649 }
2650 }
2651
2652 return ret;
2653 }
2654
2655 static void reset_dimensions(void)
2656 {
2657 unsigned int i;
2658
2659 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2660 common_sort_dimensions[i].taken = 0;
2661
2662 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2663 hpp_sort_dimensions[i].taken = 0;
2664
2665 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2666 bstack_sort_dimensions[i].taken = 0;
2667
2668 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2669 memory_sort_dimensions[i].taken = 0;
2670 }
2671
2672 bool is_strict_order(const char *order)
2673 {
2674 return order && (*order != '+');
2675 }
2676
2677 static int __setup_output_field(void)
2678 {
2679 char *str, *strp;
2680 int ret = -EINVAL;
2681
2682 if (field_order == NULL)
2683 return 0;
2684
2685 strp = str = strdup(field_order);
2686 if (str == NULL) {
2687 error("Not enough memory to setup output fields");
2688 return -ENOMEM;
2689 }
2690
2691 if (!is_strict_order(field_order))
2692 strp++;
2693
2694 if (!strlen(strp)) {
2695 error("Invalid --fields key: `+'");
2696 goto out;
2697 }
2698
2699 ret = setup_output_list(&perf_hpp_list, strp);
2700
2701 out:
2702 free(str);
2703 return ret;
2704 }
2705
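/*
 * Main entry point for sort/output setup: parse the sort keys, add the
 * "parent" key when a non-default parent pattern was given, set up the
 * default and user-specified output fields, and then make the sort keys
 * and output fields consistent with each other.
 */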
2706 int setup_sorting(struct perf_evlist *evlist)
2707 {
2708 int err;
2709
2710 err = __setup_sorting(evlist);
2711 if (err < 0)
2712 return err;
2713
2714 if (parent_pattern != default_parent_pattern) {
2715 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
2716 if (err < 0)
2717 return err;
2718 }
2719
2720 reset_dimensions();
2721
2722 /*
2723 * perf diff doesn't use default hpp output fields.
2724 */
2725 if (sort__mode != SORT_MODE__DIFF)
2726 perf_hpp__init();
2727
2728 err = __setup_output_field();
2729 if (err < 0)
2730 return err;
2731
2732 /* copy sort keys to output fields */
2733 perf_hpp__setup_output_field(&perf_hpp_list);
2734 /* and then copy output fields to sort keys */
2735 perf_hpp__append_sort_keys(&perf_hpp_list);
2736
2737 /* setup hists-specific output fields */
2738 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
2739 return -1;
2740
2741 return 0;
2742 }
2743
2744 void reset_output_field(void)
2745 {
2746 perf_hpp_list.need_collapse = 0;
2747 perf_hpp_list.parent = 0;
2748 perf_hpp_list.sym = 0;
2749 sort__has_dso = 0;
2750
2751 field_order = NULL;
2752 sort_order = NULL;
2753
2754 reset_dimensions();
2755 perf_hpp__reset_output_field(&perf_hpp_list);
2756 }