Merge branches 'acpi-video' and 'acpi-hotplug'
[deliverable/linux.git] / tools / perf / util / sort.c
1 #include <sys/mman.h>
2 #include "sort.h"
3 #include "hist.h"
4 #include "comm.h"
5 #include "symbol.h"
6 #include "evsel.h"
7 #include "evlist.h"
8 #include <traceevent/event-parse.h>
9
/* Regex compiled from parent_pattern; used by the --sort parent key. */
regex_t parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
/* Default sort-key strings for the various report modes. */
const char	default_sort_order[] = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
/*
 * NOTE(review): the sort__* flags below appear to be set while sort keys
 * are parsed elsewhere in this file; cmp functions consult them (e.g.
 * sort__has_dso in sort__sym_cmp).
 */
int		sort__need_collapse = 0;
int		sort__has_parent = 0;
int		sort__has_sym = 0;
int		sort__has_dso = 0;
int		sort__has_socket = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;
29
30
31 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
32 {
33 int n;
34 va_list ap;
35
36 va_start(ap, fmt);
37 n = vsnprintf(bf, size, fmt, ap);
38 if (symbol_conf.field_sep && n > 0) {
39 char *sep = bf;
40
41 while (1) {
42 sep = strchr(sep, *symbol_conf.field_sep);
43 if (sep == NULL)
44 break;
45 *sep = '.';
46 }
47 }
48 va_end(ap);
49
50 if (n >= (int)size)
51 return size - 1;
52 return n;
53 }
54
55 static int64_t cmp_null(const void *l, const void *r)
56 {
57 if (!l && !r)
58 return 0;
59 else if (!l)
60 return -1;
61 else
62 return 1;
63 }
64
65 /* --sort pid */
66
67 static int64_t
68 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
69 {
70 return right->thread->tid - left->thread->tid;
71 }
72
73 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
74 size_t size, unsigned int width)
75 {
76 const char *comm = thread__comm_str(he->thread);
77
78 width = max(7U, width) - 6;
79 return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
80 width, width, comm ?: "");
81 }
82
/* Column descriptor for --sort pid ("Pid:Command"). */
struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_width_idx	= HISTC_THREAD,
};
89
90 /* --sort comm */
91
92 static int64_t
93 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
94 {
95 /* Compare the addr that should be unique among comm */
96 return strcmp(comm__str(right->comm), comm__str(left->comm));
97 }
98
99 static int64_t
100 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
101 {
102 /* Compare the addr that should be unique among comm */
103 return strcmp(comm__str(right->comm), comm__str(left->comm));
104 }
105
106 static int64_t
107 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
108 {
109 return strcmp(comm__str(right->comm), comm__str(left->comm));
110 }
111
112 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
113 size_t size, unsigned int width)
114 {
115 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
116 }
117
/* Column descriptor for --sort comm. */
struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_width_idx	= HISTC_COMM,
};
126
127 /* --sort dso */
128
129 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
130 {
131 struct dso *dso_l = map_l ? map_l->dso : NULL;
132 struct dso *dso_r = map_r ? map_r->dso : NULL;
133 const char *dso_name_l, *dso_name_r;
134
135 if (!dso_l || !dso_r)
136 return cmp_null(dso_r, dso_l);
137
138 if (verbose) {
139 dso_name_l = dso_l->long_name;
140 dso_name_r = dso_r->long_name;
141 } else {
142 dso_name_l = dso_l->short_name;
143 dso_name_r = dso_r->short_name;
144 }
145
146 return strcmp(dso_name_l, dso_name_r);
147 }
148
149 static int64_t
150 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
151 {
152 return _sort__dso_cmp(right->ms.map, left->ms.map);
153 }
154
155 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
156 size_t size, unsigned int width)
157 {
158 if (map && map->dso) {
159 const char *dso_name = !verbose ? map->dso->short_name :
160 map->dso->long_name;
161 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
162 }
163
164 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
165 }
166
167 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
168 size_t size, unsigned int width)
169 {
170 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
171 }
172
/* Column descriptor for --sort dso. */
struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_width_idx	= HISTC_DSO,
};
179
180 /* --sort symbol */
181
182 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
183 {
184 return (int64_t)(right_ip - left_ip);
185 }
186
187 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
188 {
189 if (!sym_l || !sym_r)
190 return cmp_null(sym_l, sym_r);
191
192 if (sym_l == sym_r)
193 return 0;
194
195 if (sym_l->start != sym_r->start)
196 return (int64_t)(sym_r->start - sym_l->start);
197
198 return (int64_t)(sym_r->end - sym_l->end);
199 }
200
/*
 * --sort symbol: order by symbol, falling back to raw IP when neither
 * entry resolved a symbol.
 */
static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!sort__has_dso) {
		/* dso is not already a sort key, so disambiguate here */
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}
221
222 static int64_t
223 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
224 {
225 if (!left->ms.sym || !right->ms.sym)
226 return cmp_null(left->ms.sym, right->ms.sym);
227
228 return strcmp(right->ms.sym->name, left->ms.sym->name);
229 }
230
/*
 * Render "[level] symbol" (optionally prefixed with "address origin" in
 * verbose mode) into bf, padded/truncated to exactly `width` columns.
 * Always returns `width`; truncation is done by writing a NUL at bf[width].
 */
static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose) {
		/* '!' marks an address with no backing map */
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			/* data symbol: print name plus offset from its start */
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					ip - map->unmap_ip(map, sym->start));
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
				       width - ret, "");
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
					       width - ret,
					       sym->name);
		}
	} else {
		/* no symbol: fall back to the raw address */
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
		ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
				       width - ret, "");
	}

	if (ret > width)
		bf[width] = '\0';

	return width;
}
269
270 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
271 size_t size, unsigned int width)
272 {
273 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
274 he->level, bf, size, width);
275 }
276
/* Column descriptor for --sort symbol. */
struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_width_idx	= HISTC_SYMBOL,
};
284
285 /* --sort srcline */
286
287 static int64_t
288 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
289 {
290 if (!left->srcline) {
291 if (!left->ms.map)
292 left->srcline = SRCLINE_UNKNOWN;
293 else {
294 struct map *map = left->ms.map;
295 left->srcline = get_srcline(map->dso,
296 map__rip_2objdump(map, left->ip),
297 left->ms.sym, true);
298 }
299 }
300 if (!right->srcline) {
301 if (!right->ms.map)
302 right->srcline = SRCLINE_UNKNOWN;
303 else {
304 struct map *map = right->ms.map;
305 right->srcline = get_srcline(map->dso,
306 map__rip_2objdump(map, right->ip),
307 right->ms.sym, true);
308 }
309 }
310 return strcmp(right->srcline, left->srcline);
311 }
312
313 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
314 size_t size, unsigned int width)
315 {
316 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline);
317 }
318
/* Column descriptor for --sort srcline. */
struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,
};
325
326 /* --sort srcfile */
327
/* Shared empty-string sentinel so unknown files need no allocation. */
static char no_srcfile[1];

/*
 * Resolve the source file name for an entry by truncating the srcline
 * result at the ':' separator.  Returns an allocated string owned by the
 * caller, or the static no_srcfile sentinel.  Assumes e->ms.map is
 * non-NULL (callers check before calling).
 */
static char *get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			 e->ms.sym, false, true);
	/* SRCLINE_UNKNOWN is a static string, so nothing to free here */
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		/* keep only the file part, dropping ":line" */
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}
347
348 static int64_t
349 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
350 {
351 if (!left->srcfile) {
352 if (!left->ms.map)
353 left->srcfile = no_srcfile;
354 else
355 left->srcfile = get_srcfile(left);
356 }
357 if (!right->srcfile) {
358 if (!right->ms.map)
359 right->srcfile = no_srcfile;
360 else
361 right->srcfile = get_srcfile(right);
362 }
363 return strcmp(right->srcfile, left->srcfile);
364 }
365
366 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
367 size_t size, unsigned int width)
368 {
369 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcfile);
370 }
371
/* Column descriptor for --sort srcfile. */
struct sort_entry sort_srcfile = {
	.se_header	= "Source File",
	.se_cmp		= sort__srcfile_cmp,
	.se_snprintf	= hist_entry__srcfile_snprintf,
	.se_width_idx	= HISTC_SRCFILE,
};
378
379 /* --sort parent */
380
381 static int64_t
382 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
383 {
384 struct symbol *sym_l = left->parent;
385 struct symbol *sym_r = right->parent;
386
387 if (!sym_l || !sym_r)
388 return cmp_null(sym_l, sym_r);
389
390 return strcmp(sym_r->name, sym_l->name);
391 }
392
393 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
394 size_t size, unsigned int width)
395 {
396 return repsep_snprintf(bf, size, "%-*.*s", width, width,
397 he->parent ? he->parent->name : "[other]");
398 }
399
/* Column descriptor for --sort parent. */
struct sort_entry sort_parent = {
	.se_header	= "Parent symbol",
	.se_cmp		= sort__parent_cmp,
	.se_snprintf	= hist_entry__parent_snprintf,
	.se_width_idx	= HISTC_PARENT,
};
406
407 /* --sort cpu */
408
409 static int64_t
410 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
411 {
412 return right->cpu - left->cpu;
413 }
414
415 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
416 size_t size, unsigned int width)
417 {
418 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
419 }
420
/* Column descriptor for --sort cpu. */
struct sort_entry sort_cpu = {
	.se_header      = "CPU",
	.se_cmp	        = sort__cpu_cmp,
	.se_snprintf    = hist_entry__cpu_snprintf,
	.se_width_idx	= HISTC_CPU,
};
427
428 /* --sort socket */
429
430 static int64_t
431 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
432 {
433 return right->socket - left->socket;
434 }
435
static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	/*
	 * NOTE(review): width - 3 wraps to a huge unsigned value when
	 * width < 3; printf then treats the negative precision as omitted,
	 * so output stays sane — but confirm callers always pass width >= 3.
	 */
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}
441
/* Column descriptor for --sort socket. */
struct sort_entry sort_socket = {
	.se_header      = "Socket",
	.se_cmp	        = sort__socket_cmp,
	.se_snprintf    = hist_entry__socket_snprintf,
	.se_width_idx	= HISTC_SOCKET,
};
448
449 /* --sort trace */
450
/*
 * Format a tracepoint sample's payload as text via libtraceevent.
 * Returns seq.buffer, which the caller caches in he->trace_output
 * (presumably freed with the hist_entry — confirm ownership).
 */
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		/* --raw-trace: dump field values instead of the pretty line */
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
				    evsel->tp_format);
	} else {
		pevent_event_info(&seq, evsel->tp_format, &rec);
	}
	return seq.buffer;
}
471
/*
 * --sort trace: compare formatted tracepoint output.  Non-tracepoint
 * events all compare equal.  Also feeds the column-width bookkeeping as
 * a side effect.
 */
static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	/* lazily format and cache the trace output on both entries */
	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	hists__new_col_len(left->hists, HISTC_TRACE, strlen(left->trace_output));
	hists__new_col_len(right->hists, HISTC_TRACE, strlen(right->trace_output));

	return strcmp(right->trace_output, left->trace_output);
}
491
/* Print the cached trace output; "N/A" for non-tracepoint events. */
static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-*.*s", width, width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->trace_output);
}
505
/* Column descriptor for --sort trace. */
struct sort_entry sort_trace = {
	.se_header      = "Trace output",
	.se_cmp	        = sort__trace_cmp,
	.se_snprintf    = hist_entry__trace_snprintf,
	.se_width_idx	= HISTC_TRACE,
};
512
513 /* sort keys for branch stacks */
514
515 static int64_t
516 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
517 {
518 if (!left->branch_info || !right->branch_info)
519 return cmp_null(left->branch_info, right->branch_info);
520
521 return _sort__dso_cmp(left->branch_info->from.map,
522 right->branch_info->from.map);
523 }
524
525 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
526 size_t size, unsigned int width)
527 {
528 if (he->branch_info)
529 return _hist_entry__dso_snprintf(he->branch_info->from.map,
530 bf, size, width);
531 else
532 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
533 }
534
535 static int64_t
536 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
537 {
538 if (!left->branch_info || !right->branch_info)
539 return cmp_null(left->branch_info, right->branch_info);
540
541 return _sort__dso_cmp(left->branch_info->to.map,
542 right->branch_info->to.map);
543 }
544
545 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
546 size_t size, unsigned int width)
547 {
548 if (he->branch_info)
549 return _hist_entry__dso_snprintf(he->branch_info->to.map,
550 bf, size, width);
551 else
552 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
553 }
554
555 static int64_t
556 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
557 {
558 struct addr_map_symbol *from_l = &left->branch_info->from;
559 struct addr_map_symbol *from_r = &right->branch_info->from;
560
561 if (!left->branch_info || !right->branch_info)
562 return cmp_null(left->branch_info, right->branch_info);
563
564 from_l = &left->branch_info->from;
565 from_r = &right->branch_info->from;
566
567 if (!from_l->sym && !from_r->sym)
568 return _sort__addr_cmp(from_l->addr, from_r->addr);
569
570 return _sort__sym_cmp(from_l->sym, from_r->sym);
571 }
572
573 static int64_t
574 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
575 {
576 struct addr_map_symbol *to_l, *to_r;
577
578 if (!left->branch_info || !right->branch_info)
579 return cmp_null(left->branch_info, right->branch_info);
580
581 to_l = &left->branch_info->to;
582 to_r = &right->branch_info->to;
583
584 if (!to_l->sym && !to_r->sym)
585 return _sort__addr_cmp(to_l->addr, to_r->addr);
586
587 return _sort__sym_cmp(to_l->sym, to_r->sym);
588 }
589
590 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
591 size_t size, unsigned int width)
592 {
593 if (he->branch_info) {
594 struct addr_map_symbol *from = &he->branch_info->from;
595
596 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
597 he->level, bf, size, width);
598 }
599
600 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
601 }
602
603 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
604 size_t size, unsigned int width)
605 {
606 if (he->branch_info) {
607 struct addr_map_symbol *to = &he->branch_info->to;
608
609 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
610 he->level, bf, size, width);
611 }
612
613 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
614 }
615
/* Branch-stack columns: where each branch came from and where it went. */
struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_width_idx	= HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_width_idx	= HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_width_idx	= HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_width_idx	= HISTC_SYMBOL_TO,
};
643
644 static int64_t
645 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
646 {
647 unsigned char mp, p;
648
649 if (!left->branch_info || !right->branch_info)
650 return cmp_null(left->branch_info, right->branch_info);
651
652 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
653 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
654 return mp || p;
655 }
656
657 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
658 size_t size, unsigned int width){
659 static const char *out = "N/A";
660
661 if (he->branch_info) {
662 if (he->branch_info->flags.predicted)
663 out = "N";
664 else if (he->branch_info->flags.mispred)
665 out = "Y";
666 }
667
668 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
669 }
670
671 static int64_t
672 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
673 {
674 return left->branch_info->flags.cycles -
675 right->branch_info->flags.cycles;
676 }
677
678 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
679 size_t size, unsigned int width)
680 {
681 if (he->branch_info->flags.cycles == 0)
682 return repsep_snprintf(bf, size, "%-*s", width, "-");
683 return repsep_snprintf(bf, size, "%-*hd", width,
684 he->branch_info->flags.cycles);
685 }
686
/* Column descriptor for --sort cycles (branch-stack cycle counts). */
struct sort_entry sort_cycles = {
	.se_header	= "Basic Block Cycles",
	.se_cmp		= sort__cycles_cmp,
	.se_snprintf	= hist_entry__cycles_snprintf,
	.se_width_idx	= HISTC_CYCLES,
};
693
694 /* --sort daddr_sym */
695 static int64_t
696 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
697 {
698 uint64_t l = 0, r = 0;
699
700 if (left->mem_info)
701 l = left->mem_info->daddr.addr;
702 if (right->mem_info)
703 r = right->mem_info->daddr.addr;
704
705 return (int64_t)(r - l);
706 }
707
708 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
709 size_t size, unsigned int width)
710 {
711 uint64_t addr = 0;
712 struct map *map = NULL;
713 struct symbol *sym = NULL;
714
715 if (he->mem_info) {
716 addr = he->mem_info->daddr.addr;
717 map = he->mem_info->daddr.map;
718 sym = he->mem_info->daddr.sym;
719 }
720 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
721 width);
722 }
723
724 static int64_t
725 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
726 {
727 uint64_t l = 0, r = 0;
728
729 if (left->mem_info)
730 l = left->mem_info->iaddr.addr;
731 if (right->mem_info)
732 r = right->mem_info->iaddr.addr;
733
734 return (int64_t)(r - l);
735 }
736
737 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
738 size_t size, unsigned int width)
739 {
740 uint64_t addr = 0;
741 struct map *map = NULL;
742 struct symbol *sym = NULL;
743
744 if (he->mem_info) {
745 addr = he->mem_info->iaddr.addr;
746 map = he->mem_info->iaddr.map;
747 sym = he->mem_info->iaddr.sym;
748 }
749 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
750 width);
751 }
752
753 static int64_t
754 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
755 {
756 struct map *map_l = NULL;
757 struct map *map_r = NULL;
758
759 if (left->mem_info)
760 map_l = left->mem_info->daddr.map;
761 if (right->mem_info)
762 map_r = right->mem_info->daddr.map;
763
764 return _sort__dso_cmp(map_l, map_r);
765 }
766
767 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
768 size_t size, unsigned int width)
769 {
770 struct map *map = NULL;
771
772 if (he->mem_info)
773 map = he->mem_info->daddr.map;
774
775 return _hist_entry__dso_snprintf(map, bf, size, width);
776 }
777
778 static int64_t
779 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
780 {
781 union perf_mem_data_src data_src_l;
782 union perf_mem_data_src data_src_r;
783
784 if (left->mem_info)
785 data_src_l = left->mem_info->data_src;
786 else
787 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
788
789 if (right->mem_info)
790 data_src_r = right->mem_info->data_src;
791 else
792 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
793
794 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
795 }
796
797 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
798 size_t size, unsigned int width)
799 {
800 const char *out;
801 u64 mask = PERF_MEM_LOCK_NA;
802
803 if (he->mem_info)
804 mask = he->mem_info->data_src.mem_lock;
805
806 if (mask & PERF_MEM_LOCK_NA)
807 out = "N/A";
808 else if (mask & PERF_MEM_LOCK_LOCKED)
809 out = "Yes";
810 else
811 out = "No";
812
813 return repsep_snprintf(bf, size, "%-*s", width, out);
814 }
815
816 static int64_t
817 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
818 {
819 union perf_mem_data_src data_src_l;
820 union perf_mem_data_src data_src_r;
821
822 if (left->mem_info)
823 data_src_l = left->mem_info->data_src;
824 else
825 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
826
827 if (right->mem_info)
828 data_src_r = right->mem_info->data_src;
829 else
830 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
831
832 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
833 }
834
/* Names for the PERF_MEM_TLB_* flag bits, indexed by bit position. */
static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};
#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
845
/* Render the set TLB flags as e.g. "L1 or L2 hit"; "N/A" when none set. */
static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_dtlb;

	/* HIT/MISS are appended as suffixes after the level names */
	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	/* walk the remaining bits, joining set flag names with " or " */
	for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, tlb_access[i], sz - l);
		l += strlen(tlb_access[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}
885
886 static int64_t
887 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
888 {
889 union perf_mem_data_src data_src_l;
890 union perf_mem_data_src data_src_r;
891
892 if (left->mem_info)
893 data_src_l = left->mem_info->data_src;
894 else
895 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
896
897 if (right->mem_info)
898 data_src_r = right->mem_info->data_src;
899 else
900 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
901
902 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
903 }
904
/* Names for the PERF_MEM_LVL_* flag bits, indexed by bit position. */
static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};
#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
922
/* Render the set memory-level flags, e.g. "L1 or LFB hit". */
static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_LVL_NA;
	u64 hit, miss;

	if (he->mem_info)
		m = he->mem_info->data_src.mem_lvl;

	out[0] = '\0';

	/* HIT/MISS are appended as suffixes after the level names */
	hit = m & PERF_MEM_LVL_HIT;
	miss = m & PERF_MEM_LVL_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);

	/* walk the remaining bits, joining set flag names with " or " */
	for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, mem_lvl[i], sz - l);
		l += strlen(mem_lvl[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}
962
963 static int64_t
964 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
965 {
966 union perf_mem_data_src data_src_l;
967 union perf_mem_data_src data_src_r;
968
969 if (left->mem_info)
970 data_src_l = left->mem_info->data_src;
971 else
972 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
973
974 if (right->mem_info)
975 data_src_r = right->mem_info->data_src;
976 else
977 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
978
979 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
980 }
981
/* Names for the PERF_MEM_SNOOP_* flag bits, indexed by bit position. */
static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Miss",
	"Hit",
	"HitM",
};
#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
990
/* Render the set snoop flags joined with " or "; "N/A" when none set. */
static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_snoop;

	/* walk the bits, joining set flag names with " or " */
	for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, snoop_access[i], sz - l);
		l += strlen(snoop_access[i]);
	}

	if (*out == '\0')
		strcpy(out, "N/A");

	return repsep_snprintf(bf, size, "%-*s", width, out);
}
1020
1021 static inline u64 cl_address(u64 address)
1022 {
1023 /* return the cacheline of the address */
1024 return (address & ~(cacheline_size - 1));
1025 }
1026
/*
 * Order entries by the cacheline of the accessed data.  Keys, in order:
 * cpumode, then the backing file identity (maj/min/ino/ino_generation),
 * then pid for anonymous userspace mappings, then the cacheline address.
 */
static int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}
1090
/*
 * Render the data cacheline address/symbol.  The level character is
 * overridden to 's' for shared file-backed data mappings and to 'X'
 * when there is no map at all.
 */
static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		     map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}
1118
/* Column descriptor for --sort mispredict. */
struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};
1125
1126 static u64 he_weight(struct hist_entry *he)
1127 {
1128 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1129 }
1130
1131 static int64_t
1132 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1133 {
1134 return he_weight(left) - he_weight(right);
1135 }
1136
1137 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1138 size_t size, unsigned int width)
1139 {
1140 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1141 }
1142
/* Column descriptor for --sort local_weight. */
struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__local_weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};
1149
1150 static int64_t
1151 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1152 {
1153 return left->stat.weight - right->stat.weight;
1154 }
1155
1156 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1157 size_t size, unsigned int width)
1158 {
1159 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1160 }
1161
/* Column descriptor for --sort weight. */
struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__global_weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};
1168
/* Memory-access ("perf mem") column descriptors. */
struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	/*
	 * NOTE(review): shares the daddr-symbol width slot rather than a
	 * dedicated DSO one — confirm this is intended.
	 */
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
};
1224
1225 static int64_t
1226 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1227 {
1228 if (!left->branch_info || !right->branch_info)
1229 return cmp_null(left->branch_info, right->branch_info);
1230
1231 return left->branch_info->flags.abort !=
1232 right->branch_info->flags.abort;
1233 }
1234
1235 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1236 size_t size, unsigned int width)
1237 {
1238 static const char *out = "N/A";
1239
1240 if (he->branch_info) {
1241 if (he->branch_info->flags.abort)
1242 out = "A";
1243 else
1244 out = ".";
1245 }
1246
1247 return repsep_snprintf(bf, size, "%-*s", width, out);
1248 }
1249
/* --sort abort: whether the branch aborted a transaction. */
struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};
1256
1257 static int64_t
1258 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1259 {
1260 if (!left->branch_info || !right->branch_info)
1261 return cmp_null(left->branch_info, right->branch_info);
1262
1263 return left->branch_info->flags.in_tx !=
1264 right->branch_info->flags.in_tx;
1265 }
1266
1267 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1268 size_t size, unsigned int width)
1269 {
1270 static const char *out = "N/A";
1271
1272 if (he->branch_info) {
1273 if (he->branch_info->flags.in_tx)
1274 out = "T";
1275 else
1276 out = ".";
1277 }
1278
1279 return repsep_snprintf(bf, size, "%-*s", width, out);
1280 }
1281
/* --sort in_tx: whether the branch executed inside a transaction. */
struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,
};
1288
1289 static int64_t
1290 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1291 {
1292 return left->transaction - right->transaction;
1293 }
1294
/*
 * Append the NUL-terminated 'str' at 'p' and return a pointer to the
 * new terminating NUL, ready for the next append.  The caller must
 * guarantee the destination buffer is large enough.
 */
static inline char *add_str(char *p, const char *str)
{
	size_t n = strlen(str);

	memcpy(p, str, n + 1);	/* include the terminator */
	return p + n;
}
1300
/*
 * Transaction flag bits and their display names, terminated by a NULL
 * name.  Entries with skip_for_len set are excluded from the column
 * width computed by hist_entry__transaction_len().
 */
static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,        "EL ",        0 },
	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
	{ PERF_TXN_SYNC,           "SYNC ",      1 },
	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
	{ PERF_TXN_RETRY,          "RETRY ",     0 },
	{ PERF_TXN_CONFLICT,       "CON ",       0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
	{ 0, NULL, 0 }
};
1316
1317 int hist_entry__transaction_len(void)
1318 {
1319 int i;
1320 int len = 0;
1321
1322 for (i = 0; txbits[i].name; i++) {
1323 if (!txbits[i].skip_for_len)
1324 len += strlen(txbits[i].name);
1325 }
1326 len += 4; /* :XX<space> */
1327 return len;
1328 }
1329
/*
 * Render the transaction flag word as a space-separated list of flag
 * names (e.g. "EL TX SYNC"), optionally followed by ":<hex abort code>".
 * A non-zero word with neither SYNC nor ASYNC set prints "NEITHER ".
 */
static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	/* large enough for every txbits name plus "NEITHER " and ":%x" */
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		/* append the abort code extracted from the high bits */
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}
1353
/* --sort transaction: decoded transaction flags of the sample. */
struct sort_entry sort_transaction = {
	.se_header	= "Transaction                ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
};
1360
/* A named sort key and whether it has already been registered. */
struct sort_dimension {
	const char		*name;	/* key name used on the command line */
	struct sort_entry	*entry;	/* callbacks implementing the key */
	int			taken;	/* non-zero once added to the output */
};
1366
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

/* Sort keys available in every sort mode, indexed by SORT_* id. */
static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
};

#undef DIM
1386
#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

/* Sort keys valid only in branch-stack mode (offset past the common ids). */
static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
};

#undef DIM
1401
#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

/* Sort keys valid only in memory mode (offset past the common ids). */
static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
};

#undef DIM
1416
/* A sort key implemented by a pre-built hpp format (overhead, period...). */
struct hpp_dimension {
	const char		*name;	/* key name used on the command line */
	struct perf_hpp_fmt	*fmt;	/* pre-built format from perf_hpp__format */
	int			taken;	/* non-zero once registered */
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

/* Overhead/period columns selectable as sort keys. */
static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM
1437
/* An hpp format wrapping a sort_entry; recovered via container_of(). */
struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;	/* must be first-class member for container_of */
	struct sort_entry *se;		/* the wrapped sort entry */
};
1442
1443 bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1444 {
1445 struct hpp_sort_entry *hse_a;
1446 struct hpp_sort_entry *hse_b;
1447
1448 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1449 return false;
1450
1451 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1452 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1453
1454 return hse_a->se == hse_b->se;
1455 }
1456
1457 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1458 {
1459 struct hpp_sort_entry *hse;
1460
1461 if (!perf_hpp__is_sort_entry(fmt))
1462 return;
1463
1464 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1465 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1466 }
1467
1468 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1469 struct perf_evsel *evsel)
1470 {
1471 struct hpp_sort_entry *hse;
1472 size_t len = fmt->user_len;
1473
1474 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1475
1476 if (!len)
1477 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1478
1479 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1480 }
1481
1482 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1483 struct perf_hpp *hpp __maybe_unused,
1484 struct perf_evsel *evsel)
1485 {
1486 struct hpp_sort_entry *hse;
1487 size_t len = fmt->user_len;
1488
1489 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1490
1491 if (!len)
1492 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1493
1494 return len;
1495 }
1496
1497 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1498 struct hist_entry *he)
1499 {
1500 struct hpp_sort_entry *hse;
1501 size_t len = fmt->user_len;
1502
1503 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1504
1505 if (!len)
1506 len = hists__col_len(he->hists, hse->se->se_width_idx);
1507
1508 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1509 }
1510
1511 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1512 struct hist_entry *a, struct hist_entry *b)
1513 {
1514 struct hpp_sort_entry *hse;
1515
1516 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1517 return hse->se->se_cmp(a, b);
1518 }
1519
1520 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1521 struct hist_entry *a, struct hist_entry *b)
1522 {
1523 struct hpp_sort_entry *hse;
1524 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1525
1526 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1527 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1528 return collapse_fn(a, b);
1529 }
1530
1531 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1532 struct hist_entry *a, struct hist_entry *b)
1533 {
1534 struct hpp_sort_entry *hse;
1535 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1536
1537 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1538 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1539 return sort_fn(a, b);
1540 }
1541
1542 static struct hpp_sort_entry *
1543 __sort_dimension__alloc_hpp(struct sort_dimension *sd)
1544 {
1545 struct hpp_sort_entry *hse;
1546
1547 hse = malloc(sizeof(*hse));
1548 if (hse == NULL) {
1549 pr_err("Memory allocation failed\n");
1550 return NULL;
1551 }
1552
1553 hse->se = sd->entry;
1554 hse->hpp.name = sd->entry->se_header;
1555 hse->hpp.header = __sort__hpp_header;
1556 hse->hpp.width = __sort__hpp_width;
1557 hse->hpp.entry = __sort__hpp_entry;
1558 hse->hpp.color = NULL;
1559
1560 hse->hpp.cmp = __sort__hpp_cmp;
1561 hse->hpp.collapse = __sort__hpp_collapse;
1562 hse->hpp.sort = __sort__hpp_sort;
1563
1564 INIT_LIST_HEAD(&hse->hpp.list);
1565 INIT_LIST_HEAD(&hse->hpp.sort_list);
1566 hse->hpp.elide = false;
1567 hse->hpp.len = 0;
1568 hse->hpp.user_len = 0;
1569
1570 return hse;
1571 }
1572
1573 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1574 {
1575 return format->header == __sort__hpp_header;
1576 }
1577
1578 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1579 {
1580 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1581
1582 if (hse == NULL)
1583 return -1;
1584
1585 perf_hpp__register_sort_field(&hse->hpp);
1586 return 0;
1587 }
1588
1589 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
1590 {
1591 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1592
1593 if (hse == NULL)
1594 return -1;
1595
1596 perf_hpp__column_register(&hse->hpp);
1597 return 0;
1598 }
1599
/* An hpp format for one tracepoint field of one event ("dynamic" key). */
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;	/* must be first-class member for container_of */
	struct perf_evsel *evsel;	/* event owning the field */
	struct format_field *field;	/* tracepoint field descriptor */
	unsigned dynamic_len;		/* widest value seen so far */
	bool raw_trace;			/* print raw field instead of pretty output */
};
1607
1608 static int hde_width(struct hpp_dynamic_entry *hde)
1609 {
1610 if (!hde->hpp.len) {
1611 int len = hde->dynamic_len;
1612 int namelen = strlen(hde->field->name);
1613 int fieldlen = hde->field->size;
1614
1615 if (namelen > len)
1616 len = namelen;
1617
1618 if (!(hde->field->flags & FIELD_IS_STRING)) {
1619 /* length for print hex numbers */
1620 fieldlen = hde->field->size * 2 + 2;
1621 }
1622 if (fieldlen > len)
1623 len = fieldlen;
1624
1625 hde->hpp.len = len;
1626 }
1627 return hde->hpp.len;
1628 }
1629
/*
 * Scan 'he's pretty-printed trace output for this entry's field and
 * record the printed value's width in hde->dynamic_len if it is the
 * widest seen so far.  No-op in raw-trace mode.
 */
static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		/* the output is space-separated "name=value" tokens */
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			/* skip "name=" -- assumes '=' follows the name */
			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}
1672
1673 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1674 struct perf_evsel *evsel __maybe_unused)
1675 {
1676 struct hpp_dynamic_entry *hde;
1677 size_t len = fmt->user_len;
1678
1679 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1680
1681 if (!len)
1682 len = hde_width(hde);
1683
1684 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1685 }
1686
1687 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1688 struct perf_hpp *hpp __maybe_unused,
1689 struct perf_evsel *evsel __maybe_unused)
1690 {
1691 struct hpp_dynamic_entry *hde;
1692 size_t len = fmt->user_len;
1693
1694 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1695
1696 if (!len)
1697 len = hde_width(hde);
1698
1699 return len;
1700 }
1701
1702 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1703 {
1704 struct hpp_dynamic_entry *hde;
1705
1706 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1707
1708 return hists_to_evsel(hists) == hde->evsel;
1709 }
1710
/*
 * hpp->entry callback for a dynamic entry: print this entry's value of
 * the field.  Normally the value is extracted from the pretty-printed
 * trace output; in raw-trace mode (or when the field is not found in
 * the output) the raw field is printed via libtraceevent instead.
 * In every path 'str' ends up heap-allocated and is freed here.
 */
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	/* walk the space-separated "name=value" tokens */
	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			/* duplicate the value so it can be freed uniformly */
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	/* not found in the pretty output: fall back to the raw field */
	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		pevent_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;	/* ownership transfers to us */
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}
1769
/*
 * Compare two entries by the raw bytes of this dynamic field.  For
 * dynamic (variable-length) fields the offset/size pair is decoded
 * from the 32-bit descriptor stored in the record; for fixed fields
 * they come from the format definition.
 */
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	field = hde->field;
	if (field->flags & FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		pevent_read_number_field(field, a->raw_data, &dyn);
		/* low 16 bits: offset, next 16 bits: length */
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;

		/*
		 * NOTE(review): offset/size are decoded from 'a' only and
		 * then applied to 'b' as well -- confirm both records are
		 * guaranteed to share the same layout here.
		 */
	} else {
		offset = field->offset;
		size = field->size;

		update_dynamic_len(hde, a);
		update_dynamic_len(hde, b);
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}
1800
1801 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1802 {
1803 return fmt->cmp == __sort__hde_cmp;
1804 }
1805
1806 static struct hpp_dynamic_entry *
1807 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
1808 {
1809 struct hpp_dynamic_entry *hde;
1810
1811 hde = malloc(sizeof(*hde));
1812 if (hde == NULL) {
1813 pr_debug("Memory allocation failed\n");
1814 return NULL;
1815 }
1816
1817 hde->evsel = evsel;
1818 hde->field = field;
1819 hde->dynamic_len = 0;
1820
1821 hde->hpp.name = field->name;
1822 hde->hpp.header = __sort__hde_header;
1823 hde->hpp.width = __sort__hde_width;
1824 hde->hpp.entry = __sort__hde_entry;
1825 hde->hpp.color = NULL;
1826
1827 hde->hpp.cmp = __sort__hde_cmp;
1828 hde->hpp.collapse = __sort__hde_cmp;
1829 hde->hpp.sort = __sort__hde_cmp;
1830
1831 INIT_LIST_HEAD(&hde->hpp.list);
1832 INIT_LIST_HEAD(&hde->hpp.sort_list);
1833 hde->hpp.elide = false;
1834 hde->hpp.len = 0;
1835 hde->hpp.user_len = 0;
1836
1837 return hde;
1838 }
1839
/*
 * Split "event.field/opt" in place.  Any part except the field may be
 * absent: with no '.' the whole string is the field name and *event is
 * set to NULL; with no '/' *opt is set to NULL.  Always returns 0.
 */
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *dot = strchr(str, '.');
	char *slash;

	if (dot != NULL) {
		*dot++ = '\0';
		*event = str;
		*field = dot;
	} else {
		*event = NULL;
		*field = str;
	}

	slash = strchr(*field, '/');
	if (slash != NULL)
		*slash++ = '\0';
	*opt = slash;

	return 0;
}
1864
/* find match evsel using a given event name. The event name can be:
 * 1. '%' + event index (e.g. '%1' for first event)
 * 2. full event name (e.g. sched:sched_switch)
 * 3. partial event name (should not contain ':')
 *
 * Returns NULL when the index is out of range or a partial name matches
 * more than one event.
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)
			return NULL;

		/* indices are 1-based: '%1' is the first event */
		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	/* a name containing ':' must match exactly */
	full_name = !!strchr(event_name, ':');
	evlist__for_each(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3: substring match, rejected if ambiguous */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}
1908
1909 static int __dynamic_dimension__add(struct perf_evsel *evsel,
1910 struct format_field *field,
1911 bool raw_trace)
1912 {
1913 struct hpp_dynamic_entry *hde;
1914
1915 hde = __alloc_dynamic_entry(evsel, field);
1916 if (hde == NULL)
1917 return -ENOMEM;
1918
1919 hde->raw_trace = raw_trace;
1920
1921 perf_hpp__register_sort_field(&hde->hpp);
1922 return 0;
1923 }
1924
1925 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
1926 {
1927 int ret;
1928 struct format_field *field;
1929
1930 field = evsel->tp_format->format.fields;
1931 while (field) {
1932 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1933 if (ret < 0)
1934 return ret;
1935
1936 field = field->next;
1937 }
1938 return 0;
1939 }
1940
1941 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
1942 {
1943 int ret;
1944 struct perf_evsel *evsel;
1945
1946 evlist__for_each(evlist, evsel) {
1947 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1948 continue;
1949
1950 ret = add_evsel_fields(evsel, raw_trace);
1951 if (ret < 0)
1952 return ret;
1953 }
1954 return 0;
1955 }
1956
1957 static int add_all_matching_fields(struct perf_evlist *evlist,
1958 char *field_name, bool raw_trace)
1959 {
1960 int ret = -ESRCH;
1961 struct perf_evsel *evsel;
1962 struct format_field *field;
1963
1964 evlist__for_each(evlist, evsel) {
1965 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1966 continue;
1967
1968 field = pevent_find_any_field(evsel->tp_format, field_name);
1969 if (field == NULL)
1970 continue;
1971
1972 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1973 if (ret < 0)
1974 break;
1975 }
1976 return ret;
1977 }
1978
1979 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
1980 {
1981 char *str, *event_name, *field_name, *opt_name;
1982 struct perf_evsel *evsel;
1983 struct format_field *field;
1984 bool raw_trace = symbol_conf.raw_trace;
1985 int ret = 0;
1986
1987 if (evlist == NULL)
1988 return -ENOENT;
1989
1990 str = strdup(tok);
1991 if (str == NULL)
1992 return -ENOMEM;
1993
1994 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
1995 ret = -EINVAL;
1996 goto out;
1997 }
1998
1999 if (opt_name) {
2000 if (strcmp(opt_name, "raw")) {
2001 pr_debug("unsupported field option %s\n", opt_name);
2002 ret = -EINVAL;
2003 goto out;
2004 }
2005 raw_trace = true;
2006 }
2007
2008 if (!strcmp(field_name, "trace_fields")) {
2009 ret = add_all_dynamic_fields(evlist, raw_trace);
2010 goto out;
2011 }
2012
2013 if (event_name == NULL) {
2014 ret = add_all_matching_fields(evlist, field_name, raw_trace);
2015 goto out;
2016 }
2017
2018 evsel = find_evsel(evlist, event_name);
2019 if (evsel == NULL) {
2020 pr_debug("Cannot find event: %s\n", event_name);
2021 ret = -ENOENT;
2022 goto out;
2023 }
2024
2025 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2026 pr_debug("%s is not a tracepoint event\n", event_name);
2027 ret = -EINVAL;
2028 goto out;
2029 }
2030
2031 if (!strcmp(field_name, "*")) {
2032 ret = add_evsel_fields(evsel, raw_trace);
2033 } else {
2034 field = pevent_find_any_field(evsel->tp_format, field_name);
2035 if (field == NULL) {
2036 pr_debug("Cannot find event field for %s.%s\n",
2037 event_name, field_name);
2038 return -ENOENT;
2039 }
2040
2041 ret = __dynamic_dimension__add(evsel, field, raw_trace);
2042 }
2043
2044 out:
2045 free(str);
2046 return ret;
2047 }
2048
2049 static int __sort_dimension__add(struct sort_dimension *sd)
2050 {
2051 if (sd->taken)
2052 return 0;
2053
2054 if (__sort_dimension__add_hpp_sort(sd) < 0)
2055 return -1;
2056
2057 if (sd->entry->se_collapse)
2058 sort__need_collapse = 1;
2059
2060 sd->taken = 1;
2061
2062 return 0;
2063 }
2064
2065 static int __hpp_dimension__add(struct hpp_dimension *hd)
2066 {
2067 if (!hd->taken) {
2068 hd->taken = 1;
2069
2070 perf_hpp__register_sort_field(hd->fmt);
2071 }
2072 return 0;
2073 }
2074
2075 static int __sort_dimension__add_output(struct sort_dimension *sd)
2076 {
2077 if (sd->taken)
2078 return 0;
2079
2080 if (__sort_dimension__add_hpp_output(sd) < 0)
2081 return -1;
2082
2083 sd->taken = 1;
2084 return 0;
2085 }
2086
2087 static int __hpp_dimension__add_output(struct hpp_dimension *hd)
2088 {
2089 if (!hd->taken) {
2090 hd->taken = 1;
2091
2092 perf_hpp__column_register(hd->fmt);
2093 }
2094 return 0;
2095 }
2096
2097 int hpp_dimension__add_output(unsigned col)
2098 {
2099 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2100 return __hpp_dimension__add_output(&hpp_sort_dimensions[col]);
2101 }
2102
2103 static int sort_dimension__add(const char *tok,
2104 struct perf_evlist *evlist __maybe_unused)
2105 {
2106 unsigned int i;
2107
2108 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2109 struct sort_dimension *sd = &common_sort_dimensions[i];
2110
2111 if (strncasecmp(tok, sd->name, strlen(tok)))
2112 continue;
2113
2114 if (sd->entry == &sort_parent) {
2115 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2116 if (ret) {
2117 char err[BUFSIZ];
2118
2119 regerror(ret, &parent_regex, err, sizeof(err));
2120 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2121 return -EINVAL;
2122 }
2123 sort__has_parent = 1;
2124 } else if (sd->entry == &sort_sym) {
2125 sort__has_sym = 1;
2126 /*
2127 * perf diff displays the performance difference amongst
2128 * two or more perf.data files. Those files could come
2129 * from different binaries. So we should not compare
2130 * their ips, but the name of symbol.
2131 */
2132 if (sort__mode == SORT_MODE__DIFF)
2133 sd->entry->se_collapse = sort__sym_sort;
2134
2135 } else if (sd->entry == &sort_dso) {
2136 sort__has_dso = 1;
2137 } else if (sd->entry == &sort_socket) {
2138 sort__has_socket = 1;
2139 }
2140
2141 return __sort_dimension__add(sd);
2142 }
2143
2144 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2145 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2146
2147 if (strncasecmp(tok, hd->name, strlen(tok)))
2148 continue;
2149
2150 return __hpp_dimension__add(hd);
2151 }
2152
2153 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2154 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2155
2156 if (strncasecmp(tok, sd->name, strlen(tok)))
2157 continue;
2158
2159 if (sort__mode != SORT_MODE__BRANCH)
2160 return -EINVAL;
2161
2162 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2163 sort__has_sym = 1;
2164
2165 __sort_dimension__add(sd);
2166 return 0;
2167 }
2168
2169 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2170 struct sort_dimension *sd = &memory_sort_dimensions[i];
2171
2172 if (strncasecmp(tok, sd->name, strlen(tok)))
2173 continue;
2174
2175 if (sort__mode != SORT_MODE__MEMORY)
2176 return -EINVAL;
2177
2178 if (sd->entry == &sort_mem_daddr_sym)
2179 sort__has_sym = 1;
2180
2181 __sort_dimension__add(sd);
2182 return 0;
2183 }
2184
2185 if (!add_dynamic_entry(evlist, tok))
2186 return 0;
2187
2188 return -ESRCH;
2189 }
2190
2191 static const char *get_default_sort_order(struct perf_evlist *evlist)
2192 {
2193 const char *default_sort_orders[] = {
2194 default_sort_order,
2195 default_branch_sort_order,
2196 default_mem_sort_order,
2197 default_top_sort_order,
2198 default_diff_sort_order,
2199 default_tracepoint_sort_order,
2200 };
2201 bool use_trace = true;
2202 struct perf_evsel *evsel;
2203
2204 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2205
2206 if (evlist == NULL)
2207 goto out_no_evlist;
2208
2209 evlist__for_each(evlist, evsel) {
2210 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2211 use_trace = false;
2212 break;
2213 }
2214 }
2215
2216 if (use_trace) {
2217 sort__mode = SORT_MODE__TRACEPOINT;
2218 if (symbol_conf.raw_trace)
2219 return "trace_fields";
2220 }
2221 out_no_evlist:
2222 return default_sort_orders[sort__mode];
2223 }
2224
2225 static int setup_sort_order(struct perf_evlist *evlist)
2226 {
2227 char *new_sort_order;
2228
2229 /*
2230 * Append '+'-prefixed sort order to the default sort
2231 * order string.
2232 */
2233 if (!sort_order || is_strict_order(sort_order))
2234 return 0;
2235
2236 if (sort_order[1] == '\0') {
2237 error("Invalid --sort key: `+'");
2238 return -EINVAL;
2239 }
2240
2241 /*
2242 * We allocate new sort_order string, but we never free it,
2243 * because it's checked over the rest of the code.
2244 */
2245 if (asprintf(&new_sort_order, "%s,%s",
2246 get_default_sort_order(evlist), sort_order + 1) < 0) {
2247 error("Not enough memory to set up --sort");
2248 return -ENOMEM;
2249 }
2250
2251 sort_order = new_sort_order;
2252 return 0;
2253 }
2254
2255 /*
2256 * Adds 'pre,' prefix into 'str' is 'pre' is
2257 * not already part of 'str'.
2258 */
2259 static char *prefix_if_not_in(const char *pre, char *str)
2260 {
2261 char *n;
2262
2263 if (!str || strstr(str, pre))
2264 return str;
2265
2266 if (asprintf(&n, "%s,%s", pre, str) < 0)
2267 return NULL;
2268
2269 free(str);
2270 return n;
2271 }
2272
2273 static char *setup_overhead(char *keys)
2274 {
2275 keys = prefix_if_not_in("overhead", keys);
2276
2277 if (symbol_conf.cumulate_callchain)
2278 keys = prefix_if_not_in("overhead_children", keys);
2279
2280 return keys;
2281 }
2282
2283 static int __setup_sorting(struct perf_evlist *evlist)
2284 {
2285 char *tmp, *tok, *str;
2286 const char *sort_keys;
2287 int ret = 0;
2288
2289 ret = setup_sort_order(evlist);
2290 if (ret)
2291 return ret;
2292
2293 sort_keys = sort_order;
2294 if (sort_keys == NULL) {
2295 if (is_strict_order(field_order)) {
2296 /*
2297 * If user specified field order but no sort order,
2298 * we'll honor it and not add default sort orders.
2299 */
2300 return 0;
2301 }
2302
2303 sort_keys = get_default_sort_order(evlist);
2304 }
2305
2306 str = strdup(sort_keys);
2307 if (str == NULL) {
2308 error("Not enough memory to setup sort keys");
2309 return -ENOMEM;
2310 }
2311
2312 /*
2313 * Prepend overhead fields for backward compatibility.
2314 */
2315 if (!is_strict_order(field_order)) {
2316 str = setup_overhead(str);
2317 if (str == NULL) {
2318 error("Not enough memory to setup overhead keys");
2319 return -ENOMEM;
2320 }
2321 }
2322
2323 for (tok = strtok_r(str, ", ", &tmp);
2324 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2325 ret = sort_dimension__add(tok, evlist);
2326 if (ret == -EINVAL) {
2327 error("Invalid --sort key: `%s'", tok);
2328 break;
2329 } else if (ret == -ESRCH) {
2330 error("Unknown --sort key: `%s'", tok);
2331 break;
2332 }
2333 }
2334
2335 free(str);
2336 return ret;
2337 }
2338
2339 void perf_hpp__set_elide(int idx, bool elide)
2340 {
2341 struct perf_hpp_fmt *fmt;
2342 struct hpp_sort_entry *hse;
2343
2344 perf_hpp__for_each_format(fmt) {
2345 if (!perf_hpp__is_sort_entry(fmt))
2346 continue;
2347
2348 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2349 if (hse->se->se_width_idx == idx) {
2350 fmt->elide = elide;
2351 break;
2352 }
2353 }
2354 }
2355
2356 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2357 {
2358 if (list && strlist__nr_entries(list) == 1) {
2359 if (fp != NULL)
2360 fprintf(fp, "# %s: %s\n", list_name,
2361 strlist__entry(list, 0)->s);
2362 return true;
2363 }
2364 return false;
2365 }
2366
2367 static bool get_elide(int idx, FILE *output)
2368 {
2369 switch (idx) {
2370 case HISTC_SYMBOL:
2371 return __get_elide(symbol_conf.sym_list, "symbol", output);
2372 case HISTC_DSO:
2373 return __get_elide(symbol_conf.dso_list, "dso", output);
2374 case HISTC_COMM:
2375 return __get_elide(symbol_conf.comm_list, "comm", output);
2376 default:
2377 break;
2378 }
2379
2380 if (sort__mode != SORT_MODE__BRANCH)
2381 return false;
2382
2383 switch (idx) {
2384 case HISTC_SYMBOL_FROM:
2385 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2386 case HISTC_SYMBOL_TO:
2387 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2388 case HISTC_DSO_FROM:
2389 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2390 case HISTC_DSO_TO:
2391 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2392 default:
2393 break;
2394 }
2395
2396 return false;
2397 }
2398
2399 void sort__setup_elide(FILE *output)
2400 {
2401 struct perf_hpp_fmt *fmt;
2402 struct hpp_sort_entry *hse;
2403
2404 perf_hpp__for_each_format(fmt) {
2405 if (!perf_hpp__is_sort_entry(fmt))
2406 continue;
2407
2408 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2409 fmt->elide = get_elide(hse->se->se_width_idx, output);
2410 }
2411
2412 /*
2413 * It makes no sense to elide all of sort entries.
2414 * Just revert them to show up again.
2415 */
2416 perf_hpp__for_each_format(fmt) {
2417 if (!perf_hpp__is_sort_entry(fmt))
2418 continue;
2419
2420 if (!fmt->elide)
2421 return;
2422 }
2423
2424 perf_hpp__for_each_format(fmt) {
2425 if (!perf_hpp__is_sort_entry(fmt))
2426 continue;
2427
2428 fmt->elide = false;
2429 }
2430 }
2431
2432 static int output_field_add(char *tok)
2433 {
2434 unsigned int i;
2435
2436 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2437 struct sort_dimension *sd = &common_sort_dimensions[i];
2438
2439 if (strncasecmp(tok, sd->name, strlen(tok)))
2440 continue;
2441
2442 return __sort_dimension__add_output(sd);
2443 }
2444
2445 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2446 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2447
2448 if (strncasecmp(tok, hd->name, strlen(tok)))
2449 continue;
2450
2451 return __hpp_dimension__add_output(hd);
2452 }
2453
2454 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2455 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2456
2457 if (strncasecmp(tok, sd->name, strlen(tok)))
2458 continue;
2459
2460 return __sort_dimension__add_output(sd);
2461 }
2462
2463 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2464 struct sort_dimension *sd = &memory_sort_dimensions[i];
2465
2466 if (strncasecmp(tok, sd->name, strlen(tok)))
2467 continue;
2468
2469 return __sort_dimension__add_output(sd);
2470 }
2471
2472 return -ESRCH;
2473 }
2474
2475 static void reset_dimensions(void)
2476 {
2477 unsigned int i;
2478
2479 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2480 common_sort_dimensions[i].taken = 0;
2481
2482 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2483 hpp_sort_dimensions[i].taken = 0;
2484
2485 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2486 bstack_sort_dimensions[i].taken = 0;
2487
2488 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2489 memory_sort_dimensions[i].taken = 0;
2490 }
2491
2492 bool is_strict_order(const char *order)
2493 {
2494 return order && (*order != '+');
2495 }
2496
2497 static int __setup_output_field(void)
2498 {
2499 char *tmp, *tok, *str, *strp;
2500 int ret = -EINVAL;
2501
2502 if (field_order == NULL)
2503 return 0;
2504
2505 strp = str = strdup(field_order);
2506 if (str == NULL) {
2507 error("Not enough memory to setup output fields");
2508 return -ENOMEM;
2509 }
2510
2511 if (!is_strict_order(field_order))
2512 strp++;
2513
2514 if (!strlen(strp)) {
2515 error("Invalid --fields key: `+'");
2516 goto out;
2517 }
2518
2519 for (tok = strtok_r(strp, ", ", &tmp);
2520 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2521 ret = output_field_add(tok);
2522 if (ret == -EINVAL) {
2523 error("Invalid --fields key: `%s'", tok);
2524 break;
2525 } else if (ret == -ESRCH) {
2526 error("Unknown --fields key: `%s'", tok);
2527 break;
2528 }
2529 }
2530
2531 out:
2532 free(str);
2533 return ret;
2534 }
2535
/*
 * Set up sort keys and output fields for a session.  The call order
 * below matters: sort keys are registered first, then the dimension
 * "taken" marks are cleared so the --fields parser may reuse them,
 * and finally the hpp lists are cross-linked.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	/* A non-default parent pattern implies sorting by parent too. */
	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add("parent", evlist);
		if (err < 0)
			return err;
	}

	/* Allow __setup_output_field() below to pick the same keys again. */
	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field();
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys();

	return 0;
}
2569
2570 void reset_output_field(void)
2571 {
2572 sort__need_collapse = 0;
2573 sort__has_parent = 0;
2574 sort__has_sym = 0;
2575 sort__has_dso = 0;
2576
2577 field_order = NULL;
2578 sort_order = NULL;
2579
2580 reset_dimensions();
2581 perf_hpp__reset_output_field();
2582 }
This page took 0.120718 seconds and 6 git commands to generate.