tools/perf/util/sort.c
1 #include <sys/mman.h>
2 #include "sort.h"
3 #include "hist.h"
4 #include "comm.h"
5 #include "symbol.h"
6 #include "evsel.h"
7
8 regex_t parent_regex;
9 const char default_parent_pattern[] = "^sys_|^do_page_fault";
10 const char *parent_pattern = default_parent_pattern;
11 const char default_sort_order[] = "comm,dso,symbol";
12 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,dso_to,symbol_to";
13 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
14 const char default_top_sort_order[] = "dso,symbol";
15 const char default_diff_sort_order[] = "dso,symbol";
16 const char *sort_order;
17 const char *field_order;
18 regex_t ignore_callees_regex;
19 int have_ignore_callees = 0;
20 int sort__need_collapse = 0;
21 int sort__has_parent = 0;
22 int sort__has_sym = 0;
23 int sort__has_dso = 0;
24 enum sort_mode sort__mode = SORT_MODE__NORMAL;
25
26
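/*
 * repsep_snprintf(): vsnprintf() wrapper used by the column printers below.
 * When symbol_conf.field_sep is set (the --field-separator option), any
 * occurrence of the separator character in the formatted output is replaced
 * with '.', so the separator stays unambiguous.  On truncation it returns
 * size - 1, the number of characters actually stored.
 */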
27 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
28 {
29 int n;
30 va_list ap;
31
32 va_start(ap, fmt);
33 n = vsnprintf(bf, size, fmt, ap);
34 if (symbol_conf.field_sep && n > 0) {
35 char *sep = bf;
36
37 while (1) {
38 sep = strchr(sep, *symbol_conf.field_sep);
39 if (sep == NULL)
40 break;
41 *sep = '.';
42 }
43 }
44 va_end(ap);
45
46 if (n >= (int)size)
47 return size - 1;
48 return n;
49 }
50
51 static int64_t cmp_null(const void *l, const void *r)
52 {
53 if (!l && !r)
54 return 0;
55 else if (!l)
56 return -1;
57 else
58 return 1;
59 }
60
61 /* --sort pid */
62
63 static int64_t
64 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
65 {
66 return right->thread->tid - left->thread->tid;
67 }
68
69 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
70 size_t size, unsigned int width)
71 {
72 const char *comm = thread__comm_str(he->thread);
73 return repsep_snprintf(bf, size, "%*s:%5d", width - 6,
74 comm ?: "", he->thread->tid);
75 }
76
77 struct sort_entry sort_thread = {
78 .se_header = "Command: Pid",
79 .se_cmp = sort__thread_cmp,
80 .se_snprintf = hist_entry__thread_snprintf,
81 .se_width_idx = HISTC_THREAD,
82 };
83
84 /* --sort comm */
85
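/*
 * comm__str() returns an interned string, so ->se_cmp and ->se_collapse can
 * order entries by the string's address (cheap and stable for collapsing),
 * while ->se_sort falls back to strcmp() for the displayed ordering.
 */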
86 static int64_t
87 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
88 {
89 /* Compare the string addresses, which should be unique per comm */
90 return comm__str(right->comm) - comm__str(left->comm);
91 }
92
93 static int64_t
94 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
95 {
96 /* Compare the string addresses, which should be unique per comm */
97 return comm__str(right->comm) - comm__str(left->comm);
98 }
99
100 static int64_t
101 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
102 {
103 return strcmp(comm__str(right->comm), comm__str(left->comm));
104 }
105
106 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
107 size_t size, unsigned int width)
108 {
109 return repsep_snprintf(bf, size, "%*s", width, comm__str(he->comm));
110 }
111
112 struct sort_entry sort_comm = {
113 .se_header = "Command",
114 .se_cmp = sort__comm_cmp,
115 .se_collapse = sort__comm_collapse,
116 .se_sort = sort__comm_sort,
117 .se_snprintf = hist_entry__comm_snprintf,
118 .se_width_idx = HISTC_COMM,
119 };
120
121 /* --sort dso */
122
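/*
 * With -v/--verbose the long dso name is compared and printed, otherwise the
 * short name; entries without a dso are ordered via cmp_null() and printed
 * as "[unknown]".
 */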
123 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
124 {
125 struct dso *dso_l = map_l ? map_l->dso : NULL;
126 struct dso *dso_r = map_r ? map_r->dso : NULL;
127 const char *dso_name_l, *dso_name_r;
128
129 if (!dso_l || !dso_r)
130 return cmp_null(dso_r, dso_l);
131
132 if (verbose) {
133 dso_name_l = dso_l->long_name;
134 dso_name_r = dso_r->long_name;
135 } else {
136 dso_name_l = dso_l->short_name;
137 dso_name_r = dso_r->short_name;
138 }
139
140 return strcmp(dso_name_l, dso_name_r);
141 }
142
143 static int64_t
144 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
145 {
146 return _sort__dso_cmp(right->ms.map, left->ms.map);
147 }
148
149 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
150 size_t size, unsigned int width)
151 {
152 if (map && map->dso) {
153 const char *dso_name = !verbose ? map->dso->short_name :
154 map->dso->long_name;
155 return repsep_snprintf(bf, size, "%-*s", width, dso_name);
156 }
157
158 return repsep_snprintf(bf, size, "%-*s", width, "[unknown]");
159 }
160
161 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
162 size_t size, unsigned int width)
163 {
164 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
165 }
166
167 struct sort_entry sort_dso = {
168 .se_header = "Shared Object",
169 .se_cmp = sort__dso_cmp,
170 .se_snprintf = hist_entry__dso_snprintf,
171 .se_width_idx = HISTC_DSO,
172 };
173
174 /* --sort symbol */
175
176 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
177 {
178 return (int64_t)(right_ip - left_ip);
179 }
180
181 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
182 {
183 u64 ip_l, ip_r;
184
185 if (!sym_l || !sym_r)
186 return cmp_null(sym_l, sym_r);
187
188 if (sym_l == sym_r)
189 return 0;
190
191 ip_l = sym_l->start;
192 ip_r = sym_r->start;
193
194 return (int64_t)(ip_r - ip_l);
195 }
196
197 static int64_t
198 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
199 {
200 int64_t ret;
201
202 if (!left->ms.sym && !right->ms.sym)
203 return _sort__addr_cmp(left->ip, right->ip);
204
205 /*
206 * comparing symbol address alone is not enough since it's a
207 * relative address within a dso.
208 */
209 if (!sort__has_dso) {
210 ret = sort__dso_cmp(left, right);
211 if (ret != 0)
212 return ret;
213 }
214
215 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
216 }
217
218 static int64_t
219 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
220 {
221 if (!left->ms.sym || !right->ms.sym)
222 return cmp_null(left->ms.sym, right->ms.sym);
223
224 return strcmp(right->ms.sym->name, left->ms.sym->name);
225 }
226
227 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
228 u64 ip, char level, char *bf, size_t size,
229 unsigned int width)
230 {
231 size_t ret = 0;
232
233 if (verbose) {
234 char o = map ? dso__symtab_origin(map->dso) : '!';
235 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
236 BITS_PER_LONG / 4 + 2, ip, o);
237 }
238
239 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
240 if (sym && map) {
241 if (map->type == MAP__VARIABLE) {
242 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
243 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
244 ip - map->unmap_ip(map, sym->start));
245 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
246 width - ret, "");
247 } else {
248 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
249 width - ret,
250 sym->name);
251 }
252 } else {
253 size_t len = BITS_PER_LONG / 4;
254 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
255 len, ip);
256 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
257 width - ret, "");
258 }
259
260 return ret;
261 }
262
263 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
264 size_t size, unsigned int width)
265 {
266 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
267 he->level, bf, size, width);
268 }
269
270 struct sort_entry sort_sym = {
271 .se_header = "Symbol",
272 .se_cmp = sort__sym_cmp,
273 .se_sort = sort__sym_sort,
274 .se_snprintf = hist_entry__sym_snprintf,
275 .se_width_idx = HISTC_SYMBOL,
276 };
277
278 /* --sort srcline */
279
280 static int64_t
281 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
282 {
283 if (!left->srcline) {
284 if (!left->ms.map)
285 left->srcline = SRCLINE_UNKNOWN;
286 else {
287 struct map *map = left->ms.map;
288 left->srcline = get_srcline(map->dso,
289 map__rip_2objdump(map, left->ip));
290 }
291 }
292 if (!right->srcline) {
293 if (!right->ms.map)
294 right->srcline = SRCLINE_UNKNOWN;
295 else {
296 struct map *map = right->ms.map;
297 right->srcline = get_srcline(map->dso,
298 map__rip_2objdump(map, right->ip));
299 }
300 }
301 return strcmp(right->srcline, left->srcline);
302 }
303
304 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
305 size_t size,
306 unsigned int width __maybe_unused)
307 {
308 return repsep_snprintf(bf, size, "%s", he->srcline);
309 }
310
311 struct sort_entry sort_srcline = {
312 .se_header = "Source:Line",
313 .se_cmp = sort__srcline_cmp,
314 .se_snprintf = hist_entry__srcline_snprintf,
315 .se_width_idx = HISTC_SRCLINE,
316 };
317
318 /* --sort parent */
319
320 static int64_t
321 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
322 {
323 struct symbol *sym_l = left->parent;
324 struct symbol *sym_r = right->parent;
325
326 if (!sym_l || !sym_r)
327 return cmp_null(sym_l, sym_r);
328
329 return strcmp(sym_r->name, sym_l->name);
330 }
331
332 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
333 size_t size, unsigned int width)
334 {
335 return repsep_snprintf(bf, size, "%-*s", width,
336 he->parent ? he->parent->name : "[other]");
337 }
338
339 struct sort_entry sort_parent = {
340 .se_header = "Parent symbol",
341 .se_cmp = sort__parent_cmp,
342 .se_snprintf = hist_entry__parent_snprintf,
343 .se_width_idx = HISTC_PARENT,
344 };
345
346 /* --sort cpu */
347
348 static int64_t
349 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
350 {
351 return right->cpu - left->cpu;
352 }
353
354 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
355 size_t size, unsigned int width)
356 {
357 return repsep_snprintf(bf, size, "%*d", width, he->cpu);
358 }
359
360 struct sort_entry sort_cpu = {
361 .se_header = "CPU",
362 .se_cmp = sort__cpu_cmp,
363 .se_snprintf = hist_entry__cpu_snprintf,
364 .se_width_idx = HISTC_CPU,
365 };
366
367 /* sort keys for branch stacks */
368
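/*
 * The dso_from/dso_to and symbol_from/symbol_to keys reuse the generic dso
 * and symbol helpers above, but read map, symbol and address from
 * branch_info->from and branch_info->to instead of the sample itself.
 */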
369 static int64_t
370 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
371 {
372 return _sort__dso_cmp(left->branch_info->from.map,
373 right->branch_info->from.map);
374 }
375
376 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
377 size_t size, unsigned int width)
378 {
379 return _hist_entry__dso_snprintf(he->branch_info->from.map,
380 bf, size, width);
381 }
382
383 static int64_t
384 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
385 {
386 return _sort__dso_cmp(left->branch_info->to.map,
387 right->branch_info->to.map);
388 }
389
390 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
391 size_t size, unsigned int width)
392 {
393 return _hist_entry__dso_snprintf(he->branch_info->to.map,
394 bf, size, width);
395 }
396
397 static int64_t
398 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
399 {
400 struct addr_map_symbol *from_l = &left->branch_info->from;
401 struct addr_map_symbol *from_r = &right->branch_info->from;
402
403 if (!from_l->sym && !from_r->sym)
404 return _sort__addr_cmp(from_l->addr, from_r->addr);
405
406 return _sort__sym_cmp(from_l->sym, from_r->sym);
407 }
408
409 static int64_t
410 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
411 {
412 struct addr_map_symbol *to_l = &left->branch_info->to;
413 struct addr_map_symbol *to_r = &right->branch_info->to;
414
415 if (!to_l->sym && !to_r->sym)
416 return _sort__addr_cmp(to_l->addr, to_r->addr);
417
418 return _sort__sym_cmp(to_l->sym, to_r->sym);
419 }
420
421 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
422 size_t size, unsigned int width)
423 {
424 struct addr_map_symbol *from = &he->branch_info->from;
425 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
426 he->level, bf, size, width);
427
428 }
429
430 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
431 size_t size, unsigned int width)
432 {
433 struct addr_map_symbol *to = &he->branch_info->to;
434 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
435 he->level, bf, size, width);
436
437 }
438
439 struct sort_entry sort_dso_from = {
440 .se_header = "Source Shared Object",
441 .se_cmp = sort__dso_from_cmp,
442 .se_snprintf = hist_entry__dso_from_snprintf,
443 .se_width_idx = HISTC_DSO_FROM,
444 };
445
446 struct sort_entry sort_dso_to = {
447 .se_header = "Target Shared Object",
448 .se_cmp = sort__dso_to_cmp,
449 .se_snprintf = hist_entry__dso_to_snprintf,
450 .se_width_idx = HISTC_DSO_TO,
451 };
452
453 struct sort_entry sort_sym_from = {
454 .se_header = "Source Symbol",
455 .se_cmp = sort__sym_from_cmp,
456 .se_snprintf = hist_entry__sym_from_snprintf,
457 .se_width_idx = HISTC_SYMBOL_FROM,
458 };
459
460 struct sort_entry sort_sym_to = {
461 .se_header = "Target Symbol",
462 .se_cmp = sort__sym_to_cmp,
463 .se_snprintf = hist_entry__sym_to_snprintf,
464 .se_width_idx = HISTC_SYMBOL_TO,
465 };
466
467 static int64_t
468 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
469 {
470 const unsigned char mp = left->branch_info->flags.mispred !=
471 right->branch_info->flags.mispred;
472 const unsigned char p = left->branch_info->flags.predicted !=
473 right->branch_info->flags.predicted;
474
475 return mp || p;
476 }
477
478 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
479 size_t size, unsigned int width){
480 const char *out = "N/A";
481
482 if (he->branch_info->flags.predicted)
483 out = "N";
484 else if (he->branch_info->flags.mispred)
485 out = "Y";
486
487 return repsep_snprintf(bf, size, "%-*s", width, out);
488 }
489
490 /* --sort daddr_sym */
491 static int64_t
492 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
493 {
494 uint64_t l = 0, r = 0;
495
496 if (left->mem_info)
497 l = left->mem_info->daddr.addr;
498 if (right->mem_info)
499 r = right->mem_info->daddr.addr;
500
501 return (int64_t)(r - l);
502 }
503
504 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
505 size_t size, unsigned int width)
506 {
507 uint64_t addr = 0;
508 struct map *map = NULL;
509 struct symbol *sym = NULL;
510
511 if (he->mem_info) {
512 addr = he->mem_info->daddr.addr;
513 map = he->mem_info->daddr.map;
514 sym = he->mem_info->daddr.sym;
515 }
516 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
517 width);
518 }
519
520 static int64_t
521 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
522 {
523 struct map *map_l = NULL;
524 struct map *map_r = NULL;
525
526 if (left->mem_info)
527 map_l = left->mem_info->daddr.map;
528 if (right->mem_info)
529 map_r = right->mem_info->daddr.map;
530
531 return _sort__dso_cmp(map_l, map_r);
532 }
533
534 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
535 size_t size, unsigned int width)
536 {
537 struct map *map = NULL;
538
539 if (he->mem_info)
540 map = he->mem_info->daddr.map;
541
542 return _hist_entry__dso_snprintf(map, bf, size, width);
543 }
544
545 static int64_t
546 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
547 {
548 union perf_mem_data_src data_src_l;
549 union perf_mem_data_src data_src_r;
550
551 if (left->mem_info)
552 data_src_l = left->mem_info->data_src;
553 else
554 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
555
556 if (right->mem_info)
557 data_src_r = right->mem_info->data_src;
558 else
559 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
560
561 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
562 }
563
564 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
565 size_t size, unsigned int width)
566 {
567 const char *out;
568 u64 mask = PERF_MEM_LOCK_NA;
569
570 if (he->mem_info)
571 mask = he->mem_info->data_src.mem_lock;
572
573 if (mask & PERF_MEM_LOCK_NA)
574 out = "N/A";
575 else if (mask & PERF_MEM_LOCK_LOCKED)
576 out = "Yes";
577 else
578 out = "No";
579
580 return repsep_snprintf(bf, size, "%-*s", width, out);
581 }
582
583 static int64_t
584 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
585 {
586 union perf_mem_data_src data_src_l;
587 union perf_mem_data_src data_src_r;
588
589 if (left->mem_info)
590 data_src_l = left->mem_info->data_src;
591 else
592 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
593
594 if (right->mem_info)
595 data_src_r = right->mem_info->data_src;
596 else
597 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
598
599 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
600 }
601
602 static const char * const tlb_access[] = {
603 "N/A",
604 "HIT",
605 "MISS",
606 "L1",
607 "L2",
608 "Walker",
609 "Fault",
610 };
611 #define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
612
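/*
 * Decode the mem_dtlb bit mask: the HIT/MISS bits are pulled out first and
 * appended at the end as " hit"/" miss"; every remaining set bit is mapped
 * to its tlb_access[] name, multiple names being joined with " or ".  The
 * same pattern is used for the memory level and snoop columns below.
 */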
613 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
614 size_t size, unsigned int width)
615 {
616 char out[64];
617 size_t sz = sizeof(out) - 1; /* -1 for null termination */
618 size_t l = 0, i;
619 u64 m = PERF_MEM_TLB_NA;
620 u64 hit, miss;
621
622 out[0] = '\0';
623
624 if (he->mem_info)
625 m = he->mem_info->data_src.mem_dtlb;
626
627 hit = m & PERF_MEM_TLB_HIT;
628 miss = m & PERF_MEM_TLB_MISS;
629
630 /* already taken care of */
631 m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
632
633 for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
634 if (!(m & 0x1))
635 continue;
636 if (l) {
637 strcat(out, " or ");
638 l += 4;
639 }
640 strncat(out, tlb_access[i], sz - l);
641 l += strlen(tlb_access[i]);
642 }
643 if (*out == '\0')
644 strcpy(out, "N/A");
645 if (hit)
646 strncat(out, " hit", sz - l);
647 if (miss)
648 strncat(out, " miss", sz - l);
649
650 return repsep_snprintf(bf, size, "%-*s", width, out);
651 }
652
653 static int64_t
654 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
655 {
656 union perf_mem_data_src data_src_l;
657 union perf_mem_data_src data_src_r;
658
659 if (left->mem_info)
660 data_src_l = left->mem_info->data_src;
661 else
662 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
663
664 if (right->mem_info)
665 data_src_r = right->mem_info->data_src;
666 else
667 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
668
669 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
670 }
671
672 static const char * const mem_lvl[] = {
673 "N/A",
674 "HIT",
675 "MISS",
676 "L1",
677 "LFB",
678 "L2",
679 "L3",
680 "Local RAM",
681 "Remote RAM (1 hop)",
682 "Remote RAM (2 hops)",
683 "Remote Cache (1 hop)",
684 "Remote Cache (2 hops)",
685 "I/O",
686 "Uncached",
687 };
688 #define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
689
690 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
691 size_t size, unsigned int width)
692 {
693 char out[64];
694 size_t sz = sizeof(out) - 1; /* -1 for null termination */
695 size_t i, l = 0;
696 u64 m = PERF_MEM_LVL_NA;
697 u64 hit, miss;
698
699 if (he->mem_info)
700 m = he->mem_info->data_src.mem_lvl;
701
702 out[0] = '\0';
703
704 hit = m & PERF_MEM_LVL_HIT;
705 miss = m & PERF_MEM_LVL_MISS;
706
707 /* already taken care of */
708 m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
709
710 for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
711 if (!(m & 0x1))
712 continue;
713 if (l) {
714 strcat(out, " or ");
715 l += 4;
716 }
717 strncat(out, mem_lvl[i], sz - l);
718 l += strlen(mem_lvl[i]);
719 }
720 if (*out == '\0')
721 strcpy(out, "N/A");
722 if (hit)
723 strncat(out, " hit", sz - l);
724 if (miss)
725 strncat(out, " miss", sz - l);
726
727 return repsep_snprintf(bf, size, "%-*s", width, out);
728 }
729
730 static int64_t
731 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
732 {
733 union perf_mem_data_src data_src_l;
734 union perf_mem_data_src data_src_r;
735
736 if (left->mem_info)
737 data_src_l = left->mem_info->data_src;
738 else
739 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
740
741 if (right->mem_info)
742 data_src_r = right->mem_info->data_src;
743 else
744 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
745
746 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
747 }
748
749 static const char * const snoop_access[] = {
750 "N/A",
751 "None",
752 "Miss",
753 "Hit",
754 "HitM",
755 };
756 #define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
757
758 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
759 size_t size, unsigned int width)
760 {
761 char out[64];
762 size_t sz = sizeof(out) - 1; /* -1 for null termination */
763 size_t i, l = 0;
764 u64 m = PERF_MEM_SNOOP_NA;
765
766 out[0] = '\0';
767
768 if (he->mem_info)
769 m = he->mem_info->data_src.mem_snoop;
770
771 for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
772 if (!(m & 0x1))
773 continue;
774 if (l) {
775 strcat(out, " or ");
776 l += 4;
777 }
778 strncat(out, snoop_access[i], sz - l);
779 l += strlen(snoop_access[i]);
780 }
781
782 if (*out == '\0')
783 strcpy(out, "N/A");
784
785 return repsep_snprintf(bf, size, "%-*s", width, out);
786 }
787
788 static inline u64 cl_address(u64 address)
789 {
790 /* return the cacheline of the address */
791 return (address & ~(cacheline_size - 1));
792 }
793
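/*
 * Data-cacheline ordering: group by cpumode first, then by the file backing
 * the data mapping (maj/min/ino/ino_generation).  Anonymous userspace
 * mappings are additionally separated by pid, and ties finally fall through
 * to the cacheline-aligned al_addr.
 */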
794 static int64_t
795 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
796 {
797 u64 l, r;
798 struct map *l_map, *r_map;
799
800 if (!left->mem_info) return -1;
801 if (!right->mem_info) return 1;
802
803 /* group event types together */
804 if (left->cpumode > right->cpumode) return -1;
805 if (left->cpumode < right->cpumode) return 1;
806
807 l_map = left->mem_info->daddr.map;
808 r_map = right->mem_info->daddr.map;
809
810 /* if both are NULL, jump to sort on al_addr instead */
811 if (!l_map && !r_map)
812 goto addr;
813
814 if (!l_map) return -1;
815 if (!r_map) return 1;
816
817 if (l_map->maj > r_map->maj) return -1;
818 if (l_map->maj < r_map->maj) return 1;
819
820 if (l_map->min > r_map->min) return -1;
821 if (l_map->min < r_map->min) return 1;
822
823 if (l_map->ino > r_map->ino) return -1;
824 if (l_map->ino < r_map->ino) return 1;
825
826 if (l_map->ino_generation > r_map->ino_generation) return -1;
827 if (l_map->ino_generation < r_map->ino_generation) return 1;
828
829 /*
830 * Addresses with no major/minor numbers are assumed to be
831 * anonymous in userspace. Sort those on pid then address.
832 *
833 * The kernel and non-zero major/minor mapped areas are
834 * assumed to be unity mapped. Sort those on address.
835 */
836
837 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
838 (!(l_map->flags & MAP_SHARED)) &&
839 !l_map->maj && !l_map->min && !l_map->ino &&
840 !l_map->ino_generation) {
841 /* userspace anonymous */
842
843 if (left->thread->pid_ > right->thread->pid_) return -1;
844 if (left->thread->pid_ < right->thread->pid_) return 1;
845 }
846
847 addr:
848 /* al_addr does all the right addr - start + offset calculations */
849 l = cl_address(left->mem_info->daddr.al_addr);
850 r = cl_address(right->mem_info->daddr.al_addr);
851
852 if (l > r) return -1;
853 if (l < r) return 1;
854
855 return 0;
856 }
857
858 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
859 size_t size, unsigned int width)
860 {
861
862 uint64_t addr = 0;
863 struct map *map = NULL;
864 struct symbol *sym = NULL;
865 char level = he->level;
866
867 if (he->mem_info) {
868 addr = cl_address(he->mem_info->daddr.al_addr);
869 map = he->mem_info->daddr.map;
870 sym = he->mem_info->daddr.sym;
871
872 /* print [s] for shared data mmaps */
873 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
874 map && (map->type == MAP__VARIABLE) &&
875 (map->flags & MAP_SHARED) &&
876 (map->maj || map->min || map->ino ||
877 map->ino_generation))
878 level = 's';
879 else if (!map)
880 level = 'X';
881 }
882 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
883 width);
884 }
885
886 struct sort_entry sort_mispredict = {
887 .se_header = "Branch Mispredicted",
888 .se_cmp = sort__mispredict_cmp,
889 .se_snprintf = hist_entry__mispredict_snprintf,
890 .se_width_idx = HISTC_MISPREDICT,
891 };
892
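/*
 * "local_weight" sorts on the average weight per sample in the entry
 * (stat.weight / nr_events); the global "weight" key further below uses the
 * raw accumulated weight.
 */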
893 static u64 he_weight(struct hist_entry *he)
894 {
895 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
896 }
897
898 static int64_t
899 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
900 {
901 return he_weight(left) - he_weight(right);
902 }
903
904 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
905 size_t size, unsigned int width)
906 {
907 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
908 }
909
910 struct sort_entry sort_local_weight = {
911 .se_header = "Local Weight",
912 .se_cmp = sort__local_weight_cmp,
913 .se_snprintf = hist_entry__local_weight_snprintf,
914 .se_width_idx = HISTC_LOCAL_WEIGHT,
915 };
916
917 static int64_t
918 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
919 {
920 return left->stat.weight - right->stat.weight;
921 }
922
923 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
924 size_t size, unsigned int width)
925 {
926 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
927 }
928
929 struct sort_entry sort_global_weight = {
930 .se_header = "Weight",
931 .se_cmp = sort__global_weight_cmp,
932 .se_snprintf = hist_entry__global_weight_snprintf,
933 .se_width_idx = HISTC_GLOBAL_WEIGHT,
934 };
935
936 struct sort_entry sort_mem_daddr_sym = {
937 .se_header = "Data Symbol",
938 .se_cmp = sort__daddr_cmp,
939 .se_snprintf = hist_entry__daddr_snprintf,
940 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
941 };
942
943 struct sort_entry sort_mem_daddr_dso = {
944 .se_header = "Data Object",
945 .se_cmp = sort__dso_daddr_cmp,
946 .se_snprintf = hist_entry__dso_daddr_snprintf,
947 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
948 };
949
950 struct sort_entry sort_mem_locked = {
951 .se_header = "Locked",
952 .se_cmp = sort__locked_cmp,
953 .se_snprintf = hist_entry__locked_snprintf,
954 .se_width_idx = HISTC_MEM_LOCKED,
955 };
956
957 struct sort_entry sort_mem_tlb = {
958 .se_header = "TLB access",
959 .se_cmp = sort__tlb_cmp,
960 .se_snprintf = hist_entry__tlb_snprintf,
961 .se_width_idx = HISTC_MEM_TLB,
962 };
963
964 struct sort_entry sort_mem_lvl = {
965 .se_header = "Memory access",
966 .se_cmp = sort__lvl_cmp,
967 .se_snprintf = hist_entry__lvl_snprintf,
968 .se_width_idx = HISTC_MEM_LVL,
969 };
970
971 struct sort_entry sort_mem_snoop = {
972 .se_header = "Snoop",
973 .se_cmp = sort__snoop_cmp,
974 .se_snprintf = hist_entry__snoop_snprintf,
975 .se_width_idx = HISTC_MEM_SNOOP,
976 };
977
978 struct sort_entry sort_mem_dcacheline = {
979 .se_header = "Data Cacheline",
980 .se_cmp = sort__dcacheline_cmp,
981 .se_snprintf = hist_entry__dcacheline_snprintf,
982 .se_width_idx = HISTC_MEM_DCACHELINE,
983 };
984
985 static int64_t
986 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
987 {
988 return left->branch_info->flags.abort !=
989 right->branch_info->flags.abort;
990 }
991
992 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
993 size_t size, unsigned int width)
994 {
995 const char *out = ".";
996
997 if (he->branch_info->flags.abort)
998 out = "A";
999 return repsep_snprintf(bf, size, "%-*s", width, out);
1000 }
1001
1002 struct sort_entry sort_abort = {
1003 .se_header = "Transaction abort",
1004 .se_cmp = sort__abort_cmp,
1005 .se_snprintf = hist_entry__abort_snprintf,
1006 .se_width_idx = HISTC_ABORT,
1007 };
1008
1009 static int64_t
1010 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1011 {
1012 return left->branch_info->flags.in_tx !=
1013 right->branch_info->flags.in_tx;
1014 }
1015
1016 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1017 size_t size, unsigned int width)
1018 {
1019 const char *out = ".";
1020
1021 if (he->branch_info->flags.in_tx)
1022 out = "T";
1023
1024 return repsep_snprintf(bf, size, "%-*s", width, out);
1025 }
1026
1027 struct sort_entry sort_in_tx = {
1028 .se_header = "Branch in transaction",
1029 .se_cmp = sort__in_tx_cmp,
1030 .se_snprintf = hist_entry__in_tx_snprintf,
1031 .se_width_idx = HISTC_IN_TX,
1032 };
1033
1034 static int64_t
1035 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1036 {
1037 return left->transaction - right->transaction;
1038 }
1039
1040 static inline char *add_str(char *p, const char *str)
1041 {
1042 strcpy(p, str);
1043 return p + strlen(str);
1044 }
1045
1046 static struct txbit {
1047 unsigned flag;
1048 const char *name;
1049 int skip_for_len;
1050 } txbits[] = {
1051 { PERF_TXN_ELISION, "EL ", 0 },
1052 { PERF_TXN_TRANSACTION, "TX ", 1 },
1053 { PERF_TXN_SYNC, "SYNC ", 1 },
1054 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1055 { PERF_TXN_RETRY, "RETRY ", 0 },
1056 { PERF_TXN_CONFLICT, "CON ", 0 },
1057 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1058 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1059 { 0, NULL, 0 }
1060 };
1061
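/*
 * The transaction column is sized for all txbits[] names not marked
 * skip_for_len plus ":XX " for the abort code.  The printer emits one name
 * per set flag, "NEITHER " when a transaction is neither SYNC nor ASYNC,
 * and the abort reason as hex.
 */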
1062 int hist_entry__transaction_len(void)
1063 {
1064 int i;
1065 int len = 0;
1066
1067 for (i = 0; txbits[i].name; i++) {
1068 if (!txbits[i].skip_for_len)
1069 len += strlen(txbits[i].name);
1070 }
1071 len += 4; /* :XX<space> */
1072 return len;
1073 }
1074
1075 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1076 size_t size, unsigned int width)
1077 {
1078 u64 t = he->transaction;
1079 char buf[128];
1080 char *p = buf;
1081 int i;
1082
1083 buf[0] = 0;
1084 for (i = 0; txbits[i].name; i++)
1085 if (txbits[i].flag & t)
1086 p = add_str(p, txbits[i].name);
1087 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1088 p = add_str(p, "NEITHER ");
1089 if (t & PERF_TXN_ABORT_MASK) {
1090 sprintf(p, ":%" PRIx64,
1091 (t & PERF_TXN_ABORT_MASK) >>
1092 PERF_TXN_ABORT_SHIFT);
1093 p += strlen(p);
1094 }
1095
1096 return repsep_snprintf(bf, size, "%-*s", width, buf);
1097 }
1098
1099 struct sort_entry sort_transaction = {
1100 .se_header = "Transaction ",
1101 .se_cmp = sort__transaction_cmp,
1102 .se_snprintf = hist_entry__transaction_snprintf,
1103 .se_width_idx = HISTC_TRANSACTION,
1104 };
1105
1106 struct sort_dimension {
1107 const char *name;
1108 struct sort_entry *entry;
1109 int taken;
1110 };
1111
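/*
 * The tables below map --sort/--fields token names to their implementations:
 * common keys index sort_entry structs directly, branch-stack and memory
 * keys are offset by __SORT_BRANCH_STACK / __SORT_MEMORY_MODE, and the hpp
 * table maps the overhead/sample/period style columns to perf_hpp formats.
 */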
1112 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1113
1114 static struct sort_dimension common_sort_dimensions[] = {
1115 DIM(SORT_PID, "pid", sort_thread),
1116 DIM(SORT_COMM, "comm", sort_comm),
1117 DIM(SORT_DSO, "dso", sort_dso),
1118 DIM(SORT_SYM, "symbol", sort_sym),
1119 DIM(SORT_PARENT, "parent", sort_parent),
1120 DIM(SORT_CPU, "cpu", sort_cpu),
1121 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1122 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1123 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1124 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1125 };
1126
1127 #undef DIM
1128
1129 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1130
1131 static struct sort_dimension bstack_sort_dimensions[] = {
1132 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1133 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1134 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1135 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1136 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1137 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1138 DIM(SORT_ABORT, "abort", sort_abort),
1139 };
1140
1141 #undef DIM
1142
1143 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1144
1145 static struct sort_dimension memory_sort_dimensions[] = {
1146 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1147 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1148 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1149 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1150 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1151 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1152 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1153 };
1154
1155 #undef DIM
1156
1157 struct hpp_dimension {
1158 const char *name;
1159 struct perf_hpp_fmt *fmt;
1160 int taken;
1161 };
1162
1163 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1164
1165 static struct hpp_dimension hpp_sort_dimensions[] = {
1166 DIM(PERF_HPP__OVERHEAD, "overhead"),
1167 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1168 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1169 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1170 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1171 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1172 DIM(PERF_HPP__SAMPLES, "sample"),
1173 DIM(PERF_HPP__PERIOD, "period"),
1174 };
1175
1176 #undef DIM
1177
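/*
 * hpp_sort_entry adapts a sort_entry to the perf_hpp_fmt column interface:
 * the header/width/entry callbacks below look up the column width from the
 * hists and forward the actual formatting to the wrapped sort_entry, so any
 * sort key can also be used as an output field.
 */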
1178 struct hpp_sort_entry {
1179 struct perf_hpp_fmt hpp;
1180 struct sort_entry *se;
1181 };
1182
1183 bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1184 {
1185 struct hpp_sort_entry *hse_a;
1186 struct hpp_sort_entry *hse_b;
1187
1188 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1189 return false;
1190
1191 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1192 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1193
1194 return hse_a->se == hse_b->se;
1195 }
1196
1197 void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1198 {
1199 struct hpp_sort_entry *hse;
1200
1201 if (!perf_hpp__is_sort_entry(fmt))
1202 return;
1203
1204 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1205 hists__new_col_len(hists, hse->se->se_width_idx,
1206 strlen(hse->se->se_header));
1207 }
1208
1209 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1210 struct perf_evsel *evsel)
1211 {
1212 struct hpp_sort_entry *hse;
1213 size_t len;
1214
1215 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1216 len = hists__col_len(&evsel->hists, hse->se->se_width_idx);
1217
1218 return scnprintf(hpp->buf, hpp->size, "%-*s", len, hse->se->se_header);
1219 }
1220
1221 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1222 struct perf_hpp *hpp __maybe_unused,
1223 struct perf_evsel *evsel)
1224 {
1225 struct hpp_sort_entry *hse;
1226
1227 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1228
1229 return hists__col_len(&evsel->hists, hse->se->se_width_idx);
1230 }
1231
1232 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1233 struct hist_entry *he)
1234 {
1235 struct hpp_sort_entry *hse;
1236 size_t len;
1237
1238 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1239 len = hists__col_len(he->hists, hse->se->se_width_idx);
1240
1241 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1242 }
1243
1244 static struct hpp_sort_entry *
1245 __sort_dimension__alloc_hpp(struct sort_dimension *sd)
1246 {
1247 struct hpp_sort_entry *hse;
1248
1249 hse = malloc(sizeof(*hse));
1250 if (hse == NULL) {
1251 pr_err("Memory allocation failed\n");
1252 return NULL;
1253 }
1254
1255 hse->se = sd->entry;
1256 hse->hpp.header = __sort__hpp_header;
1257 hse->hpp.width = __sort__hpp_width;
1258 hse->hpp.entry = __sort__hpp_entry;
1259 hse->hpp.color = NULL;
1260
1261 hse->hpp.cmp = sd->entry->se_cmp;
1262 hse->hpp.collapse = sd->entry->se_collapse ? : sd->entry->se_cmp;
1263 hse->hpp.sort = sd->entry->se_sort ? : hse->hpp.collapse;
1264
1265 INIT_LIST_HEAD(&hse->hpp.list);
1266 INIT_LIST_HEAD(&hse->hpp.sort_list);
1267 hse->hpp.elide = false;
1268
1269 return hse;
1270 }
1271
1272 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1273 {
1274 return format->header == __sort__hpp_header;
1275 }
1276
1277 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1278 {
1279 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1280
1281 if (hse == NULL)
1282 return -1;
1283
1284 perf_hpp__register_sort_field(&hse->hpp);
1285 return 0;
1286 }
1287
1288 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
1289 {
1290 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1291
1292 if (hse == NULL)
1293 return -1;
1294
1295 perf_hpp__column_register(&hse->hpp);
1296 return 0;
1297 }
1298
1299 static int __sort_dimension__add(struct sort_dimension *sd)
1300 {
1301 if (sd->taken)
1302 return 0;
1303
1304 if (__sort_dimension__add_hpp_sort(sd) < 0)
1305 return -1;
1306
1307 if (sd->entry->se_collapse)
1308 sort__need_collapse = 1;
1309
1310 sd->taken = 1;
1311
1312 return 0;
1313 }
1314
1315 static int __hpp_dimension__add(struct hpp_dimension *hd)
1316 {
1317 if (!hd->taken) {
1318 hd->taken = 1;
1319
1320 perf_hpp__register_sort_field(hd->fmt);
1321 }
1322 return 0;
1323 }
1324
1325 static int __sort_dimension__add_output(struct sort_dimension *sd)
1326 {
1327 if (sd->taken)
1328 return 0;
1329
1330 if (__sort_dimension__add_hpp_output(sd) < 0)
1331 return -1;
1332
1333 sd->taken = 1;
1334 return 0;
1335 }
1336
1337 static int __hpp_dimension__add_output(struct hpp_dimension *hd)
1338 {
1339 if (!hd->taken) {
1340 hd->taken = 1;
1341
1342 perf_hpp__column_register(hd->fmt);
1343 }
1344 return 0;
1345 }
1346
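/*
 * Resolve one --sort token by scanning the common, hpp, branch-stack and
 * memory tables in that order.  Matching uses strncasecmp() over the token
 * length, so prefixes of a key name are accepted (first match wins).  Branch
 * and memory keys are rejected with -EINVAL outside their sort mode;
 * unknown tokens give -ESRCH.
 */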
1347 int sort_dimension__add(const char *tok)
1348 {
1349 unsigned int i;
1350
1351 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
1352 struct sort_dimension *sd = &common_sort_dimensions[i];
1353
1354 if (strncasecmp(tok, sd->name, strlen(tok)))
1355 continue;
1356
1357 if (sd->entry == &sort_parent) {
1358 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
1359 if (ret) {
1360 char err[BUFSIZ];
1361
1362 regerror(ret, &parent_regex, err, sizeof(err));
1363 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
1364 return -EINVAL;
1365 }
1366 sort__has_parent = 1;
1367 } else if (sd->entry == &sort_sym) {
1368 sort__has_sym = 1;
1369 } else if (sd->entry == &sort_dso) {
1370 sort__has_dso = 1;
1371 }
1372
1373 return __sort_dimension__add(sd);
1374 }
1375
1376 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
1377 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
1378
1379 if (strncasecmp(tok, hd->name, strlen(tok)))
1380 continue;
1381
1382 return __hpp_dimension__add(hd);
1383 }
1384
1385 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
1386 struct sort_dimension *sd = &bstack_sort_dimensions[i];
1387
1388 if (strncasecmp(tok, sd->name, strlen(tok)))
1389 continue;
1390
1391 if (sort__mode != SORT_MODE__BRANCH)
1392 return -EINVAL;
1393
1394 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
1395 sort__has_sym = 1;
1396
1397 __sort_dimension__add(sd);
1398 return 0;
1399 }
1400
1401 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
1402 struct sort_dimension *sd = &memory_sort_dimensions[i];
1403
1404 if (strncasecmp(tok, sd->name, strlen(tok)))
1405 continue;
1406
1407 if (sort__mode != SORT_MODE__MEMORY)
1408 return -EINVAL;
1409
1410 if (sd->entry == &sort_mem_daddr_sym)
1411 sort__has_sym = 1;
1412
1413 __sort_dimension__add(sd);
1414 return 0;
1415 }
1416
1417 return -ESRCH;
1418 }
1419
1420 static const char *get_default_sort_order(void)
1421 {
1422 const char *default_sort_orders[] = {
1423 default_sort_order,
1424 default_branch_sort_order,
1425 default_mem_sort_order,
1426 default_top_sort_order,
1427 default_diff_sort_order,
1428 };
1429
1430 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
1431
1432 return default_sort_orders[sort__mode];
1433 }
1434
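/*
 * Sort keys come from --sort when given, otherwise from the default order
 * for the current sort mode; a --fields specification without --sort skips
 * the defaults entirely.  The key string is split on ',' and ' '.
 */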
1435 static int __setup_sorting(void)
1436 {
1437 char *tmp, *tok, *str;
1438 const char *sort_keys = sort_order;
1439 int ret = 0;
1440
1441 if (sort_keys == NULL) {
1442 if (field_order) {
1443 /*
1444 * If user specified field order but no sort order,
1445 * we'll honor it and not add default sort orders.
1446 */
1447 return 0;
1448 }
1449
1450 sort_keys = get_default_sort_order();
1451 }
1452
1453 str = strdup(sort_keys);
1454 if (str == NULL) {
1455 error("Not enough memory to setup sort keys");
1456 return -ENOMEM;
1457 }
1458
1459 for (tok = strtok_r(str, ", ", &tmp);
1460 tok; tok = strtok_r(NULL, ", ", &tmp)) {
1461 ret = sort_dimension__add(tok);
1462 if (ret == -EINVAL) {
1463 error("Invalid --sort key: `%s'", tok);
1464 break;
1465 } else if (ret == -ESRCH) {
1466 error("Unknown --sort key: `%s'", tok);
1467 break;
1468 }
1469 }
1470
1471 free(str);
1472 return ret;
1473 }
1474
1475 void perf_hpp__set_elide(int idx, bool elide)
1476 {
1477 struct perf_hpp_fmt *fmt;
1478 struct hpp_sort_entry *hse;
1479
1480 perf_hpp__for_each_format(fmt) {
1481 if (!perf_hpp__is_sort_entry(fmt))
1482 continue;
1483
1484 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1485 if (hse->se->se_width_idx == idx) {
1486 fmt->elide = elide;
1487 break;
1488 }
1489 }
1490 }
1491
1492 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
1493 {
1494 if (list && strlist__nr_entries(list) == 1) {
1495 if (fp != NULL)
1496 fprintf(fp, "# %s: %s\n", list_name,
1497 strlist__entry(list, 0)->s);
1498 return true;
1499 }
1500 return false;
1501 }
1502
1503 static bool get_elide(int idx, FILE *output)
1504 {
1505 switch (idx) {
1506 case HISTC_SYMBOL:
1507 return __get_elide(symbol_conf.sym_list, "symbol", output);
1508 case HISTC_DSO:
1509 return __get_elide(symbol_conf.dso_list, "dso", output);
1510 case HISTC_COMM:
1511 return __get_elide(symbol_conf.comm_list, "comm", output);
1512 default:
1513 break;
1514 }
1515
1516 if (sort__mode != SORT_MODE__BRANCH)
1517 return false;
1518
1519 switch (idx) {
1520 case HISTC_SYMBOL_FROM:
1521 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
1522 case HISTC_SYMBOL_TO:
1523 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
1524 case HISTC_DSO_FROM:
1525 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
1526 case HISTC_DSO_TO:
1527 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
1528 default:
1529 break;
1530 }
1531
1532 return false;
1533 }
1534
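/*
 * A column is elided when its filter list (dso, comm, symbol, or the branch
 * from/to variants) contains exactly one entry, since it would then show the
 * same value on every line.  If that would hide every sort column, the
 * second pass below reverts the elision.
 */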
1535 void sort__setup_elide(FILE *output)
1536 {
1537 struct perf_hpp_fmt *fmt;
1538 struct hpp_sort_entry *hse;
1539
1540 perf_hpp__for_each_format(fmt) {
1541 if (!perf_hpp__is_sort_entry(fmt))
1542 continue;
1543
1544 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1545 fmt->elide = get_elide(hse->se->se_width_idx, output);
1546 }
1547
1548 /*
1549 * It makes no sense to elide all of the sort entries.
1550 * Just revert them so they show up again.
1551 */
1552 perf_hpp__for_each_format(fmt) {
1553 if (!perf_hpp__is_sort_entry(fmt))
1554 continue;
1555
1556 if (!fmt->elide)
1557 return;
1558 }
1559
1560 perf_hpp__for_each_format(fmt) {
1561 if (!perf_hpp__is_sort_entry(fmt))
1562 continue;
1563
1564 fmt->elide = false;
1565 }
1566 }
1567
1568 static int output_field_add(char *tok)
1569 {
1570 unsigned int i;
1571
1572 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
1573 struct sort_dimension *sd = &common_sort_dimensions[i];
1574
1575 if (strncasecmp(tok, sd->name, strlen(tok)))
1576 continue;
1577
1578 return __sort_dimension__add_output(sd);
1579 }
1580
1581 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
1582 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
1583
1584 if (strncasecmp(tok, hd->name, strlen(tok)))
1585 continue;
1586
1587 return __hpp_dimension__add_output(hd);
1588 }
1589
1590 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
1591 struct sort_dimension *sd = &bstack_sort_dimensions[i];
1592
1593 if (strncasecmp(tok, sd->name, strlen(tok)))
1594 continue;
1595
1596 return __sort_dimension__add_output(sd);
1597 }
1598
1599 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
1600 struct sort_dimension *sd = &memory_sort_dimensions[i];
1601
1602 if (strncasecmp(tok, sd->name, strlen(tok)))
1603 continue;
1604
1605 return __sort_dimension__add_output(sd);
1606 }
1607
1608 return -ESRCH;
1609 }
1610
1611 static void reset_dimensions(void)
1612 {
1613 unsigned int i;
1614
1615 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
1616 common_sort_dimensions[i].taken = 0;
1617
1618 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
1619 hpp_sort_dimensions[i].taken = 0;
1620
1621 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
1622 bstack_sort_dimensions[i].taken = 0;
1623
1624 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
1625 memory_sort_dimensions[i].taken = 0;
1626 }
1627
1628 static int __setup_output_field(void)
1629 {
1630 char *tmp, *tok, *str;
1631 int ret = 0;
1632
1633 if (field_order == NULL)
1634 return 0;
1635
1636 reset_dimensions();
1637
1638 str = strdup(field_order);
1639 if (str == NULL) {
1640 error("Not enough memory to setup output fields");
1641 return -ENOMEM;
1642 }
1643
1644 for (tok = strtok_r(str, ", ", &tmp);
1645 tok; tok = strtok_r(NULL, ", ", &tmp)) {
1646 ret = output_field_add(tok);
1647 if (ret == -EINVAL) {
1648 error("Invalid --fields key: `%s'", tok);
1649 break;
1650 } else if (ret == -ESRCH) {
1651 error("Unknown --fields key: `%s'", tok);
1652 break;
1653 }
1654 }
1655
1656 free(str);
1657 return ret;
1658 }
1659
1660 int setup_sorting(void)
1661 {
1662 int err;
1663
1664 err = __setup_sorting();
1665 if (err < 0)
1666 return err;
1667
1668 if (parent_pattern != default_parent_pattern) {
1669 err = sort_dimension__add("parent");
1670 if (err < 0)
1671 return err;
1672 }
1673
1674 reset_dimensions();
1675
1676 /*
1677 * perf diff doesn't use default hpp output fields.
1678 */
1679 if (sort__mode != SORT_MODE__DIFF)
1680 perf_hpp__init();
1681
1682 err = __setup_output_field();
1683 if (err < 0)
1684 return err;
1685
1686 /* copy sort keys to output fields */
1687 perf_hpp__setup_output_field();
1688 /* and then copy output fields to sort keys */
1689 perf_hpp__append_sort_keys();
1690
1691 return 0;
1692 }
1693
1694 void reset_output_field(void)
1695 {
1696 sort__need_collapse = 0;
1697 sort__has_parent = 0;
1698 sort__has_sym = 0;
1699 sort__has_dso = 0;
1700
1701 field_order = NULL;
1702 sort_order = NULL;
1703
1704 reset_dimensions();
1705 perf_hpp__reset_output_field();
1706 }