static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
struct callchain_param callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.order	= ORDER_CALLEE,
};
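
/*
 * Column-width bookkeeping: each struct hists carries a col_len[] array
 * indexed by enum hist_column, grown on the fly as entries are added so
 * the stdio/TUI output can size its columns to fit the data.
 */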
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
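
/*
 * Update the width of every column a single entry touches: symbol,
 * comm/thread, dso, parent, branch from/to and mem daddr columns all
 * grow to fit the widest value seen so far.
 */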
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
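
/*
 * Period accounting: he_stat accumulates the sampled period split by
 * cpumode (sys/user and guest sys/user), plus weight and event count.
 */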
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}
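
/*
 * Decay as used by 'perf top': each pass keeps 7/8 of the period, so an
 * entry that stops getting samples fades out over a few refreshes, e.g.
 * 1000 -> 875 -> 765 -> 669 -> ...
 */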
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}
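
/*
 * Decay every entry and drop the ones that reach zero (or match the
 * zap_user/zap_kernel request), unless a browser still uses them.
 */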
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			--hists->nr_entries;
			if (!n->filtered)
				--hists->nr_non_filtered_entries;

			hist_entry__free(n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hists->entries);

		if (sort__need_collapse)
			rb_erase(&n->rb_node_in, &hists->entries_collapsed);

		--hists->nr_entries;
		if (!n->filtered)
			--hists->nr_non_filtered_entries;

		hist_entry__free(n);
	}
}
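
/*
 * Note on allocation: when callchains are in use, the callchain_root is
 * co-allocated right behind the hist_entry itself (see the zalloc()
 * below), which assumes 'callchain' is the last member of the struct.
 */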
/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
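
/*
 * Insert one entry into hists->entries_in, keyed by the configured sort
 * keys: on a match the periods are merged into the existing node,
 * otherwise a new node is linked into the rbtree.
 */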
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al, sample_self);
}
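
/*
 * hist_entry_iter: one sample may produce several hist entries (one per
 * branch stack slot, one per callchain node in cumulate mode, ...), so
 * the work is split into prepare/add_single/next/add_next/finish
 * callbacks selected through iter->ops.
 */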
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight and this is indirectly achieved by
	 * passing period=weight here and the he_stat__add_period()
	 * function.
	 */
	he = __hists__add_entry(&iter->evsel->hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since
	 * the mem info was either already freed in add_hist_entry() or
	 * passed to a new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(&evsel->hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(&evsel->hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	return hist_entry__append_callchain(he, sample);
}
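
/*
 * Cumulative (--children) mode: besides the entry for the sample
 * itself, every caller in its callchain gets the period accumulated,
 * and the he_cache below suppresses duplicates from cycles/recursion.
 */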
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &callchain_cursor, sample->period);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
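
/*
 * Driver for the iterator ops above.  A sketch of a typical caller,
 * from a tool's sample handler (the 'rep' locals are illustrative
 * only):
 *
 *	struct hist_entry_iter iter = {
 *		.ops		 = &hist_iter_normal,
 *		.hide_unresolved = rep->hide_unresolved,
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, evsel, sample,
 *				   rep->max_stack, rep);
 */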
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	free(he);
}
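
/*
 * Resort happens in two passes: hists__collapse_resort() first merges
 * entries that are equal under the collapse keys, then
 * hists__output_resort() rebuilds hists->entries in display order.
 */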
/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
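
/*
 * entries_in double-buffering: new entries keep landing in one half of
 * entries_in_array while the collapse pass drains the other, letting
 * 'perf top' add samples concurrently under hists->lock.
 */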
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
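
/*
 * Output phase: entries are inserted into hists->entries in
 * hist_entry__sort() order, with callchains below min_callchain_hits
 * folded away by callchain_param.sort().
 */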
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);
	}
}
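
/*
 * Filtering: h->filtered is a bitmask of active filter types
 * (dso/thread/symbol).  Applying a filter marks non-matching entries;
 * removing one puts the survivors back into the filter stats.
 */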
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
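
/*
 * Entry pairing across two hists, used for 'perf diff' style
 * comparisons: matching entries get linked, and hists__link() adds
 * zeroed dummy entries for those only present in 'other'.
 */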
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;

			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
hists__total_period(struct hists
*hists
)
1413 return symbol_conf
.filter_relative
? hists
->stats
.total_non_filtered_period
:
1414 hists
->stats
.total_period
;
1417 int parse_filter_percentage(const struct option
*opt __maybe_unused
,
1418 const char *arg
, int unset __maybe_unused
)
1420 if (!strcmp(arg
, "relative"))
1421 symbol_conf
.filter_relative
= true;
1422 else if (!strcmp(arg
, "absolute"))
1423 symbol_conf
.filter_relative
= false;
1430 int perf_hist_config(const char *var
, const char *value
)
1432 if (!strcmp(var
, "hist.percentage"))
1433 return parse_filter_percentage(NULL
, value
, 0);