Commit | Line | Data |
---|---|---|
78f7defe | 1 | #include "annotate.h" |
8a0ecfb8 | 2 | #include "util.h" |
598357eb | 3 | #include "build-id.h" |
3d1d07ec | 4 | #include "hist.h" |
4e4f06e4 ACM |
5 | #include "session.h" |
6 | #include "sort.h" | |
29d720ed | 7 | #include "evsel.h" |
9b33827d | 8 | #include <math.h> |
3d1d07ec | 9 | |
/* Forward declarations for the per-entry filter predicates defined below. */
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

/* Bit positions used in struct hist_entry::filtered. */
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};
23 | ||
3d1d07ec JK |
24 | struct callchain_param callchain_param = { |
25 | .mode = CHAIN_GRAPH_REL, | |
d797fdc5 SL |
26 | .min_percent = 0.5, |
27 | .order = ORDER_CALLEE | |
3d1d07ec JK |
28 | }; |
29 | ||
42b28ac0 | 30 | u16 hists__col_len(struct hists *hists, enum hist_column col) |
8a6c5b26 | 31 | { |
42b28ac0 | 32 | return hists->col_len[col]; |
8a6c5b26 ACM |
33 | } |
34 | ||
42b28ac0 | 35 | void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len) |
8a6c5b26 | 36 | { |
42b28ac0 | 37 | hists->col_len[col] = len; |
8a6c5b26 ACM |
38 | } |
39 | ||
42b28ac0 | 40 | bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len) |
8a6c5b26 | 41 | { |
42b28ac0 ACM |
42 | if (len > hists__col_len(hists, col)) { |
43 | hists__set_col_len(hists, col, len); | |
8a6c5b26 ACM |
44 | return true; |
45 | } | |
46 | return false; | |
47 | } | |
48 | ||
7ccf4f90 | 49 | void hists__reset_col_len(struct hists *hists) |
8a6c5b26 ACM |
50 | { |
51 | enum hist_column col; | |
52 | ||
53 | for (col = 0; col < HISTC_NR_COLS; ++col) | |
42b28ac0 | 54 | hists__set_col_len(hists, col, 0); |
8a6c5b26 ACM |
55 | } |
56 | ||
b5387528 RAV |
57 | static void hists__set_unres_dso_col_len(struct hists *hists, int dso) |
58 | { | |
59 | const unsigned int unresolved_col_width = BITS_PER_LONG / 4; | |
60 | ||
61 | if (hists__col_len(hists, dso) < unresolved_col_width && | |
62 | !symbol_conf.col_width_list_str && !symbol_conf.field_sep && | |
63 | !symbol_conf.dso_list) | |
64 | hists__set_col_len(hists, dso, unresolved_col_width); | |
65 | } | |
66 | ||
7ccf4f90 | 67 | void hists__calc_col_len(struct hists *hists, struct hist_entry *h) |
8a6c5b26 | 68 | { |
b5387528 | 69 | const unsigned int unresolved_col_width = BITS_PER_LONG / 4; |
98a3b32c | 70 | int symlen; |
8a6c5b26 ACM |
71 | u16 len; |
72 | ||
73 | if (h->ms.sym) | |
b5387528 | 74 | hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4); |
98a3b32c SE |
75 | else { |
76 | symlen = unresolved_col_width + 4 + 2; | |
77 | hists__new_col_len(hists, HISTC_SYMBOL, symlen); | |
b5387528 | 78 | hists__set_unres_dso_col_len(hists, HISTC_DSO); |
98a3b32c | 79 | } |
8a6c5b26 ACM |
80 | |
81 | len = thread__comm_len(h->thread); | |
42b28ac0 ACM |
82 | if (hists__new_col_len(hists, HISTC_COMM, len)) |
83 | hists__set_col_len(hists, HISTC_THREAD, len + 6); | |
8a6c5b26 ACM |
84 | |
85 | if (h->ms.map) { | |
86 | len = dso__name_len(h->ms.map->dso); | |
42b28ac0 | 87 | hists__new_col_len(hists, HISTC_DSO, len); |
8a6c5b26 | 88 | } |
b5387528 | 89 | |
cb993744 NK |
90 | if (h->parent) |
91 | hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen); | |
92 | ||
b5387528 | 93 | if (h->branch_info) { |
b5387528 RAV |
94 | /* |
95 | * +4 accounts for '[x] ' priv level info | |
96 | * +2 account of 0x prefix on raw addresses | |
97 | */ | |
98 | if (h->branch_info->from.sym) { | |
99 | symlen = (int)h->branch_info->from.sym->namelen + 4; | |
100 | hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); | |
101 | ||
102 | symlen = dso__name_len(h->branch_info->from.map->dso); | |
103 | hists__new_col_len(hists, HISTC_DSO_FROM, symlen); | |
104 | } else { | |
105 | symlen = unresolved_col_width + 4 + 2; | |
106 | hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); | |
107 | hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM); | |
108 | } | |
109 | ||
110 | if (h->branch_info->to.sym) { | |
111 | symlen = (int)h->branch_info->to.sym->namelen + 4; | |
112 | hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); | |
113 | ||
114 | symlen = dso__name_len(h->branch_info->to.map->dso); | |
115 | hists__new_col_len(hists, HISTC_DSO_TO, symlen); | |
116 | } else { | |
117 | symlen = unresolved_col_width + 4 + 2; | |
118 | hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); | |
119 | hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); | |
120 | } | |
121 | } | |
98a3b32c SE |
122 | |
123 | if (h->mem_info) { | |
124 | /* | |
125 | * +4 accounts for '[x] ' priv level info | |
126 | * +2 account of 0x prefix on raw addresses | |
127 | */ | |
128 | if (h->mem_info->daddr.sym) { | |
129 | symlen = (int)h->mem_info->daddr.sym->namelen + 4 | |
130 | + unresolved_col_width + 2; | |
131 | hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, | |
132 | symlen); | |
133 | } else { | |
134 | symlen = unresolved_col_width + 4 + 2; | |
135 | hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, | |
136 | symlen); | |
137 | } | |
138 | if (h->mem_info->daddr.map) { | |
139 | symlen = dso__name_len(h->mem_info->daddr.map->dso); | |
140 | hists__new_col_len(hists, HISTC_MEM_DADDR_DSO, | |
141 | symlen); | |
142 | } else { | |
143 | symlen = unresolved_col_width + 4 + 2; | |
144 | hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); | |
145 | } | |
146 | } else { | |
147 | symlen = unresolved_col_width + 4 + 2; | |
148 | hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen); | |
149 | hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); | |
150 | } | |
151 | ||
152 | hists__new_col_len(hists, HISTC_MEM_LOCKED, 6); | |
153 | hists__new_col_len(hists, HISTC_MEM_TLB, 22); | |
154 | hists__new_col_len(hists, HISTC_MEM_SNOOP, 12); | |
155 | hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3); | |
156 | hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12); | |
157 | hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12); | |
8a6c5b26 ACM |
158 | } |
159 | ||
7ccf4f90 NK |
160 | void hists__output_recalc_col_len(struct hists *hists, int max_rows) |
161 | { | |
162 | struct rb_node *next = rb_first(&hists->entries); | |
163 | struct hist_entry *n; | |
164 | int row = 0; | |
165 | ||
166 | hists__reset_col_len(hists); | |
167 | ||
168 | while (next && row++ < max_rows) { | |
169 | n = rb_entry(next, struct hist_entry, rb_node); | |
170 | if (!n->filtered) | |
171 | hists__calc_col_len(hists, n); | |
172 | next = rb_next(&n->rb_node); | |
173 | } | |
174 | } | |
175 | ||
12c14278 | 176 | static void hist_entry__add_cpumode_period(struct hist_entry *he, |
c82ee828 | 177 | unsigned int cpumode, u64 period) |
a1645ce1 | 178 | { |
28e2a106 | 179 | switch (cpumode) { |
a1645ce1 | 180 | case PERF_RECORD_MISC_KERNEL: |
b24c28f7 | 181 | he->stat.period_sys += period; |
a1645ce1 ZY |
182 | break; |
183 | case PERF_RECORD_MISC_USER: | |
b24c28f7 | 184 | he->stat.period_us += period; |
a1645ce1 ZY |
185 | break; |
186 | case PERF_RECORD_MISC_GUEST_KERNEL: | |
b24c28f7 | 187 | he->stat.period_guest_sys += period; |
a1645ce1 ZY |
188 | break; |
189 | case PERF_RECORD_MISC_GUEST_USER: | |
b24c28f7 | 190 | he->stat.period_guest_us += period; |
a1645ce1 ZY |
191 | break; |
192 | default: | |
193 | break; | |
194 | } | |
195 | } | |
196 | ||
05484298 AK |
197 | static void he_stat__add_period(struct he_stat *he_stat, u64 period, |
198 | u64 weight) | |
139c0815 | 199 | { |
98a3b32c | 200 | |
139c0815 | 201 | he_stat->period += period; |
05484298 | 202 | he_stat->weight += weight; |
139c0815 NK |
203 | he_stat->nr_events += 1; |
204 | } | |
205 | ||
206 | static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src) | |
207 | { | |
208 | dest->period += src->period; | |
209 | dest->period_sys += src->period_sys; | |
210 | dest->period_us += src->period_us; | |
211 | dest->period_guest_sys += src->period_guest_sys; | |
212 | dest->period_guest_us += src->period_guest_us; | |
213 | dest->nr_events += src->nr_events; | |
05484298 | 214 | dest->weight += src->weight; |
139c0815 NK |
215 | } |
216 | ||
ab81f3fd ACM |
217 | static void hist_entry__decay(struct hist_entry *he) |
218 | { | |
b24c28f7 NK |
219 | he->stat.period = (he->stat.period * 7) / 8; |
220 | he->stat.nr_events = (he->stat.nr_events * 7) / 8; | |
05484298 | 221 | /* XXX need decay for weight too? */ |
ab81f3fd ACM |
222 | } |
223 | ||
224 | static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) | |
225 | { | |
b24c28f7 | 226 | u64 prev_period = he->stat.period; |
c64550cf ACM |
227 | |
228 | if (prev_period == 0) | |
df71d95f | 229 | return true; |
c64550cf | 230 | |
ab81f3fd | 231 | hist_entry__decay(he); |
c64550cf ACM |
232 | |
233 | if (!he->filtered) | |
b24c28f7 | 234 | hists->stats.total_period -= prev_period - he->stat.period; |
c64550cf | 235 | |
b24c28f7 | 236 | return he->stat.period == 0; |
ab81f3fd ACM |
237 | } |
238 | ||
b079d4e9 ACM |
239 | static void __hists__decay_entries(struct hists *hists, bool zap_user, |
240 | bool zap_kernel, bool threaded) | |
ab81f3fd ACM |
241 | { |
242 | struct rb_node *next = rb_first(&hists->entries); | |
243 | struct hist_entry *n; | |
244 | ||
245 | while (next) { | |
246 | n = rb_entry(next, struct hist_entry, rb_node); | |
247 | next = rb_next(&n->rb_node); | |
df71d95f ACM |
248 | /* |
249 | * We may be annotating this, for instance, so keep it here in | |
250 | * case some it gets new samples, we'll eventually free it when | |
251 | * the user stops browsing and it agains gets fully decayed. | |
252 | */ | |
b079d4e9 ACM |
253 | if (((zap_user && n->level == '.') || |
254 | (zap_kernel && n->level != '.') || | |
255 | hists__decay_entry(hists, n)) && | |
256 | !n->used) { | |
ab81f3fd ACM |
257 | rb_erase(&n->rb_node, &hists->entries); |
258 | ||
e345fa18 | 259 | if (sort__need_collapse || threaded) |
ab81f3fd ACM |
260 | rb_erase(&n->rb_node_in, &hists->entries_collapsed); |
261 | ||
262 | hist_entry__free(n); | |
263 | --hists->nr_entries; | |
264 | } | |
265 | } | |
266 | } | |
267 | ||
/*
 * Decay all entries (single-threaded variant).
 *
 * Fix: the original wrote "return __hists__decay_entries(...)" -- returning
 * a void expression is a C constraint violation (C11 6.8.6.4) that only
 * compiles as a GNU extension; call it as a plain statement instead.
 */
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	__hists__decay_entries(hists, zap_user, zap_kernel, false);
}
272 | ||
/*
 * Decay all entries (threaded variant).
 *
 * Fix: drop the "return" of a void expression -- a C constraint violation
 * (C11 6.8.6.4) accepted only as a GNU extension.
 */
void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	__hists__decay_entries(hists, zap_user, zap_kernel, true);
}
278 | ||
3d1d07ec | 279 | /* |
c82ee828 | 280 | * histogram, sorted on item, collects periods |
3d1d07ec JK |
281 | */ |
282 | ||
28e2a106 ACM |
283 | static struct hist_entry *hist_entry__new(struct hist_entry *template) |
284 | { | |
d2009c51 | 285 | size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0; |
98a3b32c | 286 | struct hist_entry *he = zalloc(sizeof(*he) + callchain_size); |
28e2a106 | 287 | |
12c14278 ACM |
288 | if (he != NULL) { |
289 | *he = *template; | |
c4b35351 | 290 | |
12c14278 ACM |
291 | if (he->ms.map) |
292 | he->ms.map->referenced = true; | |
3cf0cb1f SE |
293 | |
294 | if (he->branch_info) { | |
26353a61 NK |
295 | /* |
296 | * This branch info is (a part of) allocated from | |
297 | * machine__resolve_bstack() and will be freed after | |
298 | * adding new entries. So we need to save a copy. | |
299 | */ | |
300 | he->branch_info = malloc(sizeof(*he->branch_info)); | |
301 | if (he->branch_info == NULL) { | |
302 | free(he); | |
303 | return NULL; | |
304 | } | |
305 | ||
306 | memcpy(he->branch_info, template->branch_info, | |
307 | sizeof(*he->branch_info)); | |
308 | ||
3cf0cb1f SE |
309 | if (he->branch_info->from.map) |
310 | he->branch_info->from.map->referenced = true; | |
311 | if (he->branch_info->to.map) | |
312 | he->branch_info->to.map->referenced = true; | |
313 | } | |
314 | ||
98a3b32c SE |
315 | if (he->mem_info) { |
316 | if (he->mem_info->iaddr.map) | |
317 | he->mem_info->iaddr.map->referenced = true; | |
318 | if (he->mem_info->daddr.map) | |
319 | he->mem_info->daddr.map->referenced = true; | |
320 | } | |
321 | ||
28e2a106 | 322 | if (symbol_conf.use_callchain) |
12c14278 | 323 | callchain_init(he->callchain); |
b821c732 ACM |
324 | |
325 | INIT_LIST_HEAD(&he->pairs.node); | |
28e2a106 ACM |
326 | } |
327 | ||
12c14278 | 328 | return he; |
28e2a106 ACM |
329 | } |
330 | ||
66f97ed3 | 331 | void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h) |
fefb0b94 | 332 | { |
8a6c5b26 | 333 | if (!h->filtered) { |
42b28ac0 ACM |
334 | hists__calc_col_len(hists, h); |
335 | ++hists->nr_entries; | |
b24c28f7 | 336 | hists->stats.total_period += h->stat.period; |
8a6c5b26 | 337 | } |
fefb0b94 ACM |
338 | } |
339 | ||
7a007ca9 ACM |
340 | static u8 symbol__parent_filter(const struct symbol *parent) |
341 | { | |
342 | if (symbol_conf.exclude_other && parent == NULL) | |
343 | return 1 << HIST_FILTER__PARENT; | |
344 | return 0; | |
345 | } | |
346 | ||
b5387528 RAV |
347 | static struct hist_entry *add_hist_entry(struct hists *hists, |
348 | struct hist_entry *entry, | |
1c02c4d2 | 349 | struct addr_location *al, |
05484298 AK |
350 | u64 period, |
351 | u64 weight) | |
9735abf1 | 352 | { |
1980c2eb | 353 | struct rb_node **p; |
9735abf1 ACM |
354 | struct rb_node *parent = NULL; |
355 | struct hist_entry *he; | |
9735abf1 ACM |
356 | int cmp; |
357 | ||
1980c2eb ACM |
358 | pthread_mutex_lock(&hists->lock); |
359 | ||
360 | p = &hists->entries_in->rb_node; | |
361 | ||
9735abf1 ACM |
362 | while (*p != NULL) { |
363 | parent = *p; | |
1980c2eb | 364 | he = rb_entry(parent, struct hist_entry, rb_node_in); |
9735abf1 | 365 | |
9afcf930 NK |
366 | /* |
367 | * Make sure that it receives arguments in a same order as | |
368 | * hist_entry__collapse() so that we can use an appropriate | |
369 | * function when searching an entry regardless which sort | |
370 | * keys were used. | |
371 | */ | |
372 | cmp = hist_entry__cmp(he, entry); | |
9735abf1 ACM |
373 | |
374 | if (!cmp) { | |
05484298 | 375 | he_stat__add_period(&he->stat, period, weight); |
63fa471d DM |
376 | |
377 | /* If the map of an existing hist_entry has | |
378 | * become out-of-date due to an exec() or | |
379 | * similar, update it. Otherwise we will | |
380 | * mis-adjust symbol addresses when computing | |
381 | * the history counter to increment. | |
382 | */ | |
383 | if (he->ms.map != entry->ms.map) { | |
384 | he->ms.map = entry->ms.map; | |
385 | if (he->ms.map) | |
386 | he->ms.map->referenced = true; | |
387 | } | |
28e2a106 | 388 | goto out; |
9735abf1 ACM |
389 | } |
390 | ||
391 | if (cmp < 0) | |
392 | p = &(*p)->rb_left; | |
393 | else | |
394 | p = &(*p)->rb_right; | |
395 | } | |
396 | ||
b5387528 | 397 | he = hist_entry__new(entry); |
9735abf1 | 398 | if (!he) |
1980c2eb ACM |
399 | goto out_unlock; |
400 | ||
401 | rb_link_node(&he->rb_node_in, parent, p); | |
402 | rb_insert_color(&he->rb_node_in, hists->entries_in); | |
28e2a106 | 403 | out: |
c82ee828 | 404 | hist_entry__add_cpumode_period(he, al->cpumode, period); |
1980c2eb ACM |
405 | out_unlock: |
406 | pthread_mutex_unlock(&hists->lock); | |
9735abf1 ACM |
407 | return he; |
408 | } | |
409 | ||
98a3b32c SE |
410 | struct hist_entry *__hists__add_mem_entry(struct hists *self, |
411 | struct addr_location *al, | |
412 | struct symbol *sym_parent, | |
413 | struct mem_info *mi, | |
414 | u64 period, | |
415 | u64 weight) | |
416 | { | |
417 | struct hist_entry entry = { | |
418 | .thread = al->thread, | |
419 | .ms = { | |
420 | .map = al->map, | |
421 | .sym = al->sym, | |
422 | }, | |
423 | .stat = { | |
424 | .period = period, | |
425 | .weight = weight, | |
426 | .nr_events = 1, | |
427 | }, | |
428 | .cpu = al->cpu, | |
429 | .ip = al->addr, | |
430 | .level = al->level, | |
431 | .parent = sym_parent, | |
432 | .filtered = symbol__parent_filter(sym_parent), | |
433 | .hists = self, | |
434 | .mem_info = mi, | |
435 | .branch_info = NULL, | |
436 | }; | |
437 | return add_hist_entry(self, &entry, al, period, weight); | |
438 | } | |
439 | ||
b5387528 RAV |
440 | struct hist_entry *__hists__add_branch_entry(struct hists *self, |
441 | struct addr_location *al, | |
442 | struct symbol *sym_parent, | |
443 | struct branch_info *bi, | |
05484298 AK |
444 | u64 period, |
445 | u64 weight) | |
b5387528 RAV |
446 | { |
447 | struct hist_entry entry = { | |
448 | .thread = al->thread, | |
449 | .ms = { | |
450 | .map = bi->to.map, | |
451 | .sym = bi->to.sym, | |
452 | }, | |
453 | .cpu = al->cpu, | |
454 | .ip = bi->to.addr, | |
455 | .level = al->level, | |
b24c28f7 NK |
456 | .stat = { |
457 | .period = period, | |
c4b35351 | 458 | .nr_events = 1, |
05484298 | 459 | .weight = weight, |
b24c28f7 | 460 | }, |
b5387528 RAV |
461 | .parent = sym_parent, |
462 | .filtered = symbol__parent_filter(sym_parent), | |
463 | .branch_info = bi, | |
ae359f19 | 464 | .hists = self, |
98a3b32c | 465 | .mem_info = NULL, |
b5387528 RAV |
466 | }; |
467 | ||
05484298 | 468 | return add_hist_entry(self, &entry, al, period, weight); |
b5387528 RAV |
469 | } |
470 | ||
471 | struct hist_entry *__hists__add_entry(struct hists *self, | |
472 | struct addr_location *al, | |
05484298 AK |
473 | struct symbol *sym_parent, u64 period, |
474 | u64 weight) | |
b5387528 RAV |
475 | { |
476 | struct hist_entry entry = { | |
477 | .thread = al->thread, | |
478 | .ms = { | |
479 | .map = al->map, | |
480 | .sym = al->sym, | |
481 | }, | |
482 | .cpu = al->cpu, | |
483 | .ip = al->addr, | |
484 | .level = al->level, | |
b24c28f7 NK |
485 | .stat = { |
486 | .period = period, | |
c4b35351 | 487 | .nr_events = 1, |
05484298 | 488 | .weight = weight, |
b24c28f7 | 489 | }, |
b5387528 RAV |
490 | .parent = sym_parent, |
491 | .filtered = symbol__parent_filter(sym_parent), | |
ae359f19 | 492 | .hists = self, |
98a3b32c SE |
493 | .branch_info = NULL, |
494 | .mem_info = NULL, | |
b5387528 RAV |
495 | }; |
496 | ||
05484298 | 497 | return add_hist_entry(self, &entry, al, period, weight); |
b5387528 RAV |
498 | } |
499 | ||
3d1d07ec JK |
500 | int64_t |
501 | hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) | |
502 | { | |
503 | struct sort_entry *se; | |
504 | int64_t cmp = 0; | |
505 | ||
506 | list_for_each_entry(se, &hist_entry__sort_list, list) { | |
fcd14984 | 507 | cmp = se->se_cmp(left, right); |
3d1d07ec JK |
508 | if (cmp) |
509 | break; | |
510 | } | |
511 | ||
512 | return cmp; | |
513 | } | |
514 | ||
515 | int64_t | |
516 | hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) | |
517 | { | |
518 | struct sort_entry *se; | |
519 | int64_t cmp = 0; | |
520 | ||
521 | list_for_each_entry(se, &hist_entry__sort_list, list) { | |
522 | int64_t (*f)(struct hist_entry *, struct hist_entry *); | |
523 | ||
fcd14984 | 524 | f = se->se_collapse ?: se->se_cmp; |
3d1d07ec JK |
525 | |
526 | cmp = f(left, right); | |
527 | if (cmp) | |
528 | break; | |
529 | } | |
530 | ||
531 | return cmp; | |
532 | } | |
533 | ||
534 | void hist_entry__free(struct hist_entry *he) | |
535 | { | |
580e338d | 536 | free(he->branch_info); |
028f12ee | 537 | free(he->mem_info); |
3d1d07ec JK |
538 | free(he); |
539 | } | |
540 | ||
541 | /* | |
542 | * collapse the histogram | |
543 | */ | |
544 | ||
1d037ca1 | 545 | static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, |
1b3a0e95 FW |
546 | struct rb_root *root, |
547 | struct hist_entry *he) | |
3d1d07ec | 548 | { |
b9bf0892 | 549 | struct rb_node **p = &root->rb_node; |
3d1d07ec JK |
550 | struct rb_node *parent = NULL; |
551 | struct hist_entry *iter; | |
552 | int64_t cmp; | |
553 | ||
554 | while (*p != NULL) { | |
555 | parent = *p; | |
1980c2eb | 556 | iter = rb_entry(parent, struct hist_entry, rb_node_in); |
3d1d07ec JK |
557 | |
558 | cmp = hist_entry__collapse(iter, he); | |
559 | ||
560 | if (!cmp) { | |
139c0815 | 561 | he_stat__add_stat(&iter->stat, &he->stat); |
9ec60972 | 562 | |
1b3a0e95 | 563 | if (symbol_conf.use_callchain) { |
47260645 NK |
564 | callchain_cursor_reset(&callchain_cursor); |
565 | callchain_merge(&callchain_cursor, | |
566 | iter->callchain, | |
1b3a0e95 FW |
567 | he->callchain); |
568 | } | |
3d1d07ec | 569 | hist_entry__free(he); |
fefb0b94 | 570 | return false; |
3d1d07ec JK |
571 | } |
572 | ||
573 | if (cmp < 0) | |
574 | p = &(*p)->rb_left; | |
575 | else | |
576 | p = &(*p)->rb_right; | |
577 | } | |
578 | ||
1980c2eb ACM |
579 | rb_link_node(&he->rb_node_in, parent, p); |
580 | rb_insert_color(&he->rb_node_in, root); | |
fefb0b94 | 581 | return true; |
3d1d07ec JK |
582 | } |
583 | ||
1980c2eb | 584 | static struct rb_root *hists__get_rotate_entries_in(struct hists *hists) |
3d1d07ec | 585 | { |
1980c2eb ACM |
586 | struct rb_root *root; |
587 | ||
588 | pthread_mutex_lock(&hists->lock); | |
589 | ||
590 | root = hists->entries_in; | |
591 | if (++hists->entries_in > &hists->entries_in_array[1]) | |
592 | hists->entries_in = &hists->entries_in_array[0]; | |
593 | ||
594 | pthread_mutex_unlock(&hists->lock); | |
595 | ||
596 | return root; | |
597 | } | |
598 | ||
/* Re-evaluate every active filter (dso/thread/symbol) against @he. */
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
605 | ||
1980c2eb ACM |
606 | static void __hists__collapse_resort(struct hists *hists, bool threaded) |
607 | { | |
608 | struct rb_root *root; | |
3d1d07ec JK |
609 | struct rb_node *next; |
610 | struct hist_entry *n; | |
611 | ||
1980c2eb | 612 | if (!sort__need_collapse && !threaded) |
3d1d07ec JK |
613 | return; |
614 | ||
1980c2eb ACM |
615 | root = hists__get_rotate_entries_in(hists); |
616 | next = rb_first(root); | |
b9bf0892 | 617 | |
3d1d07ec | 618 | while (next) { |
1980c2eb ACM |
619 | n = rb_entry(next, struct hist_entry, rb_node_in); |
620 | next = rb_next(&n->rb_node_in); | |
3d1d07ec | 621 | |
1980c2eb | 622 | rb_erase(&n->rb_node_in, root); |
90cf1fb5 ACM |
623 | if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) { |
624 | /* | |
625 | * If it wasn't combined with one of the entries already | |
626 | * collapsed, we need to apply the filters that may have | |
627 | * been set by, say, the hist_browser. | |
628 | */ | |
629 | hists__apply_filters(hists, n); | |
90cf1fb5 | 630 | } |
3d1d07ec | 631 | } |
1980c2eb | 632 | } |
b9bf0892 | 633 | |
1980c2eb ACM |
634 | void hists__collapse_resort(struct hists *hists) |
635 | { | |
636 | return __hists__collapse_resort(hists, false); | |
637 | } | |
638 | ||
639 | void hists__collapse_resort_threaded(struct hists *hists) | |
640 | { | |
641 | return __hists__collapse_resort(hists, true); | |
3d1d07ec JK |
642 | } |
643 | ||
644 | /* | |
c82ee828 | 645 | * reverse the map, sort on period. |
3d1d07ec JK |
646 | */ |
647 | ||
29d720ed NK |
648 | static int period_cmp(u64 period_a, u64 period_b) |
649 | { | |
650 | if (period_a > period_b) | |
651 | return 1; | |
652 | if (period_a < period_b) | |
653 | return -1; | |
654 | return 0; | |
655 | } | |
656 | ||
657 | static int hist_entry__sort_on_period(struct hist_entry *a, | |
658 | struct hist_entry *b) | |
659 | { | |
660 | int ret; | |
661 | int i, nr_members; | |
662 | struct perf_evsel *evsel; | |
663 | struct hist_entry *pair; | |
664 | u64 *periods_a, *periods_b; | |
665 | ||
666 | ret = period_cmp(a->stat.period, b->stat.period); | |
667 | if (ret || !symbol_conf.event_group) | |
668 | return ret; | |
669 | ||
670 | evsel = hists_to_evsel(a->hists); | |
671 | nr_members = evsel->nr_members; | |
672 | if (nr_members <= 1) | |
673 | return ret; | |
674 | ||
675 | periods_a = zalloc(sizeof(periods_a) * nr_members); | |
676 | periods_b = zalloc(sizeof(periods_b) * nr_members); | |
677 | ||
678 | if (!periods_a || !periods_b) | |
679 | goto out; | |
680 | ||
681 | list_for_each_entry(pair, &a->pairs.head, pairs.node) { | |
682 | evsel = hists_to_evsel(pair->hists); | |
683 | periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period; | |
684 | } | |
685 | ||
686 | list_for_each_entry(pair, &b->pairs.head, pairs.node) { | |
687 | evsel = hists_to_evsel(pair->hists); | |
688 | periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period; | |
689 | } | |
690 | ||
691 | for (i = 1; i < nr_members; i++) { | |
692 | ret = period_cmp(periods_a[i], periods_b[i]); | |
693 | if (ret) | |
694 | break; | |
695 | } | |
696 | ||
697 | out: | |
698 | free(periods_a); | |
699 | free(periods_b); | |
700 | ||
701 | return ret; | |
702 | } | |
703 | ||
1c02c4d2 ACM |
704 | static void __hists__insert_output_entry(struct rb_root *entries, |
705 | struct hist_entry *he, | |
706 | u64 min_callchain_hits) | |
3d1d07ec | 707 | { |
1c02c4d2 | 708 | struct rb_node **p = &entries->rb_node; |
3d1d07ec JK |
709 | struct rb_node *parent = NULL; |
710 | struct hist_entry *iter; | |
711 | ||
d599db3f | 712 | if (symbol_conf.use_callchain) |
b9fb9304 | 713 | callchain_param.sort(&he->sorted_chain, he->callchain, |
3d1d07ec JK |
714 | min_callchain_hits, &callchain_param); |
715 | ||
716 | while (*p != NULL) { | |
717 | parent = *p; | |
718 | iter = rb_entry(parent, struct hist_entry, rb_node); | |
719 | ||
29d720ed | 720 | if (hist_entry__sort_on_period(he, iter) > 0) |
3d1d07ec JK |
721 | p = &(*p)->rb_left; |
722 | else | |
723 | p = &(*p)->rb_right; | |
724 | } | |
725 | ||
726 | rb_link_node(&he->rb_node, parent, p); | |
1c02c4d2 | 727 | rb_insert_color(&he->rb_node, entries); |
3d1d07ec JK |
728 | } |
729 | ||
1980c2eb | 730 | static void __hists__output_resort(struct hists *hists, bool threaded) |
3d1d07ec | 731 | { |
1980c2eb | 732 | struct rb_root *root; |
3d1d07ec JK |
733 | struct rb_node *next; |
734 | struct hist_entry *n; | |
3d1d07ec JK |
735 | u64 min_callchain_hits; |
736 | ||
42b28ac0 | 737 | min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100); |
3d1d07ec | 738 | |
1980c2eb ACM |
739 | if (sort__need_collapse || threaded) |
740 | root = &hists->entries_collapsed; | |
741 | else | |
742 | root = hists->entries_in; | |
743 | ||
744 | next = rb_first(root); | |
745 | hists->entries = RB_ROOT; | |
3d1d07ec | 746 | |
42b28ac0 | 747 | hists->nr_entries = 0; |
7928631a | 748 | hists->stats.total_period = 0; |
42b28ac0 | 749 | hists__reset_col_len(hists); |
fefb0b94 | 750 | |
3d1d07ec | 751 | while (next) { |
1980c2eb ACM |
752 | n = rb_entry(next, struct hist_entry, rb_node_in); |
753 | next = rb_next(&n->rb_node_in); | |
3d1d07ec | 754 | |
1980c2eb | 755 | __hists__insert_output_entry(&hists->entries, n, min_callchain_hits); |
42b28ac0 | 756 | hists__inc_nr_entries(hists, n); |
3d1d07ec | 757 | } |
1980c2eb | 758 | } |
b9bf0892 | 759 | |
1980c2eb ACM |
760 | void hists__output_resort(struct hists *hists) |
761 | { | |
762 | return __hists__output_resort(hists, false); | |
763 | } | |
764 | ||
765 | void hists__output_resort_threaded(struct hists *hists) | |
766 | { | |
767 | return __hists__output_resort(hists, true); | |
3d1d07ec | 768 | } |
4ecf84d0 | 769 | |
42b28ac0 | 770 | static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h, |
cc5edb0e ACM |
771 | enum hist_filter filter) |
772 | { | |
773 | h->filtered &= ~(1 << filter); | |
774 | if (h->filtered) | |
775 | return; | |
776 | ||
42b28ac0 | 777 | ++hists->nr_entries; |
0f0cbf7a | 778 | if (h->ms.unfolded) |
42b28ac0 | 779 | hists->nr_entries += h->nr_rows; |
0f0cbf7a | 780 | h->row_offset = 0; |
b24c28f7 NK |
781 | hists->stats.total_period += h->stat.period; |
782 | hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events; | |
cc5edb0e | 783 | |
42b28ac0 | 784 | hists__calc_col_len(hists, h); |
cc5edb0e ACM |
785 | } |
786 | ||
90cf1fb5 ACM |
787 | |
788 | static bool hists__filter_entry_by_dso(struct hists *hists, | |
789 | struct hist_entry *he) | |
790 | { | |
791 | if (hists->dso_filter != NULL && | |
792 | (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) { | |
793 | he->filtered |= (1 << HIST_FILTER__DSO); | |
794 | return true; | |
795 | } | |
796 | ||
797 | return false; | |
798 | } | |
799 | ||
d7b76f09 | 800 | void hists__filter_by_dso(struct hists *hists) |
b09e0190 ACM |
801 | { |
802 | struct rb_node *nd; | |
803 | ||
42b28ac0 ACM |
804 | hists->nr_entries = hists->stats.total_period = 0; |
805 | hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; | |
806 | hists__reset_col_len(hists); | |
b09e0190 | 807 | |
42b28ac0 | 808 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { |
b09e0190 ACM |
809 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); |
810 | ||
811 | if (symbol_conf.exclude_other && !h->parent) | |
812 | continue; | |
813 | ||
90cf1fb5 | 814 | if (hists__filter_entry_by_dso(hists, h)) |
b09e0190 | 815 | continue; |
b09e0190 | 816 | |
42b28ac0 | 817 | hists__remove_entry_filter(hists, h, HIST_FILTER__DSO); |
b09e0190 ACM |
818 | } |
819 | } | |
820 | ||
90cf1fb5 ACM |
821 | static bool hists__filter_entry_by_thread(struct hists *hists, |
822 | struct hist_entry *he) | |
823 | { | |
824 | if (hists->thread_filter != NULL && | |
825 | he->thread != hists->thread_filter) { | |
826 | he->filtered |= (1 << HIST_FILTER__THREAD); | |
827 | return true; | |
828 | } | |
829 | ||
830 | return false; | |
831 | } | |
832 | ||
d7b76f09 | 833 | void hists__filter_by_thread(struct hists *hists) |
b09e0190 ACM |
834 | { |
835 | struct rb_node *nd; | |
836 | ||
42b28ac0 ACM |
837 | hists->nr_entries = hists->stats.total_period = 0; |
838 | hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; | |
839 | hists__reset_col_len(hists); | |
b09e0190 | 840 | |
42b28ac0 | 841 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { |
b09e0190 ACM |
842 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); |
843 | ||
90cf1fb5 | 844 | if (hists__filter_entry_by_thread(hists, h)) |
b09e0190 | 845 | continue; |
cc5edb0e | 846 | |
42b28ac0 | 847 | hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD); |
b09e0190 ACM |
848 | } |
849 | } | |
ef7b93a1 | 850 | |
e94d53eb NK |
851 | static bool hists__filter_entry_by_symbol(struct hists *hists, |
852 | struct hist_entry *he) | |
853 | { | |
854 | if (hists->symbol_filter_str != NULL && | |
855 | (!he->ms.sym || strstr(he->ms.sym->name, | |
856 | hists->symbol_filter_str) == NULL)) { | |
857 | he->filtered |= (1 << HIST_FILTER__SYMBOL); | |
858 | return true; | |
859 | } | |
860 | ||
861 | return false; | |
862 | } | |
863 | ||
864 | void hists__filter_by_symbol(struct hists *hists) | |
865 | { | |
866 | struct rb_node *nd; | |
867 | ||
868 | hists->nr_entries = hists->stats.total_period = 0; | |
869 | hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; | |
870 | hists__reset_col_len(hists); | |
871 | ||
872 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { | |
873 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); | |
874 | ||
875 | if (hists__filter_entry_by_symbol(hists, h)) | |
876 | continue; | |
877 | ||
878 | hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL); | |
879 | } | |
880 | } | |
881 | ||
2f525d01 | 882 | int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip) |
ef7b93a1 | 883 | { |
2f525d01 | 884 | return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip); |
ef7b93a1 ACM |
885 | } |
886 | ||
ce6f4fab | 887 | int hist_entry__annotate(struct hist_entry *he, size_t privsize) |
ef7b93a1 | 888 | { |
ce6f4fab | 889 | return symbol__annotate(he->ms.sym, he->ms.map, privsize); |
ef7b93a1 | 890 | } |
c8446b9b | 891 | |
28a6b6aa ACM |
892 | void events_stats__inc(struct events_stats *stats, u32 type) |
893 | { | |
894 | ++stats->nr_events[0]; | |
895 | ++stats->nr_events[type]; | |
896 | } | |
897 | ||
/* Bump the event counters of @hists for one event of @type. */
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
95529be4 | 902 | |
/*
 * Insert a zero-count clone of @pair into @hists, to serve as a
 * placeholder bucket when linking two hists (see hists__link()).
 *
 * The lookup/insert walks the same tree the collapse stage uses:
 * entries_collapsed when sort__need_collapse is set, else entries_in.
 *
 * Returns the pre-existing entry if one collapses to the same key,
 * the freshly inserted dummy, or NULL if allocation failed.
 */
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		/* Equal key already present: reuse it, no dummy needed. */
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		/* The dummy carries the key only: wipe the copied counters. */
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}
945 | ||
95529be4 ACM |
946 | static struct hist_entry *hists__find_entry(struct hists *hists, |
947 | struct hist_entry *he) | |
948 | { | |
ce74f60e NK |
949 | struct rb_node *n; |
950 | ||
951 | if (sort__need_collapse) | |
952 | n = hists->entries_collapsed.rb_node; | |
953 | else | |
954 | n = hists->entries_in->rb_node; | |
95529be4 ACM |
955 | |
956 | while (n) { | |
ce74f60e NK |
957 | struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in); |
958 | int64_t cmp = hist_entry__collapse(iter, he); | |
95529be4 ACM |
959 | |
960 | if (cmp < 0) | |
961 | n = n->rb_left; | |
962 | else if (cmp > 0) | |
963 | n = n->rb_right; | |
964 | else | |
965 | return iter; | |
966 | } | |
967 | ||
968 | return NULL; | |
969 | } | |
970 | ||
971 | /* | |
972 | * Look for pairs to link to the leader buckets (hist_entries): | |
973 | */ | |
974 | void hists__match(struct hists *leader, struct hists *other) | |
975 | { | |
ce74f60e | 976 | struct rb_root *root; |
95529be4 ACM |
977 | struct rb_node *nd; |
978 | struct hist_entry *pos, *pair; | |
979 | ||
ce74f60e NK |
980 | if (sort__need_collapse) |
981 | root = &leader->entries_collapsed; | |
982 | else | |
983 | root = leader->entries_in; | |
984 | ||
985 | for (nd = rb_first(root); nd; nd = rb_next(nd)) { | |
986 | pos = rb_entry(nd, struct hist_entry, rb_node_in); | |
95529be4 ACM |
987 | pair = hists__find_entry(other, pos); |
988 | ||
989 | if (pair) | |
5fa9041b | 990 | hist_entry__add_pair(pair, pos); |
95529be4 ACM |
991 | } |
992 | } | |
494d70a1 ACM |
993 | |
994 | /* | |
995 | * Look for entries in the other hists that are not present in the leader, if | |
996 | * we find them, just add a dummy entry on the leader hists, with period=0, | |
997 | * nr_events=0, to serve as the list header. | |
998 | */ | |
999 | int hists__link(struct hists *leader, struct hists *other) | |
1000 | { | |
ce74f60e | 1001 | struct rb_root *root; |
494d70a1 ACM |
1002 | struct rb_node *nd; |
1003 | struct hist_entry *pos, *pair; | |
1004 | ||
ce74f60e NK |
1005 | if (sort__need_collapse) |
1006 | root = &other->entries_collapsed; | |
1007 | else | |
1008 | root = other->entries_in; | |
1009 | ||
1010 | for (nd = rb_first(root); nd; nd = rb_next(nd)) { | |
1011 | pos = rb_entry(nd, struct hist_entry, rb_node_in); | |
494d70a1 ACM |
1012 | |
1013 | if (!hist_entry__has_pairs(pos)) { | |
1014 | pair = hists__add_dummy_entry(leader, pos); | |
1015 | if (pair == NULL) | |
1016 | return -1; | |
5fa9041b | 1017 | hist_entry__add_pair(pos, pair); |
494d70a1 ACM |
1018 | } |
1019 | } | |
1020 | ||
1021 | return 0; | |
1022 | } |