perf tools: Fix out-of-bound access to struct perf_session
tools/perf/util/session.c

#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0)
			pr_err("incompatible file format");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_evlist__sample_type(self->evlist);
	self->sample_size = __perf_evsel__sample_size(self->sample_type);
	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
	self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
	self->host_machine.id_hdr_size = self->id_hdr_size;
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	size_t len = filename ? strlen(filename) : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__update_sample_type(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

static void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->host_machine);
}

static void machine__delete_threads(struct machine *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->host_machine);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void machine__remove_thread(struct machine *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}

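/*
 * Walk a recorded callchain and resolve each address to a symbol.
 * Entries at or above PERF_CONTEXT_MAX are context markers (HV/KERNEL/USER)
 * that only switch the cpumode used for the addresses that follow; real
 * entries are resolved and appended to the evsel's callchain cursor,
 * honouring callchain_param.order (callee-first or caller-first).
 */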
int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
			       struct thread *thread,
			       struct ip_callchain *chain,
			       struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&evsel->hists.callchain_cursor);

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL; break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER; break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&evsel->hists.callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int process_event_synth_tracing_data_stub(union perf_event *event __used,
						 struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __used,
					 struct perf_evlist **pevlist __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __used,
				     union perf_event *event __used,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __used,
			      union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __used,
				       union perf_event *event __used,
				       struct perf_session *perf_session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_type_stub(struct perf_tool *tool __used,
				   union perf_event *event __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

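/*
 * Plug a default handler into every callback the tool left NULL, so the
 * dispatch code below can call them unconditionally.
 */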
static void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_type == NULL)
		tool->event_type = process_event_type_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}

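/*
 * Byte-swapping helpers, used when the perf.data file was recorded on a
 * machine of the opposite endianness (header.needs_swap). mem_bswap_64()
 * swaps in 64-bit chunks, so callers are expected to pass sizes that are
 * multiples of sizeof(u64).
 */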
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void perf_event__all64_swap(union perf_event *event)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);
}

static void perf_event__mmap_swap(union perf_event *event)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}

static void perf_event__task_swap(union perf_event *event)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);
}

static void perf_event__read_swap(union perf_event *event)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);
	attr->config = bswap_64(attr->config);
	attr->sample_period = bswap_64(attr->sample_period);
	attr->sample_type = bswap_64(attr->sample_type);
	attr->read_format = bswap_64(attr->read_format);
	attr->wakeup_events = bswap_32(attr->wakeup_events);
	attr->bp_type = bswap_32(attr->bp_type);
	attr->bp_addr = bswap_64(attr->bp_addr);
	attr->bp_len = bswap_64(attr->bp_len);
}

static void perf_event__hdr_attr_swap(union perf_event *event)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

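/*
 * The table above is indexed by event->header.type; when the file header
 * says the data needs swapping, perf_session__process_event() looks up the
 * matching handler (if any) and applies it before the event is dispatched.
 */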
struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

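/*
 * Deliver, in timestamp order, every queued sample whose timestamp is not
 * newer than ordered_samples.next_flush. Delivered entries are moved onto
 * the sample_cache list so perf_session_queue_event() can reuse them, and
 * the progress bar is updated roughly every 1/16th of the queued samples.
 */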
static void flush_sample_queue(struct perf_session *s,
			       struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!tool->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_session__parse_sample(s, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else
			perf_session_deliver_event(s, iter->event, &sample, tool,
						   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7  <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __used,
				  struct perf_session *session)
{
	flush_sample_queue(session, tool);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

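/*
 * Queue entries are carved out of 64KB slabs of struct sample_queue.
 * Entries recycled by flush_sample_queue() sit on sample_cache and are
 * reused first; otherwise the current slab is consumed and, once full, a
 * new one is allocated and chained on to_free so it can be released at
 * the end of the run by perf_session_free_sample_buffers().
 */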
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);
}

static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest)
		return perf_session__find_machine(session, event->ip.pid);

	return perf_session__find_host_machine(session);
}

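/*
 * Hand a single, already-parsed event to the tool: bump the per-evsel
 * event stats (samples are left unaccounted here because tools may still
 * filter them), pick the machine (host or guest) matching the event's
 * cpumode, then dispatch on event->header.type to the matching callback.
 */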
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now tho.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return -1;
		}
		return tool->sample(tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->hists.stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(event, &session->evlist);
		if (err == 0)
			perf_session__update_sample_type(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return tool->event_type(tool, event);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

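/*
 * Central per-event entry point: byte-swap the event if the file header
 * asked for it, reject out-of-range types, account the event, route
 * user-space synthesized records to perf_session__process_user_event(),
 * parse the sample data of kernel records, pre-check callchains, and
 * either queue the event for time ordering or deliver it straight away.
 */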
static int perf_session__process_event(struct perf_session *session,
					union perf_event *event,
					struct perf_tool *tool,
					u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap &&
	    perf_event__swap_ops[event->header.type])
		perf_event__swap_ops[event->header.type](event);

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_session__parse_sample(session, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->host_machine, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.nr_events[0],
			    session->hists.stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
more:
	err = readn(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, tool, head)) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}

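/*
 * Return a pointer to the event at 'head' inside the current mmap window,
 * or NULL when either the event header or the full event would cross the
 * end of the window, telling the caller to remap further into the file.
 */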
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

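/*
 * Read the on-disk data in mmap_window-sized windows (the whole file on
 * 64-bit), keeping up to 8 mappings around (an older window is only
 * unmapped when its slot is reused); when fetch_mmaped_event() hits the
 * end of the current window the file is remapped at the next page-aligned
 * offset and processing continues from there.
 */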
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, tool);
out_err:
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", event_name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->host_machine, fp);
}

void perf_session__remove_thread(struct perf_session *session,
				 struct thread *th)
{
	/*
	 * FIXME: This one makes no sense, we need to remove the thread from
	 * the machine it belongs to, perf_session can have many machines, so
	 * doing it always on ->host_machine is wrong. Fix when auditing all
	 * the 'perf kvm' code.
	 */
	machine__remove_thread(&session->host_machine, th);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
			  struct machine *machine, struct perf_evsel *evsel,
			  int print_sym, int print_dso)
{
	struct addr_location al;
	const char *symname, *dsoname;
	struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
		      event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, evsel, al.thread,
					       sample->callchain, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(cursor);

		while (1) {
			node = callchain_cursor_current(cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				if (node->sym && node->sym->name)
					symname = node->sym->name;
				else
					symname = "";

				printf(" %s", symname);
			}
			if (print_dso) {
				if (node->map && node->map->dso && node->map->dso->name)
					dsoname = node->map->dso->name;
				else
					dsoname = "";

				printf(" (%s)", dsoname);
			}
			printf("\n");

			callchain_cursor_advance(cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			if (al.sym && al.sym->name)
				symname = al.sym->name;
			else
				symname = "";

			printf(" %s", symname);
		}

		if (print_dso) {
			if (al.map && al.map->dso && al.map->dso->name)
				dsoname = al.map->dso->name;
			else
				dsoname = "";

			printf(" (%s)", dsoname);
		}
	}
}

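/*
 * Translate a user supplied cpu list string (as accepted by cpu_map__new())
 * into a bitmap, after checking that the recorded events actually carry
 * PERF_SAMPLE_CPU; CPUs at or beyond MAX_NR_CPUS are rejected.
 */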
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}