#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include "perf_regs.h"
static int perf_session__open(struct perf_session *session)
{
	struct perf_data_file *file = session->file;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		return -1;
	}

	if (perf_data_file__is_pipe(file))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format");
		return -1;
	}

	return 0;
}
void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}
int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}
static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}
struct perf_session *perf_session__new(struct perf_data_file *file,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	INIT_LIST_HEAD(&session->ordered_samples.samples);
	INIT_LIST_HEAD(&session->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&session->ordered_samples.to_free);
	machines__init(&session->machines);

	if (file) {
		if (perf_data_file__open(file))
			goto out_delete;

		session->file = file;

		if (perf_data_file__is_read(file)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			perf_session__set_id_hdr_size(session);
		}
	}

	if (!file || perf_data_file__is_write(file)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

	return session;

 out_close:
	perf_data_file__close(file);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}
static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->machines.host);
}
static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}
static void perf_session_env__delete(struct perf_session_env *env)
{
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->cpu_desc);
	zfree(&env->cmdline);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->numa_nodes);
	zfree(&env->pmu_mappings);
}
void perf_session__delete(struct perf_session *session)
{
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_dead_threads(session);
	perf_session__delete_threads(session);
	perf_session_env__delete(&session->header.env);
	machines__exit(&session->machines);
	if (session->file)
		perf_data_file__close(session->file);
	free(session);
}
static int process_event_synth_tracing_data_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}
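/*
 * Usage note (a sketch, not from the original file): callers only set the
 * callbacks they care about; perf_tool__fill_defaults() backfills the rest
 * with the stubs above. The handler name is hypothetical:
 *
 *	struct perf_tool tool = {
 *		.comm		 = my_comm_handler,	// hypothetical
 *		.ordered_samples = true,
 *	};
 *	perf_tool__fill_defaults(&tool);
 *	// tool.sample, tool.mmap, ... now point at the stubs, and
 *	// tool.finished_round points at process_finished_round because
 *	// ordered_samples is set.
 */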
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}
static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}
static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid   = bswap_32(event->mmap2.pid);
	event->mmap2.tid   = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len   = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj   = bswap_32(event->mmap2.maj);
	event->mmap2.min   = bswap_32(event->mmap2.min);
	event->mmap2.ino   = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}
static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}
static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time	  = bswap_64(event->throttle.time);
	event->throttle.id	  = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}
static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}
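/*
 * Worked example (annotation, not in the original): revbyte() mirrors a
 * byte by swapping nibbles, then bit pairs, then adjacent bits. For
 * b = 0x01 (bit 0 set):
 *
 *	nibble swap:   0x01 -> 0x10
 *	pair swap:     0x10 -> 0x40
 *	adjacent swap: 0x40 -> 0x80  (bit 7 set)
 *
 * i.e. revbyte(0x01) == 0x80.
 */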
/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and to carry perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);
	attr->branch_sample_type = bswap_64(attr->branch_sample_type);
	attr->sample_regs_user	 = bswap_64(attr->sample_regs_user);
	attr->sample_stack_user  = bswap_32(attr->sample_stack_user);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}
static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}
static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}
static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}
typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};
struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};
static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);
static int flush_sample_queue(struct perf_session *s,
			      struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!tool->ordered_samples || !limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, os->nr_samples, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session_deliver_event(s, iter->event, &sample, tool,
							 iter->file_offset);
			if (ret)
				return ret;
		}

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	return 0;
}
/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	int ret = flush_sample_queue(session, tool);

	if (!ret)
		session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return ret;
}
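/*
 * Note: the flush happens in two steps. This round flushes everything
 * queued up to next_flush, which was set at the end of the *previous*
 * round; only then is next_flush advanced to the max timestamp seen so
 * far. That staggering is what gives the "pass n + 2" guarantee described
 * in the comment above.
 */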
/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}
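/*
 * Design note: insertion is a linear scan starting from the most recently
 * queued event rather than from either end of the list. Because the
 * stream is nearly sorted (timestamps mostly increase, with bounded
 * reordering across CPUs), the new event almost always lands within a few
 * links of last_sample, making the common case O(1) despite the O(n)
 * worst case.
 */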
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))
int perf_session_queue_event(struct perf_session *s, union perf_event *event,
			     struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}
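/*
 * Allocation note: sample_queue nodes come from three places, tried in
 * order: the sample_cache free list (nodes recycled by
 * flush_sample_queue()), the current sample_buffer slab, or a freshly
 * malloc'ed slab of MAX_SAMPLE_BUFFER nodes. Entry 0 of each slab is
 * never handed out; its list member anchors the slab on os->to_free so
 * perf_session_free_sample_buffers() can release whole slabs at once,
 * hence "idx = 2" and "new = sample_buffer + 1" above.
 */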
static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}
static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
		       i, sample->branch_stack->entries[i].from,
		       sample->branch_stack->entries[i].to);
}
static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}
static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs) {
		u64 mask = user_regs->mask;
		printf("... user regs: mask 0x%" PRIx64 "\n", mask);
		regs_dump__printf(mask, user_regs->regs);
	}
}
static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}
static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}
static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}
static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}
static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}
static struct machine *
perf_session__find_machine_for_cpumode(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct machine *machine;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = perf_session__find_machine(session, pid);
		if (!machine)
			machine = perf_session__findnew_machine(session,
						DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &session->machines.host;
}
static int deliver_sample_value(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid;

	sid = perf_evlist__id2sid(session->evlist, v->id);
	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++session->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}
static int deliver_sample_group(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(session, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}
static int
perf_session__deliver_sample(struct perf_session *session,
			     struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_evsel *evsel,
			     struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(session, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(session, tool, event, sample,
					    &sample->read.one, machine);
}
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc. KISS for
		 * now.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event,
							 sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++session->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_session__deliver_sample(session, tool, event,
						    sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->stats.nr_unknown_events;
		return -1;
	}
}
static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    struct perf_tool *tool,
					    u64 file_offset)
{
	int fd = perf_data_file__fd(session->file);
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}
static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}
static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&session->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}
void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}
static struct thread *perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}
static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->stats.nr_events[0],
			    session->stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_unknown_events);
	}

	if (session->stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->stats.nr_unknown_id);
	}

	if (session->stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_invalid_chains,
			    session->stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->stats.nr_unprocessable_samples);
	}
}
volatile int session_done;
static int __perf_session__process_pipe_events(struct perf_session *session,
					       struct perf_tool *tool)
{
	int fd = perf_data_file__fd(session->file);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
out_err:
	free(buf);
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}
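/*
 * Note: a NULL return means the event (or even its header) straddles the
 * end of the current mmap window. The caller reacts by remapping at a
 * page-aligned offset just below the current position, so the partial
 * event becomes fully visible in the next window.
 */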
/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
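/*
 * Note: NUM_MMAPS must be a power of two; __perf_session__process_events()
 * advances map_idx with "& (ARRAY_SIZE(mmaps) - 1)", which only wraps
 * correctly for power-of-two sizes. Keeping several slices mapped on
 * 32bit appears intended to let events queued for ordered processing,
 * which point into the mapped file, remain addressable across remaps.
 */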
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	int fd = perf_data_file__fd(session->file);
	u64 head, page_offset, file_offset, file_pos, size;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	struct ui_progress prog;
	s64 skip;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size && (data_offset + data_size < file_size))
		file_size = data_offset + data_size;

	ui_progress__init(&prog, file_size, "Processing events...");

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size) {
		mmap_size = file_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = perf_session__process_event(session, event, tool, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	ui_progress__update(&prog, size);

	if (file_pos < file_size)
		goto more;

	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	session->one_mmap = false;
	return err;
}
int perf_session__process_events(struct perf_session *session,
				 struct perf_tool *tool)
{
	u64 size = perf_data_file__size(session->file);
	int err;

	if (perf_session__register_idle_thread(session) == NULL)
		return -ENOMEM;

	if (!perf_data_file__is_pipe(session->file))
		err = __perf_session__process_events(session,
						     session->header.data_offset,
						     session->header.data_size,
						     size, tool);
	else
		err = __perf_session__process_pipe_events(session, tool);

	return err;
}
bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	int i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}
size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}
size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += events_stats__fprintf(&session->stats, fp);

	evlist__for_each(session->evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&pos->hists.stats, fp);
	}

	return ret;
}
size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}
void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
			  struct addr_location *al,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
	char s = print_oneline ? ' ' : '\t';

	if (symbol_conf.use_callchain && sample->callchain) {
		struct addr_location node_al;

		if (machine__resolve_callchain(al->machine, evsel, al->thread,
					       sample, NULL, NULL,
					       PERF_MAX_STACK_DEPTH) != 0) {
			error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		if (print_symoffset)
			node_al = *al;

		while (stack_depth) {
			u64 addr = 0;

			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (node->sym && node->sym->ignore)
				goto next;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			if (node->map)
				addr = node->map->map_ip(node->map, node->ip);

			if (print_sym) {
				if (print_symoffset) {
					node_al.addr = addr;
					node_al.map  = node->map;
					symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso)
				map__fprintf_dsoname(node->map, stdout);

			if (print_srcline)
				map__fprintf_srcline(node->map, addr, "\n  ",
						     stdout);

			if (!print_oneline)
				printf("\n");

			stack_depth--;
next:
			callchain_cursor_advance(&callchain_cursor);
		}
	} else {
		if (al->sym && al->sym->ignore)
			return;

		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			if (print_symoffset)
				symbol__fprintf_symname_offs(al->sym, al,
							     stdout);
			else
				symbol__fprintf_symname(al->sym, stdout);
		}

		if (print_dso)
			map__fprintf_dsoname(al->map, stdout);

		if (print_srcline)
			map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
	}
}
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	cpu_map__delete(map);
	return err;
}
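/*
 * Usage sketch (not from the original file): build a bitmap restricted to
 * CPUs 0-3 and test membership per sample. DECLARE_BITMAP and test_bit
 * come from the kernel bitmap helpers.
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *
 *	if (perf_session__cpu_bitmap(session, "0-3", cpu_bitmap) == 0 &&
 *	    test_bit(sample->cpu, cpu_bitmap)) {
 *		// sample fell on one of the requested CPUs
 *	}
 */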
void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int fd, ret;

	if (session == NULL || fp == NULL)
		return;

	fd = perf_data_file__fd(session->file);

	ret = fstat(fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}
int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * Adding a handler for an event not in the session,
		 * just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}