/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <linux/bitops.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/err.h>
#include <sys/resource.h>
#include "callchain.h"
#include "thread_map.h"
#include "perf_regs.h"
#include "trace-event.h"
} perf_missing_features;

static clockid_t clockid;
static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}
static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};
int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel))
{
	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}
/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}
void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}
void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	evsel->evlist	   = NULL;
	INIT_LIST_HEAD(&evsel->node);
	INIT_LIST_HEAD(&evsel->config_terms);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
}
struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (IS_ERR(evsel->tp_format)) {
			err = PTR_ERR(evsel->tp_format);
			goto out_free;
		}

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}
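#if 0
/*
 * Illustrative sketch (not part of the original file): because this
 * constructor encodes errors in the pointer itself, callers must check
 * the result with IS_ERR()/PTR_ERR() rather than against NULL.
 */
struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");

if (IS_ERR(evsel))
	pr_err("failed to create tracepoint event: %ld\n", PTR_ERR(evsel));
#endif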
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};
static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
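#if 0
/*
 * Illustrative sketch (not part of the original file): an event that
 * excludes kernel and hypervisor but not user space gets a "u" modifier
 * appended after a colon, e.g. "cycles:u"; precise_ip = 2 would further
 * append "pp".
 */
char bf[128];
int r = scnprintf(bf, sizeof(bf), "%s", "cycles");

r += perf_evsel__add_modifiers(evsel, bf + r, sizeof(bf) - r);
#endif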
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};
static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}
static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}
static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
	{ "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
	{ "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
	{ "LLC",	"L2",							},
	{ "dTLB",	"d-tlb",	"Data-TLB",				},
	{ "iTLB",	"i-tlb",	"Instruction-TLB",			},
	{ "branch",	"branches",	"bpu",		"btb",	"bpc",		},
	{ "node",								},
};
const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
	{ "load",	"loads",	"read",					},
	{ "store",	"stores",	"write",				},
	{ "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};
const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",	"access",	},
	{ "misses",	"miss",					},
};
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)
/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)] = (CACHE_READ),
	[C(BPU)]  = (CACHE_READ),
	[C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type > PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op > PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
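#if 0
/*
 * Illustrative sketch (not part of the original file): the extended
 * cache config packs type, op and result into consecutive bytes, so an
 * event such as "L1-dcache-load-misses" corresponds to:
 */
u64 config = PERF_COUNT_HW_CACHE_L1D |
	     (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	     (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
#endif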
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}
const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}
static void
perf_evsel__config_callgraph(struct perf_evsel *evsel,
			     struct record_opts *opts,
			     struct callchain_param *param)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							   PERF_SAMPLE_BRANCH_CALL_STACK;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}
static void
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
			    struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		perf_evsel__reset_sample_bit(evsel, REGS_USER);
		perf_evsel__reset_sample_bit(evsel, STACK_USER);
	}
}
static void apply_config_terms(struct perf_evsel *evsel,
			       struct record_opts *opts)
{
	struct perf_evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->attr;
	struct callchain_param param;
	u32 dump_size = 0;
	char *callgraph_buf = NULL;

	/* callgraph default */
	param.record_mode = callchain_param.record_mode;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case PERF_EVSEL__CONFIG_TERM_PERIOD:
			attr->sample_period = term->val.period;
			attr->freq = 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_FREQ:
			attr->sample_freq = term->val.freq;
			attr->freq = 1;
			break;
		case PERF_EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				perf_evsel__set_sample_bit(evsel, TIME);
			else
				perf_evsel__reset_sample_bit(evsel, TIME);
			break;
		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.callgraph;
			break;
		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0)) {

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			perf_evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled)
			perf_evsel__config_callgraph(evsel, opts, &param);
	}
}
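#if 0
/*
 * Illustrative sketch (not part of the original file, and assuming the
 * perf_evsel_config_term layout from evsel.h): an event parsed from
 * e.g. "cycles/period=100003/" carries a PERIOD term that this function
 * turns into attr->sample_period, overriding the global default:
 */
struct perf_evsel_config_term term = {
	.type	    = PERF_EVSEL__CONFIG_TERM_PERIOD,
	.val.period = 100003,
};

list_add_tail(&term.list, &evsel->config_terms);
apply_config_terms(evsel, opts);	/* attr->sample_period == 100003 */
#endif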
/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *     When attaching events to an already running traced process we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ processes ID-specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply the group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for the function trace
	 * event, due to issues with page faults while tracing the page
	 * fault handler and its overall trickiness.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain_param.enabled && !evsel->no_aux_samples)
		perf_evsel__config_callgraph(evsel, opts, &callchain_param);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	/*
	 * When the user explicitly disabled time, don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}

	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->task  = track;
	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders when the traced workload is executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
	    !opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	/*
	 * Apply event-specific term settings; these override
	 * any global configuration.
	 */
	apply_config_terms(evsel, opts);
}
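#if 0
/*
 * Illustrative sketch (not part of the original file): the three
 * scenarios from the strategy comment above map onto attr bits roughly
 * as follows -- a workload started by perf gets a disabled leader with
 * enable_on_exec set; when attaching to an existing pid/tid, the leader
 * stays disabled and is enabled explicitly later, e.g.:
 */
perf_evsel__enable(evsel, ncpus, nthreads);	/* PERF_EVENT_IOC_ENABLE */
#endif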
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}
static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
				 int ioc, void *arg)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}
int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			     const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}
int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
{
	char *new_filter = strdup(filter);

	if (new_filter != NULL) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}
int perf_evsel__append_filter(struct perf_evsel *evsel,
			      const char *op, const char *filter)
{
	char *new_filter;

	if (evsel->filter == NULL)
		return perf_evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, "(%s) %s (%s)", evsel->filter, op, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}
int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
static void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}
static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
{
	struct perf_evsel_config_term *term, *h;

	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
		list_del(&term->list);
		free(term);
	}
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	assert(evsel->evlist == NULL);
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	perf_evsel__free_config_terms(evsel);
	close_cgroup(evsel->cgrp);
	cpu_map__put(evsel->cpus);
	cpu_map__put(evsel->own_cpus);
	thread_map__put(evsel->threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);
}
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}
void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}
void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
}
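#if 0
/*
 * Illustrative worked example (not part of the original file): with
 * val = 1000, ena = 2000000 and run = 1000000 the counter was scheduled
 * in for half of its enabled time, so the count is extrapolated to
 * 1000 * 2000000 / 1000000 + 0.5 = 2000 and *pscaled is set to 1.
 */
struct perf_counts_values c = { .val = 1000, .ena = 2000000, .run = 1000000 };
s8 scaled;

perf_counts_values__scale(&c, true, &scaled);	/* c.val == 2000, scaled == 1 */
#endif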
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
		return -errno;

	return 0;
}
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu, thread) = count;
	return 0;
}
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}
struct bit_names {
	int bit;
	const char *name;
};

static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
	bool first_bit = true;
	int i = 0;

	do {
		if (value & bits[i].bit) {
			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);
}
static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER), bit_name(REGS_INTR),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}
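#if 0
/*
 * Illustrative sketch (not part of the original file): for
 * value = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD this
 * renders the string "IP|TID|PERIOD" into buf.
 */
char buf[1024];

__p_sample_type(buf, sizeof(buf),
		PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD);
#endif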
static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}
#define BUF_SIZE		1024

#define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)				\
do {							\
	if (attr->_f) {					\
		_p(attr->_f);				\
		ret += attr__fprintf(fp, _n, buf, priv);\
	}						\
} while (0)

#define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
			     attr__fprintf_f attr__fprintf, void *priv)
{
	char buf[BUF_SIZE];
	int ret = 0;

	PRINT_ATTRf(type, p_unsigned);
	PRINT_ATTRf(size, p_unsigned);
	PRINT_ATTRf(config, p_hex);
	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
	PRINT_ATTRf(sample_type, p_sample_type);
	PRINT_ATTRf(read_format, p_read_format);

	PRINT_ATTRf(disabled, p_unsigned);
	PRINT_ATTRf(inherit, p_unsigned);
	PRINT_ATTRf(pinned, p_unsigned);
	PRINT_ATTRf(exclusive, p_unsigned);
	PRINT_ATTRf(exclude_user, p_unsigned);
	PRINT_ATTRf(exclude_kernel, p_unsigned);
	PRINT_ATTRf(exclude_hv, p_unsigned);
	PRINT_ATTRf(exclude_idle, p_unsigned);
	PRINT_ATTRf(mmap, p_unsigned);
	PRINT_ATTRf(comm, p_unsigned);
	PRINT_ATTRf(freq, p_unsigned);
	PRINT_ATTRf(inherit_stat, p_unsigned);
	PRINT_ATTRf(enable_on_exec, p_unsigned);
	PRINT_ATTRf(task, p_unsigned);
	PRINT_ATTRf(watermark, p_unsigned);
	PRINT_ATTRf(precise_ip, p_unsigned);
	PRINT_ATTRf(mmap_data, p_unsigned);
	PRINT_ATTRf(sample_id_all, p_unsigned);
	PRINT_ATTRf(exclude_host, p_unsigned);
	PRINT_ATTRf(exclude_guest, p_unsigned);
	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
	PRINT_ATTRf(mmap2, p_unsigned);
	PRINT_ATTRf(comm_exec, p_unsigned);
	PRINT_ATTRf(use_clockid, p_unsigned);
	PRINT_ATTRf(context_switch, p_unsigned);

	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
	PRINT_ATTRf(bp_type, p_unsigned);
	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
	PRINT_ATTRf(sample_regs_user, p_hex);
	PRINT_ATTRf(sample_stack_user, p_unsigned);
	PRINT_ATTRf(clockid, p_signed);
	PRINT_ATTRf(sample_regs_intr, p_hex);
	PRINT_ATTRf(aux_watermark, p_unsigned);

	return ret;
}
static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __attribute__((unused)))
{
	return fprintf(fp, "  %-32s %s\n", name, val);
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (evsel->system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.clockid_wrong)
		evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
	if (perf_missing_features.clockid) {
		evsel->attr.use_clockid = 0;
		evsel->attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}
= 0; cpu
< cpus
->nr
; cpu
++) {
1325 for (thread
= 0; thread
< nthreads
; thread
++) {
1328 if (!evsel
->cgrp
&& !evsel
->system_wide
)
1329 pid
= thread_map__pid(threads
, thread
);
1331 group_fd
= get_group_fd(evsel
, cpu
, thread
);
1333 pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
1334 pid
, cpus
->map
[cpu
], group_fd
, flags
);
1336 FD(evsel
, cpu
, thread
) = sys_perf_event_open(&evsel
->attr
,
1340 if (FD(evsel
, cpu
, thread
) < 0) {
1342 pr_debug2("sys_perf_event_open failed, error %d\n",
1346 set_rlimit
= NO_CHANGE
;
1349 * If we succeeded but had to kill clockid, fail and
1350 * have perf_evsel__open_strerror() print us a nice
1353 if (perf_missing_features
.clockid
||
1354 perf_missing_features
.clockid_wrong
) {
try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface.
	 */
	if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
		perf_missing_features.clockid_wrong = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
		perf_missing_features.clockid = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = nthreads;
	} while (--cpu >= 0);
	return err;
}
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
}
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}
*evsel
,
1467 struct cpu_map
*cpus
)
1469 return __perf_evsel__open(evsel
, cpus
, &empty_thread_map
.map
);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}
static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
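#if 0
/*
 * Illustrative sketch (not part of the original file): before consuming
 * one u64 from the sample array the parser below does, in effect:
 */
if (overflow(endp, sizeof(u64), array, sizeof(u64)))
	return -EFAULT;
data->ip = *array;
array++;
#endif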
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;
	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
						 sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}
& PERF_SAMPLE_CALLCHAIN
) {
1700 const u64 max_callchain_nr
= UINT64_MAX
/ sizeof(u64
);
1702 OVERFLOW_CHECK_u64(array
);
1703 data
->callchain
= (struct ip_callchain
*)array
++;
1704 if (data
->callchain
->nr
> max_callchain_nr
)
1706 sz
= data
->callchain
->nr
* sizeof(u64
);
1707 OVERFLOW_CHECK(array
, sz
, max_size
);
1708 array
= (void *)array
+ sz
;
1711 if (type
& PERF_SAMPLE_RAW
) {
1712 OVERFLOW_CHECK_u64(array
);
1714 if (WARN_ONCE(swapped
,
1715 "Endianness of raw data not corrected!\n")) {
1716 /* undo swap of u64, then swap on individual u32s */
1717 u
.val64
= bswap_64(u
.val64
);
1718 u
.val32
[0] = bswap_32(u
.val32
[0]);
1719 u
.val32
[1] = bswap_32(u
.val32
[1]);
1721 data
->raw_size
= u
.val32
[0];
1722 array
= (void *)array
+ sizeof(u32
);
1724 OVERFLOW_CHECK(array
, data
->raw_size
, max_size
);
1725 data
->raw_data
= (void *)array
;
1726 array
= (void *)array
+ data
->raw_size
;
1729 if (type
& PERF_SAMPLE_BRANCH_STACK
) {
1730 const u64 max_branch_nr
= UINT64_MAX
/
1731 sizeof(struct branch_entry
);
1733 OVERFLOW_CHECK_u64(array
);
1734 data
->branch_stack
= (struct branch_stack
*)array
++;
1736 if (data
->branch_stack
->nr
> max_branch_nr
)
1738 sz
= data
->branch_stack
->nr
* sizeof(struct branch_entry
);
1739 OVERFLOW_CHECK(array
, sz
, max_size
);
1740 array
= (void *)array
+ sz
;
1743 if (type
& PERF_SAMPLE_REGS_USER
) {
1744 OVERFLOW_CHECK_u64(array
);
1745 data
->user_regs
.abi
= *array
;
1748 if (data
->user_regs
.abi
) {
1749 u64 mask
= evsel
->attr
.sample_regs_user
;
1751 sz
= hweight_long(mask
) * sizeof(u64
);
1752 OVERFLOW_CHECK(array
, sz
, max_size
);
1753 data
->user_regs
.mask
= mask
;
1754 data
->user_regs
.regs
= (u64
*)array
;
1755 array
= (void *)array
+ sz
;
1759 if (type
& PERF_SAMPLE_STACK_USER
) {
1760 OVERFLOW_CHECK_u64(array
);
1763 data
->user_stack
.offset
= ((char *)(array
- 1)
1767 data
->user_stack
.size
= 0;
1769 OVERFLOW_CHECK(array
, sz
, max_size
);
1770 data
->user_stack
.data
= (char *)array
;
1771 array
= (void *)array
+ sz
;
1772 OVERFLOW_CHECK_u64(array
);
1773 data
->user_stack
.size
= *array
++;
1774 if (WARN_ONCE(data
->user_stack
.size
> sz
,
1775 "user stack dump failure\n"))
1781 if (type
& PERF_SAMPLE_WEIGHT
) {
1782 OVERFLOW_CHECK_u64(array
);
1783 data
->weight
= *array
;
1787 data
->data_src
= PERF_MEM_DATA_SRC_NONE
;
1788 if (type
& PERF_SAMPLE_DATA_SRC
) {
1789 OVERFLOW_CHECK_u64(array
);
1790 data
->data_src
= *array
;
1794 data
->transaction
= 0;
1795 if (type
& PERF_SAMPLE_TRANSACTION
) {
1796 OVERFLOW_CHECK_u64(array
);
1797 data
->transaction
= *array
;
1801 data
->intr_regs
.abi
= PERF_SAMPLE_REGS_ABI_NONE
;
1802 if (type
& PERF_SAMPLE_REGS_INTR
) {
1803 OVERFLOW_CHECK_u64(array
);
1804 data
->intr_regs
.abi
= *array
;
1807 if (data
->intr_regs
.abi
!= PERF_SAMPLE_REGS_ABI_NONE
) {
1808 u64 mask
= evsel
->attr
.sample_regs_intr
;
1810 sz
= hweight_long(mask
) * sizeof(u64
);
1811 OVERFLOW_CHECK(array
, sz
, max_size
);
1812 data
->intr_regs
.mask
= mask
;
1813 data
->intr_regs
.regs
= (u64
*)array
;
1814 array
= (void *)array
+ sz
;
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	return result;
}
*event
, u64 type
,
1928 const struct perf_sample
*sample
,
1934 * used for cross-endian analysis. See git commit 65014ab3
1935 * for why this goofiness is needed.
1939 array
= event
->sample
.array
;
1941 if (type
& PERF_SAMPLE_IDENTIFIER
) {
1942 *array
= sample
->id
;
1946 if (type
& PERF_SAMPLE_IP
) {
1947 *array
= sample
->ip
;
1951 if (type
& PERF_SAMPLE_TID
) {
1952 u
.val32
[0] = sample
->pid
;
1953 u
.val32
[1] = sample
->tid
;
1956 * Inverse of what is done in perf_evsel__parse_sample
1958 u
.val32
[0] = bswap_32(u
.val32
[0]);
1959 u
.val32
[1] = bswap_32(u
.val32
[1]);
1960 u
.val64
= bswap_64(u
.val64
);
1967 if (type
& PERF_SAMPLE_TIME
) {
1968 *array
= sample
->time
;
1972 if (type
& PERF_SAMPLE_ADDR
) {
1973 *array
= sample
->addr
;
1977 if (type
& PERF_SAMPLE_ID
) {
1978 *array
= sample
->id
;
1982 if (type
& PERF_SAMPLE_STREAM_ID
) {
1983 *array
= sample
->stream_id
;
1987 if (type
& PERF_SAMPLE_CPU
) {
1988 u
.val32
[0] = sample
->cpu
;
1991 * Inverse of what is done in perf_evsel__parse_sample
1993 u
.val32
[0] = bswap_32(u
.val32
[0]);
1994 u
.val64
= bswap_64(u
.val64
);
2000 if (type
& PERF_SAMPLE_PERIOD
) {
2001 *array
= sample
->period
;
2005 if (type
& PERF_SAMPLE_READ
) {
2006 if (read_format
& PERF_FORMAT_GROUP
)
2007 *array
= sample
->read
.group
.nr
;
2009 *array
= sample
->read
.one
.value
;
2012 if (read_format
& PERF_FORMAT_TOTAL_TIME_ENABLED
) {
2013 *array
= sample
->read
.time_enabled
;
2017 if (read_format
& PERF_FORMAT_TOTAL_TIME_RUNNING
) {
2018 *array
= sample
->read
.time_running
;
2022 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2023 if (read_format
& PERF_FORMAT_GROUP
) {
2024 sz
= sample
->read
.group
.nr
*
2025 sizeof(struct sample_read_value
);
2026 memcpy(array
, sample
->read
.group
.values
, sz
);
2027 array
= (void *)array
+ sz
;
2029 *array
= sample
->read
.one
.id
;
	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	return 0;
}
*perf_evsel__field(struct perf_evsel
*evsel
, const char *name
)
2117 return pevent_find_field(evsel
->tp_format
, name
);
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}
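#if 0
/*
 * Illustrative sketch (not part of the original file): in a tracepoint
 * sample handler one would pull a numeric field out of the raw data by
 * name, e.g. the "prev_pid" field of sched:sched_switch:
 */
u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
#endif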
static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}
static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
{
	return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
}
int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose) {
		printed += perf_event_attr__fprintf(fp, &evsel->attr,
						    __print_attr__fprintf, &first);
	} else if (details->freq) {
		const char *term = "sample_freq";

		if (!evsel->attr.freq)
			term = "sample_period";

		printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
					 term, (u64)evsel->attr.sample_freq);
	}
out:
	fputc('\n', fp);
	return ++printed;
}
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * ...).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	}

	return false;
}
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];

	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
			"You may not have permission to collect %sstats.\n"
			"Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
			" -1 - Not paranoid at all\n"
			"  0 - Disallow raw tracepoint access for unpriv\n"
			"  1 - Disallow cpu events for unpriv\n"
			"  2 - Disallow kernel profiling for unpriv",
			target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			"Too many events are opened.\n"
			"Probably the maximum number of open file descriptors has been reached.\n"
			"Hint: Try again after reducing the number of events.\n"
			"Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
				"No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
				"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
				"No hardware sampling interrupt available.\n"
				"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
				"The PMU counters are busy/taken by another profiler.\n"
				"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
		"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
		"/bin/dmesg may provide additional information.\n"
		"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
		err, strerror_r(err, sbuf, sizeof(sbuf)),
		perf_evsel__name(evsel));
}