perf tools: Re-implement debug print function for linking python/perf.so
tools/perf/util/evsel.c

/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include <lk/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <sys/resource.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"

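/*
 * Kernel features found to be missing when opening events; cached here so
 * the open fallback path below only has to be taken once per feature.
 */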
static struct {
	bool sample_id_all;
	bool exclude_guest;
} perf_missing_features;

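/* file descriptor of event 'e' for cpu index 'x' and thread index 'y' */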
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

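/*
 * Fixed size of the sample payload implied by sample_type: one u64 for
 * each bit set in the PERF_SAMPLE_MASK part of the mask.
 */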
static int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

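/*
 * The set/reset pair below keeps evsel->sample_size in sync with
 * evsel->attr.sample_type: every sample_type bit costs one u64 in the
 * sample payload.
 */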
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
	}
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel)
{
	perf_evsel__set_sample_bit(evsel, ID);
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx = idx;
	evsel->attr = *attr;
	evsel->leader = evsel;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

struct event_format *event_format__new(const char *sys, const char *name)
{
	int fd, n;
	char *filename;
	void *bf = NULL, *nbf;
	size_t size = 0, alloc_size = 0;
	struct event_format *format = NULL;

	if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out_free_filename;

	do {
		if (size == alloc_size) {
			alloc_size += BUFSIZ;
			nbf = realloc(bf, alloc_size);
			if (nbf == NULL)
				goto out_free_bf;
			bf = nbf;
		}

		n = read(fd, bf + size, alloc_size - size);
		if (n < 0)
			goto out_free_bf;
		size += n;
	} while (n > 0);

	pevent_parse_format(&format, bf, size, sys);

out_free_bf:
	free(bf);
	close(fd);
out_free_filename:
	free(filename);
out:
	return format;
}

struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = event_format__new(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	free(evsel->name);
	free(evsel);
	return NULL;
}

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

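/*
 * Append the event modifier suffix (':' followed by characters such as
 * 'u', 'k', 'h', 'p', 'H' or 'G') implied by the exclude_* and
 * precise_ip attr bits; returns the number of characters added.
 */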
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
};

251
dd4f5223 252static const char *__perf_evsel__sw_name(u64 config)
335c2f5d
ACM
253{
254 if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
255 return perf_evsel__sw_names[config];
256 return "unknown-software";
257}
258
259static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
260{
261 int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
262 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
263}
264
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
	{ "L1-dcache",	"l1-d",		"l1d",		"L1-data", },
	{ "L1-icache",	"l1-i",		"l1i",		"L1-instruction", },
	{ "LLC",	"L2", },
	{ "dTLB",	"d-tlb",	"Data-TLB", },
	{ "iTLB",	"i-tlb",	"Instruction-TLB", },
	{ "branch",	"branches",	"bpu",		"btb",	"bpc", },
	{ "node", },
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
	{ "load",	"loads",	"read", },
	{ "store",	"stores",	"write", },
	{ "prefetch",	"prefetches",	"speculative-read", "speculative-load", },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",		"access", },
	{ "misses",	"miss", },
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * Cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)] = (CACHE_READ),
	[C(BPU)]  = (CACHE_READ),
	[C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

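/*
 * attr.config for PERF_TYPE_HW_CACHE encodes the cache id in byte 0,
 * the operation in byte 1 and the result in byte 2.
 */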
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 * 1) For any type of traced program:
 *   - all independent events and group leaders are disabled
 *   - all group members are enabled
 *
 *   Group members are ruled by group leaders. They need to
 *   be enabled, because the group scheduling relies on that.
 *
 * 2) For traced programs executed by perf:
 *   - all independent events and group leaders have
 *     enable_on_exec set
 *   - we don't specifically enable or disable any event during
 *     the record command
 *
 *   Independent events and group leaders are initially disabled
 *   and get enabled by exec. Group members are ruled by group
 *   leaders as stated in 1).
 *
 * 3) For traced programs attached by perf (pid/tid):
 *   - we specifically enable or disable all events during
 *     the record command
 *
 *   When attaching events to an already running traced task we
 *   enable/disable events specifically, as there's no
 *   initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel,
			struct perf_record_opts *opts)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ processes ID specific data.
		 */
		perf_evsel__set_sample_id(evsel);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to a period of 1. But keep it
	 * a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other than the
	 * leader in case the leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	if (opts->call_graph) {
		perf_evsel__set_sample_bit(evsel, CALLCHAIN);

		if (opts->call_graph == CALLCHAIN_DWARF) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (perf_target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	if (!perf_missing_features.sample_id_all &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		attr->sample_type |= PERF_SAMPLE_DATA_SRC;

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		attr->sample_type |= PERF_SAMPLE_WEIGHT;

	attr->mmap = track;
	attr->comm = track;

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
		attr->enable_on_exec = 1;
}

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

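/* apply 'ioc' to every open fd of the evsel, stopping at the first error */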
static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
				 int ioc, void *arg)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
{
	memset(evsel->counts, 0, (sizeof(*evsel->counts) +
				  (ncpus * sizeof(struct perf_counts_values))));
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__free_counts(struct perf_evsel *evsel)
{
	free(evsel->counts);
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->group_name);
	if (evsel->tp_format)
		pevent_free_format(evsel->tp_format);
	free(evsel->name);
	free(evsel);
}

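/*
 * If a previous raw reading was saved, turn *count into a delta against
 * it and stash the current raw values for the next round.
 */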
static inline void compute_deltas(struct perf_evsel *evsel,
				  int cpu,
				  struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = evsel->prev_raw_counts->cpu[cpu];
		evsel->prev_raw_counts->cpu[cpu] = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

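/*
 * Read one counter value; with 'scale' the kernel also returns
 * time_enabled/time_running, which are used below to compensate for
 * multiplexing by scaling the raw count by ena/run.
 */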
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	compute_deltas(evsel, cpu, &count);

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	compute_deltas(evsel, -1, aggr);

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}

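/*
 * Open the event on every cpu/thread combination, probing for missing
 * kernel features (and bumping RLIMIT_NOFILE) and retrying as needed.
 */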
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);

retry_open:
			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto try_fallback;
			}
			set_rlimit = NO_CHANGE;
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	if (!perf_missing_features.exclude_guest &&
	    (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}

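/*
 * For non-sample records, sample_id_all appends the id fields at the end
 * of the record, so they are parsed backwards from the last u64.
 */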
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}

static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

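/*
 * Walk the sample array in the exact order the kernel wrote the fields,
 * bounds checking variable-sized pieces against the record size.
 */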
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	u64 regs_user = evsel->attr.sample_regs_user;
	bool swapped = evsel->needs_swap;
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;
	data->weight = 0;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			data->read.group.values = (struct sample_read_value *) array;
			array = (void *) array + data->read.group.nr *
				sizeof(struct sample_read_value);
		} else {
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		/* First u64 tells us if we have any regs in sample. */
		u64 avail = *array++;

		if (avail) {
			data->user_regs.regs = (u64 *)array;
			array += hweight_long(regs_user);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		u64 size = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!size) {
			data->user_stack.size = 0;
		} else {
			data->user_stack.data = (char *)array;
			array += size / sizeof(*array);
			data->user_stack.size = *array++;
		}
	}

	data->weight = 0;
	if (type & PERF_SAMPLE_WEIGHT) {
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		data->data_src = *array;
		array++;
	}

	return 0;
}

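/*
 * Inverse of the parsing above: lay the fixed-size sample fields out in
 * the same order the kernel would write them.
 */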
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}

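/*
 * Tracepoint field access helpers: look a field up in the parsed format
 * and fetch it from a sample's raw data.
 */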
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}

u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		value = *(u64 *)ptr;
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}

static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}

static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
{
	if (value == 0)
		return 0;

	return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}

#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)

struct bit_names {
	int bit;
	const char *name;
};

static int bits__fprintf(FILE *fp, const char *field, u64 value,
			 struct bit_names *bits, bool *first)
{
	int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
	bool first_bit = true;

	do {
		if (value & bits[i].bit) {
			printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);

	return printed;
}

static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "sample_type", value, bits, first);
}

static int read_format__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "read_format", value, bits, first);
}

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose || details->freq) {
		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
					 (u64)evsel->attr.sample_freq);
	}

	if (details->verbose) {
		if_print(type);
		if_print(config);
		if_print(config1);
		if_print(config2);
		if_print(size);
		printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
		if (evsel->attr.read_format)
			printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
		if_print(disabled);
		if_print(inherit);
		if_print(pinned);
		if_print(exclusive);
		if_print(exclude_user);
		if_print(exclude_kernel);
		if_print(exclude_hv);
		if_print(exclude_idle);
		if_print(mmap);
		if_print(comm);
		if_print(freq);
		if_print(inherit_stat);
		if_print(enable_on_exec);
		if_print(task);
		if_print(watermark);
		if_print(precise_ip);
		if_print(mmap_data);
		if_print(sample_id_all);
		if_print(exclude_host);
		if_print(exclude_guest);
		if_print(__reserved_1);
		if_print(wakeup_events);
		if_print(bp_type);
		if_print(branch_sample_type);
	}
out:
	fputc('\n', fp);
	return ++printed;
}

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		free(evsel->name);
		evsel->name = NULL;
		return true;
	}

	return false;
}

int perf_evsel__open_strerror(struct perf_evsel *evsel,
			      struct perf_target *target,
			      int err, char *msg, size_t size)
{
	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 "  0 - Disallow raw tracepoint access for unpriv\n"
		 "  1 - Disallow cpu events for unpriv\n"
		 "  2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Try again after reducing the number of events.");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"'precise' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
	err, strerror(err), perf_evsel__name(evsel));
}