perf tools: Add support for parsing PERF_SAMPLE_READ sample type
tools/perf/util/evlist.c

/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

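/*
 * Lifecycle sketch (illustrative, not part of the original file): a
 * perf_evlist__new() is paired with perf_evlist__delete(), with events
 * added in between, e.g.:
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	if (perf_evlist__add_default(evlist))	// adds a "cycles" event
 *		goto out_delete;
 *	...
 * out_delete:
 *	perf_evlist__delete(evlist);
 */
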
void perf_evlist__config(struct perf_evlist *evlist,
			 struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;
	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

		if (evlist->nr_entries > 1)
			perf_evsel__set_sample_id(evsel);
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

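/*
 * Worked example: for a group parsed as {cycles,cache-misses}, the list
 * holds evsels with idx 0 and 1; the leader is the first entry, so
 * leader->nr_members = 1 - 0 + 1 = 2, i.e. the count includes the
 * leader itself.
 */
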
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

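/*
 * Usage sketch (illustrative, not part of the original file): callers
 * such as the stat tool keep an attr table and add it wholesale, e.g.:
 *
 *	static struct perf_event_attr default_attrs[] = {
 *		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
 *		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
 *	};
 *
 *	if (perf_evlist__add_default_attrs(evlist, default_attrs))
 *		goto out;
 *
 * perf_evlist__add_default_attrs() is assumed here to be the wrapper
 * macro that passes ARRAY_SIZE(default_attrs) as nr_attrs.
 */
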
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

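/*
 * Usage sketch (illustrative): adding a tracepoint with a callback that
 * the tool later fetches back via evsel->handler.func:
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_sched_switch))
 *		return -1;
 *
 * "process_sched_switch" is a made-up handler name; the pointer is
 * stored opaquely and its signature is up to the caller.
 */
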
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

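/*
 * For reference, the legacy fallback above relies on the non-group
 * read() layout for a single counter, where the optional fields appear
 * in this order (the documented perf_event_open() read format):
 *
 *	read_data[0] = value;
 *	read_data[1] = time_enabled;	// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	read_data[2] = time_running;	// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	read_data[3] = id;		// if PERF_FORMAT_ID
 *
 * hence id_idx starts at 1 and is bumped once per enabled time field.
 */
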
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

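/*
 * Consumption sketch (illustrative, not part of the original file): a
 * reader typically polls evlist->pollfd and then drains each map, e.g.:
 *
 *	union perf_event *event;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
 *			// parse with perf_evlist__parse_sample(), then
 *			// deliver the event to the tool
 *		}
 *	}
 *
 * With overwrite == false, the tail write above is what tells the
 * kernel the consumed space can be reused.
 */
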
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 * struct perf_mmap *m = &evlist->mmap[cpu];
 * unsigned int head = perf_mmap__read_head(m);
 *
 * perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

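/*
 * Setup sketch (illustrative, not part of the original file): the usual
 * call order in a record-style tool is maps, open, then mmap:
 *
 *	if (perf_evlist__create_maps(evlist, &target) < 0)
 *		goto out;
 *	if (perf_evlist__open(evlist) < 0)
 *		goto out_delete_maps;
 *	if (perf_evlist__mmap(evlist, opts.mmap_pages, false) < 0)
 *		goto out_close;
 *
 * "target" and "opts" stand in for the tool's perf_target and
 * perf_record_opts instances.
 */
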
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_type;
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

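/*
 * The PERF_FORMAT_ID requirement exists because a PERF_SAMPLE_READ
 * sample embeds a read_format block, and without the id field the
 * values in it could not be matched back to an evsel. Illustrative
 * attr setup that passes this check:
 *
 *	attr.sample_type |= PERF_SAMPLE_READ;
 *	attr.read_format |= PERF_FORMAT_ID;
 */
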
u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}

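/*
 * Worked example: with sample_id_all set and sample_type containing
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID, the sample_id
 * trailer appended to non-sample events is
 *
 *	2 * sizeof(u32) + sizeof(u64) + sizeof(u64) = 24 bytes
 *
 * (the pid/tid pair, the timestamp, then the event id).
 */
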
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

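/*
 * Ordering sketch (illustrative): the workload is forked early but kept
 * "corked" on go_pipe until the counters are set up, so no samples are
 * lost at startup:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, true);
 *	// ... perf_evlist__open(), perf_evlist__mmap(), enable events ...
 *	perf_evlist__start_workload(evlist);	// writes to cork_fd
 */
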
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}