tools/perf/util/evlist.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}
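
/*
 * Illustrative lifecycle sketch (a usage note added here for clarity, not
 * code that ships in this file; the helpers are the ones defined below):
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	if (perf_evlist__add_default(evlist))	(adds a "cycles" evsel)
 *		goto out_delete;
 *	...use the evlist...
 * out_delete:
 *	perf_evlist__delete(evlist);		(purges entries, then frees)
 */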

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

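/*
 * Note that the two helpers below only toggle group leaders: events in a
 * group are scheduled together with their leader, so flipping the leader
 * effectively covers the whole group.
 */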
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

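/*
 * The helpers below maintain a hash table (evlist->heads[]) keyed by the
 * kernel-assigned sample id, so that an id carried in a record can later
 * be mapped back to the evsel that produced it, see perf_evlist__id2evsel().
 */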
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id... All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

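/*
 * Locate the id in an event: for PERF_RECORD_SAMPLE the id sits at a fixed
 * offset (id_pos) from the start of the sample array, while for all other
 * record types the sample_id fields are appended at the end, so is_pos
 * indexes backwards from the last u64 of the record.
 */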
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
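
/*
 * A minimal consumer loop (an illustrative sketch, not part of the original
 * file; deliver() stands in for whatever the caller does with an event):
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		union perf_event *event;
 *
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *			deliver(event);
 *	}
 *
 * When overwrite == false, perf_evlist__mmap_read() advances the tail
 * pointer itself via perf_mmap__write_tail(), so the caller does not have
 * to signal consumption by hand.
 */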

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
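
/*
 * Typical setup order, sketched for illustration (error handling omitted;
 * passing UINT_MAX for pages selects the 512 kiB default handled above):
 *
 *	perf_evlist__create_maps(evlist, &target);	(cpu + thread maps)
 *	perf_evlist__open(evlist);			(one fd per evsel/cpu/thread)
 *	perf_evlist__mmap(evlist, UINT_MAX, false);	(ring buffers + pollfds)
 */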

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

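/*
 * Size of the sample_id trailer that the kernel appends to non-sample
 * records when sample_id_all is set: one field per selected sample_type
 * bit, mirroring the checks below.
 */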
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

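/*
 * Workload plumbing: prepare_workload() forks the child and parks it on a
 * "go" pipe (the cork) right before exec, while start_workload() writes a
 * byte to that pipe to release it.  This lets the caller open and enable
 * events before the workload has executed a single instruction.
 */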
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}