/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

void perf_evlist__config(struct perf_evlist *evlist,
			 struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;
	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

		if (evlist->nr_entries > 1)
			perf_evsel__set_sample_id(evsel);
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
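
/*
 * Usage sketch (illustrative): minimal setup of an event list with the
 * default "cycles" event. "target" is assumed to be a struct perf_target
 * the caller has already filled in, e.g. from command line options.
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *
 *	if (perf_evlist__add_default(evlist) < 0 ||
 *	    perf_evlist__create_maps(evlist, &target) < 0) {
 *		perf_evlist__delete(evlist);
 *		return -1;
 *	}
 */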

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
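
/*
 * Usage sketch (illustrative): wiring up a tracepoint event with a handler.
 * "process_sched_switch" stands in for a hypothetical callback supplied by
 * the tool, with whatever signature its machinery expects for handler.func.
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_sched_switch) < 0)
 *		return -1;
 */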

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
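
/*
 * For the legacy path above, the read() buffer layout depends on
 * read_format; the id always comes after the optional time fields:
 *
 *	read_data[0]		counter value
 *	read_data[1]		time_enabled (if PERF_FORMAT_TOTAL_TIME_ENABLED)
 *	read_data[2]		time_running (if PERF_FORMAT_TOTAL_TIME_RUNNING)
 *	read_data[id_idx]	event id (PERF_FORMAT_ID)
 *
 * which is why id_idx starts at 1 and is bumped once per time field.
 */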

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
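
/*
 * Usage sketch (illustrative): resolving which event a sample belongs to
 * once it has been pulled out of the ring buffer, assuming sample ids were
 * requested (see perf_evsel__set_sample_id() above):
 *
 *	struct perf_sample sample;
 *	struct perf_evsel *evsel;
 *
 *	if (perf_evlist__parse_sample(evlist, event, &sample) < 0)
 *		return -1;
 *
 *	evsel = perf_evlist__id2evsel(evlist, sample.id);
 */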

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
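
/*
 * Usage sketch (illustrative): draining every mmap'ed buffer, assuming the
 * evlist was mmap'ed with overwrite == false. "deliver" stands in for
 * whatever the tool does with each event (e.g. writing it to perf.data).
 *
 *	union perf_event *event;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *			deliver(event);
 *	}
 */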

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

/**
 * perf_evlist__mmap - Create per cpu maps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
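
/*
 * Usage sketch (illustrative): opening the counters, mapping the ring
 * buffers and waiting for data. Passing UINT_MAX for pages picks the
 * 512 kiB default computed above; error handling is elided.
 *
 *	if (perf_evlist__open(evlist) < 0)
 *		return -1;
 *
 *	if (perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
 *		return -1;
 *
 *	poll(evlist->pollfd, evlist->nr_fds, -1);
 *
 * after which each buffer can be drained with perf_evlist__mmap_read().
 */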

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}
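
/*
 * Usage sketch (illustrative): applying one tracepoint filter to every
 * event in the list. The string uses the kernel's tracepoint filter
 * syntax, and since the filter is set through an ioctl on each counter
 * fd this has to happen after perf_evlist__open():
 *
 *	if (perf_evlist__set_filter(evlist, "common_pid != 0"))
 *		return -1;
 */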

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_type;
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
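
/*
 * Usage sketch (illustrative): the fork/exec handshake as a tool would use
 * it. The child created by perf_evlist__prepare_workload() sits blocked on
 * the "go" pipe until perf_evlist__start_workload() writes to cork_fd, so
 * the counters can be set up in between. "opts" is assumed to be the
 * tool's struct perf_record_opts; error handling is elided.
 *
 *	if (perf_evlist__prepare_workload(evlist, &target, argv,
 *					  false, true) < 0)
 *		return -1;
 *
 *	perf_evlist__config(evlist, &opts);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *
 *	perf_evlist__start_workload(evlist);
 */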

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}