/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}
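/*
 * Typical lifecycle (illustrative sketch only; error handling trimmed,
 * and the "target" variable stands for a caller-provided perf_target):
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	perf_evlist__add_default(evlist);	     // add a "cycles" evsel
 *	perf_evlist__create_maps(evlist, &target);   // cpu/thread maps
 *	perf_evlist__open(evlist);		     // perf_event_open() each fd
 *	perf_evlist__mmap(evlist, UINT_MAX, false);  // ring buffers + pollfds
 *	...consume events via perf_evlist__mmap_read()...
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete(evlist);
 */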
void perf_evlist__config(struct perf_evlist *evlist,
			 struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;
	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

		if (evlist->nr_entries > 1)
			perf_evsel__set_sample_id(evsel);
	}
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}
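/*
 * Worked example for __perf_evlist__set_leader() above: for a group of
 * three evsels {cycles, instructions, branches} with idx 0, 1 and 2, the
 * list runs from the leader (idx 0) to the last member (idx 2), so
 * nr_members = 2 - 0 + 1 = 3.
 */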
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}
int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
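/*
 * Example (illustrative sketch only, assuming the sched:sched_switch
 * tracepoint is available via debugfs):
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_switch_event) < 0)
 *		return -1;
 *
 * process_switch_event is a hypothetical handler supplied by the caller.
 */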
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}
void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
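/*
 * Legacy read() layout for the fallback above, with both
 * PERF_FORMAT_TOTAL_TIME_ENABLED and _RUNNING set:
 *
 *	read_data[0] = counter value
 *	read_data[1] = time_enabled
 *	read_data[2] = time_running
 *	read_data[3] = id		(id_idx ends up at 3)
 *
 * With neither time bit set, the id sits right after the value, at the
 * initial id_idx of 1.
 */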
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
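/*
 * Example: mapping a sample back to its evsel while processing events
 * (illustrative sketch only):
 *
 *	struct perf_sample sample;
 *
 *	perf_evlist__parse_sample(evlist, event, &sample);
 *	evsel = perf_evlist__id2evsel(evlist, sample.id);
 *
 * sample.id is only meaningful when PERF_SAMPLE_ID (or sample_id_all)
 * was requested; otherwise the single-evsel shortcut above applies.
 */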
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
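/*
 * Typical consumer loop (illustrative sketch only; "process" is a
 * hypothetical caller-provided callback):
 *
 *	while (!done) {
 *		poll(evlist->pollfd, evlist->nr_fds, -1);
 *		for (i = 0; i < evlist->nr_mmaps; i++) {
 *			while ((event = perf_evlist__mmap_read(evlist, i)))
 *				process(event);
 *		}
 *	}
 *
 * With overwrite == false, the tail write in perf_evlist__mmap_read()
 * tells the kernel the consumed space can be reused.
 */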
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}
static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}
/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
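/*
 * Size arithmetic example: pages == 128 with a 4 KiB page gives
 * mask = 128 * 4096 - 1 = 0x7ffff and mmap_len = (128 + 1) * 4096,
 * the extra page being the kernel's control page that holds the
 * head/tail pointers read by perf_mmap__read_head()/write_tail().
 */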
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}
void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}
int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}
u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_type;
}
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}
u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}
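/*
 * Worked example: with sample_id_all set and sample_type ==
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID | PERF_SAMPLE_CPU,
 * the id header is 8 (pid+tid) + 8 (time) + 8 (id) + 8 (cpu+res) = 32
 * bytes appended to every non-sample event.
 */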
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}
bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);
	return perf_evsel__parse_sample(evsel, event, sample);
}
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}