/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

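/*
 * Apply the recording options to every event in the list. When more
 * than one event is being recorded, each evsel also gets sample IDs
 * turned on so that samples can be attributed to the right event.
 */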
void perf_evlist__config(struct perf_evlist *evlist,
			 struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;
	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

		if (evlist->nr_entries > 1)
			perf_evsel__set_sample_id(evsel);
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

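/*
 * Make the first event on @list the group leader of all the others.
 * nr_members is derived from the idx distance between the first and
 * last entries, so the list is assumed to be one contiguous group.
 */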
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

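/*
 * Stop counting on every event via ioctl. Only group leaders are
 * toggled here: group members are scheduled together with their
 * leader, so disabling the leader suffices for the whole group.
 */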
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

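/*
 * Sample IDs are kept in a hash table (evlist->heads) so that the
 * evsel that produced a given sample can be looked up by ID when
 * demultiplexing the ring buffer, see perf_evlist__id2evsel().
 */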
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

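/*
 * Figure out the event ID for @fd. Recent kernels hand it out directly
 * via the PERF_EVENT_IOC_ID ioctl; on older ones it is fished out of
 * the first read() of the counter, skipping the value and any
 * time-enabled/time-running fields that precede the ID.
 */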
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

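/*
 * Map a sample ID back to the evsel that generated it. A single-entry
 * list needs no lookup, and when sample IDs are not recorded at all
 * the only sensible answer is the first (and only expected) event.
 */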
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

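/*
 * Read the next event out of the mmap'ed ring buffer for map @idx.
 * In overwrite mode the kernel may clobber data behind the reader,
 * so the read position is reset to head when it falls too far behind.
 * Events wrapping around the end of the buffer are reassembled into
 * md->event_copy. Returns NULL when the buffer is empty.
 */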
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

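/*
 * One ring buffer is created per CPU and every other event fd for
 * that CPU is redirected into it with PERF_EVENT_IOC_SET_OUTPUT, so
 * all events end up interleaved in a single per-cpu stream.
 */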
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

/**
 * perf_evlist__mmap - Create per cpu maps to receive events
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

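/*
 * Build the thread and cpu maps from the target description. Per-task
 * targets get a dummy cpu map, as do targets that neither specify CPUs
 * nor need to mmap, since those follow threads rather than CPUs.
 */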
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_type;
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

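/*
 * Size of the sample_id_all trailer the kernel appends to non-sample
 * events, computed from the PERF_SAMPLE_* bits in the sample type.
 */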
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

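/*
 * Fork the workload but keep it "corked": the child signals readiness
 * by closing child_ready_pipe, then blocks reading go_pipe until
 * perf_evlist__start_workload() writes a byte to the other end. The
 * go_pipe ends are marked close-on-exec so the workload never inherits
 * them.
 */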
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

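/*
 * Pop the cork: write a byte to the pipe the child is blocked on,
 * letting the workload prepared above finally exec.
 */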
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}