perf machine: Introduce synthesize_threads method out of open coded equivalent
[deliverable/linux.git] tools/perf/builtin-record.c
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

#ifndef HAVE_ON_EXIT_SUPPORT
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
#define exit(x) (exit)(__exitcode = (x))

static int on_exit(on_exit_func_t function, void *arg)
{
	if (__on_exit_count == ATEXIT_MAX)
		return -ENOMEM;
	else if (__on_exit_count == 0)
		atexit(__handle_on_exit_funcs);
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
	return 0;
}

static void __handle_on_exit_funcs(void)
{
	int i;
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif
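
/*
 * Note: the block above provides a minimal stand-in for glibc's on_exit()
 * on libcs that lack it (hence the HAVE_ON_EXIT_SUPPORT guard); unlike
 * atexit(), the registered handlers receive the process exit status. It is
 * what lets the registrations made later in this file, e.g.:
 *
 *	on_exit(perf_record__sig_exit, rec);
 *	on_exit(perf_record__exit, rec);
 *
 * work unchanged on such systems.
 */
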
struct perf_record {
	struct perf_tool	tool;
	struct perf_record_opts opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
};

static int write_output(struct perf_record *rec, void *buf, size_t size)
{
	struct perf_data_file *file = &rec->file;

	while (size) {
		int ret = write(file->fd, buf, size);

		if (ret < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return -1;
		}

		size -= ret;
		buf += ret;

		rec->bytes_written += ret;
	}

	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct perf_record *rec = container_of(tool, struct perf_record, tool);
	if (write_output(rec, event, event->header.size) < 0)
		return -1;

	return 0;
}

static int perf_record__mmap_read(struct perf_record *rec,
				  struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (write_output(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (write_output(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}
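
/*
 * Illustration (not part of the original file): the masked-index arithmetic
 * in perf_record__mmap_read() above drains the [old, head) byte range from a
 * power-of-two sized ring buffer in at most two contiguous chunks, splitting
 * at the buffer end when the range wraps. A minimal standalone sketch of that
 * split, assuming the same 'mask' == buffer_size - 1 convention; the helper
 * name is hypothetical:
 */
static inline int mmap_ring__chunks(unsigned long old, unsigned long head,
				    unsigned long mask,
				    unsigned long off[2], unsigned long len[2])
{
	unsigned long size = head - old;

	if ((old & mask) + size != (head & mask)) {
		/* Range wraps: tail of the buffer first... */
		off[0] = old & mask;
		len[0] = mask + 1 - (old & mask);
		/* ...then the remainder from the start of the buffer. */
		off[1] = 0;
		len[1] = size - len[0];
		return 2;
	}

	/* No wrap: a single contiguous chunk. */
	off[0] = old & mask;
	len[0] = size;
	return 1;
}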

static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}

static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
}

static int perf_record__open(struct perf_record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %d)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}
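
/*
 * Note on the try_again label above: perf_evsel__fallback() is given a chance
 * to rewrite the event attribute into something the running kernel can open
 * (for instance, falling back from the hardware cycles event to the software
 * cpu-clock event when no PMU is usable), and only when no fallback applies is
 * the error reported via perf_evsel__open_strerror().
 */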

static int process_buildids(struct perf_record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;
	u64 start = session->header.data_offset;

	u64 size = lseek(file->fd, 0, SEEK_CUR);
	if (size == 0)
		return 0;

	return __perf_session__process_events(session, start,
					      size - start,
					      size, &build_id__mark_dso_hit_ops);
}

static void perf_record__exit(int status, void *arg)
{
	struct perf_record *rec = arg;
	struct perf_data_file *file = &rec->file;

	if (status != 0)
		return;

	if (!file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * For guest kernels, when processing the record and report
	 * subcommands we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a dso preload, because by default guest module
	 * symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the first
	 * address falls in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int perf_record__mmap_read_all(struct perf_record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
		rc = write_output(rec, &finished_round_event,
				  sizeof(finished_round_event));

out:
	return rc;
}
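
/*
 * The finished_round_event written above is a synthetic
 * PERF_RECORD_FINISHED_ROUND marker: it tells the perf.data consumer that
 * everything generated before this point has been drained from all mmap
 * buffers, so the session layer can safely flush and reorder the samples
 * queued up to that round.
 */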

static void perf_record__init_features(struct perf_record *rec)
{
	struct perf_evlist *evsel_list = rec->evlist;
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evsel_list->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
	int err;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;

	perf_record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(evsel_list, &opts->target,
						    argv, file->is_pipe,
						    true);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (perf_record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	if (!evsel_list->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_delete_session;
	} else {
		err = perf_session__write_header(session, evsel_list,
						 file->fd, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, evsel_list,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = machine__synthesize_threads(machine, tool, &opts->target, evsel_list->threads,
					  process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!perf_target__none(&opts->target))
		perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

	for (;;) {
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !perf_target__none(&opts->target)) {
			perf_evlist__disable(evsel_list);
			disabled = true;
		}
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		file->path,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}
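
/*
 * The machine__synthesize_threads() call in __cmd_record() above is what this
 * commit is about: it replaces the previously open-coded choice between
 * synthesizing the explicit thread map and scanning all of /proc for a
 * system-wide/CPU target. A sketch of what the helper plausibly looks like on
 * the util/machine.c side, reconstructed from the call site's signature and
 * guarded out here because it is illustrative rather than part of this file:
 */
#if 0
int machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				struct perf_target *target,
				struct thread_map *threads,
				perf_event__handler_t process, bool data_mmap)
{
	if (perf_target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process,
							 machine, data_mmap);
	else if (perf_target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process,
						      machine, data_mmap);
	/* A forked workload: nothing to synthesize up front. */
	return 0;
}
#endif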

#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_END
};

static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}
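
/*
 * Worked example for parse_branch_stack(): "-j u,k,any_call" accumulates
 * PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_KERNEL |
 * PERF_SAMPLE_BRANCH_ANY_CALL, while a bare "-b" (str == NULL) leaves the
 * mask with no branch-type bits set, so the "default to any branch" check
 * turns it into PERF_SAMPLE_BRANCH_ANY.
 */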

#ifdef HAVE_LIBUNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_LIBUNWIND_SUPPORT */
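
/*
 * Worked example for get_stack_size(): sizes are rounded up to a multiple of
 * sizeof(u64) and capped at round_down(USHRT_MAX, 8) = 65528, so "8192" is
 * accepted as-is, "8190" becomes 8192, and "65535" (which rounds up to 65536)
 * is rejected, as is "0".
 */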

int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef HAVE_LIBUNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}
#endif /* HAVE_LIBUNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}

static void callchain_debug(struct perf_record_opts *opts)
{
	pr_debug("callchain: type %d\n", opts->call_graph);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	struct perf_record_opts *opts = opt->value;
	int ret;

	/* --no-call-graph */
	if (unset) {
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = record_parse_callchain(arg, opts);
	if (!ret)
		callchain_debug(opts);

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct perf_record_opts *opts = opt->value;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
	return 0;
}
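
/*
 * Examples of how the two option handlers above combine: "--call-graph fp"
 * selects frame-pointer unwinding and accepts no extra argument;
 * "--call-graph dwarf,4096" (only with libunwind support built in) selects
 * DWARF unwinding with a 4096-byte user stack dump per sample, defaulting to
 * 8192 bytes when no size is given; a bare "-g" keeps whatever mode is
 * already configured, falling back to frame pointers if none is.
 */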

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
 * because we need to have access to it in perf_record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	},
};

#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		   "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		   "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		   "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_END()
};

int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;
	char errbuf[BUFSIZ];

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && perf_target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	err = perf_target__validate(&rec->opts.target);
	if (err) {
		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = perf_target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (perf_record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);

	perf_evlist__munmap(evsel_list);
	perf_evlist__close(evsel_list);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}