perf record: Use strerror_r instead of strerror
tools/perf/builtin-record.c
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

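/*
 * Per-invocation state for 'perf record': the tool callbacks, the parsed
 * options, the output file and the event list/session being recorded.
 */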
struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
};

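/*
 * Append a raw chunk of event data to the perf.data file and account for
 * it, so that the header's data_size can be fixed up when recording ends.
 */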
static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

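/*
 * Callback for events synthesized by the tool itself (mmap, comm, attr,
 * tracing data, ...) rather than generated by the kernel: they are written
 * to the output file like any other event.
 */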
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

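/*
 * Drain one kernel ring buffer: copy everything between our last read
 * position (md->prev) and the kernel's write head into the output file.
 * The data area is a power-of-two circular buffer starting one page past
 * md->base, so a region that wraps past the end is written in two chunks
 * before the tail is advanced to tell the kernel the space is free again.
 */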
static int record__mmap_read(struct record *rec, struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

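/*
 * Signal handling: the handlers only set volatile flags, so the main loop
 * in __cmd_record() can wind down cleanly; a fatal signal is then
 * re-raised from the atexit() handler below so the exit status reflects it.
 */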
static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

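/*
 * Open all counters on the target CPUs/threads. If opening an event fails,
 * perf_evsel__fallback() may pick a weaker substitute (e.g. the cpu-clock
 * software event when the hardware cycles event is unavailable) and the
 * open is retried; otherwise a human-readable error is printed. Finally,
 * the per-event filters are applied and the ring buffers are mmapped.
 */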
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				strerror_r(errno, msg, sizeof(msg)));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

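/*
 * Walk the events just written to perf.data and mark the DSOs that were
 * actually hit, so that only their build-ids end up in the header and, if
 * enabled, in the build-id cache.
 */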
static int process_buildids(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;
	u64 start = session->header.data_offset;

	u64 size = lseek(file->fd, 0, SEEK_CUR);
	if (size == 0)
		return 0;

	return __perf_session__process_events(session, start,
					      size - start,
					      size, &build_id__mark_dso_hit_ops);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * When processing the record & report subcommands for a guest
	 * kernel, we synthesize the module mmaps before the guest kernel
	 * mmap and trigger a DSO preload, because by default guest module
	 * symbols are loaded from guest kallsyms instead of from
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the first
	 * sampled address is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text symbol.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

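/*
 * Drain every mmapped ring buffer once. If anything was written, append a
 * PERF_RECORD_FINISHED_ROUND marker so that 'perf report' knows it can
 * flush and time-sort the events buffered up to this point.
 */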
static int record__mmap_read_all(struct record *rec)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

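/*
 * Start from all perf.data header features enabled, then clear the ones
 * that don't apply to this session (build-ids, tracing data, branch stacks).
 */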
static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

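/*
 * The main recording loop, e.g. for:
 *
 *	$ perf record -F 4000 -g -- ./workload
 *
 * It sets up the session and the (optional) forked workload, opens and
 * mmaps the counters, synthesizes the pre-existing kernel/module/thread
 * maps, then alternates between draining the ring buffers and sleeping in
 * poll() until the workload exits or the user interrupts us, and finally
 * fixes up the perf.data header.
 */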
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist,
						 file->fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_child;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_child;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(rec->evlist);

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

	for (;;) {
		int hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_child;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
			/*
			 * Propagate an error only if there is one: a positive
			 * number of returned events and EINTR are ignored.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet) {
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

		/*
		 * Approximate RIP event size: 24 bytes.
		 */
		fprintf(stderr,
			"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
			(double)rec->bytes_written / 1024.0 / 1024.0,
			file->path,
			rec->bytes_written / 24);
	}

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

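/*
 * Branch stack sampling modes, as accepted by -b/--branch-any and
 * -j/--branch-filter, e.g.:
 *
 *	$ perf record -j any_call,u -- ./workload
 *
 * The names map to PERF_SAMPLE_BRANCH_* bits; the privilege levels
 * (u, k, hv) may be combined with the branch type filters.
 */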
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
	BRANCH_END
};

static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

#ifdef HAVE_DWARF_UNWIND_SUPPORT
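/*
 * Parse the optional stack dump size given with --call-graph dwarf,<size>:
 * the value is rounded up to a multiple of u64 and must not exceed
 * USHRT_MAX rounded down to a u64 boundary.
 */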
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %lu): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */

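/*
 * Parse the --call-graph argument, e.g. "fp" for frame-pointer based
 * unwinding or, when built with dwarf unwind support, "dwarf[,<size>]"
 * for post-processing unwinding from a user stack dump (8192 bytes by
 * default).
 */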
int record_parse_callchain(const char *arg, struct record_opts *opts)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef HAVE_DWARF_UNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}

static void callchain_debug(struct record_opts *opts)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };

	pr_debug("callchain: type %s\n", str[opts->call_graph]);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	struct record_opts *opts = opt->value;
	int ret;

	opts->call_graph_enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = record_parse_callchain(arg, opts);
	if (!ret)
		callchain_debug(opts);

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;

	opts->call_graph_enabled = !unset;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
	return 0;
}

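/*
 * Handle 'perf record' specific keys from the perfconfig file, e.g.:
 *
 *	[record]
 *		call-graph = dwarf,4096
 *
 * Everything else is passed on to perf_default_config().
 */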
static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.call-graph"))
		return record_parse_callchain(value, &rec->opts);

	return perf_default_config(var, value, cb);
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally this would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
};

#define CALLCHAIN_HELP "setup and enable call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'ing 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_END()
};

int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init(NULL);

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	return err;
}