/*
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
12 #include "util/build-id.h"
13 #include "util/util.h"
14 #include "util/parse-options.h"
15 #include "util/parse-events.h"
17 #include "util/header.h"
18 #include "util/event.h"
19 #include "util/evlist.h"
20 #include "util/evsel.h"
21 #include "util/debug.h"
22 #include "util/session.h"
23 #include "util/tool.h"
24 #include "util/symbol.h"
25 #include "util/cpumap.h"
26 #include "util/thread_map.h"
27 #include "util/data.h"
35 struct perf_tool tool
;
36 struct record_opts opts
;
38 struct perf_data_file file
;
39 struct perf_evlist
*evlist
;
40 struct perf_session
*session
;
44 bool no_buildid_cache
;
48 static int record__write(struct record
*rec
, void *bf
, size_t size
)
50 if (perf_data_file__write(rec
->session
->file
, bf
, size
) < 0) {
51 pr_err("failed to write perf data, error: %m\n");
55 rec
->bytes_written
+= size
;
59 static int process_synthesized_event(struct perf_tool
*tool
,
60 union perf_event
*event
,
61 struct perf_sample
*sample __maybe_unused
,
62 struct machine
*machine __maybe_unused
)
64 struct record
*rec
= container_of(tool
, struct record
, tool
);
65 return record__write(rec
, event
, event
->header
.size
);
68 static int record__mmap_read(struct record
*rec
, struct perf_mmap
*md
)
70 unsigned int head
= perf_mmap__read_head(md
);
71 unsigned int old
= md
->prev
;
72 unsigned char *data
= md
->base
+ page_size
;
84 if ((old
& md
->mask
) + size
!= (head
& md
->mask
)) {
85 buf
= &data
[old
& md
->mask
];
86 size
= md
->mask
+ 1 - (old
& md
->mask
);
89 if (record__write(rec
, buf
, size
) < 0) {
95 buf
= &data
[old
& md
->mask
];
99 if (record__write(rec
, buf
, size
) < 0) {
105 perf_mmap__write_tail(md
, old
);
/* Signal-handler communication flags (written from sig_handler). */
static volatile int done = 0;		/* main loop should terminate */
static volatile int signr = -1;		/* signal to re-raise at exit */
static volatile int child_finished = 0;	/* SIGCHLD observed for workload */
115 static void sig_handler(int sig
)
125 static void record__sig_exit(void)
130 signal(signr
, SIG_DFL
);
134 static int record__open(struct record
*rec
)
137 struct perf_evsel
*pos
;
138 struct perf_evlist
*evlist
= rec
->evlist
;
139 struct perf_session
*session
= rec
->session
;
140 struct record_opts
*opts
= &rec
->opts
;
143 perf_evlist__config(evlist
, opts
);
145 evlist__for_each(evlist
, pos
) {
147 if (perf_evsel__open(pos
, evlist
->cpus
, evlist
->threads
) < 0) {
148 if (perf_evsel__fallback(pos
, errno
, msg
, sizeof(msg
))) {
150 ui__warning("%s\n", msg
);
155 perf_evsel__open_strerror(pos
, &opts
->target
,
156 errno
, msg
, sizeof(msg
));
157 ui__error("%s\n", msg
);
162 if (perf_evlist__apply_filters(evlist
)) {
163 error("failed to set filter with %d (%s)\n", errno
,
164 strerror_r(errno
, msg
, sizeof(msg
)));
169 if (perf_evlist__mmap(evlist
, opts
->mmap_pages
, false) < 0) {
170 if (errno
== EPERM
) {
171 pr_err("Permission error mapping pages.\n"
172 "Consider increasing "
173 "/proc/sys/kernel/perf_event_mlock_kb,\n"
174 "or try again with a smaller value of -m/--mmap_pages.\n"
175 "(current value: %u)\n", opts
->mmap_pages
);
178 pr_err("failed to mmap with %d (%s)\n", errno
,
179 strerror_r(errno
, msg
, sizeof(msg
)));
185 session
->evlist
= evlist
;
186 perf_session__set_id_hdr_size(session
);
191 static int process_buildids(struct record
*rec
)
193 struct perf_data_file
*file
= &rec
->file
;
194 struct perf_session
*session
= rec
->session
;
195 u64 start
= session
->header
.data_offset
;
197 u64 size
= lseek(file
->fd
, 0, SEEK_CUR
);
201 return __perf_session__process_events(session
, start
,
203 size
, &build_id__mark_dso_hit_ops
);
206 static void perf_event__synthesize_guest_os(struct machine
*machine
, void *data
)
209 struct perf_tool
*tool
= data
;
211 *As for guest kernel when processing subcommand record&report,
212 *we arrange module mmap prior to guest kernel mmap and trigger
213 *a preload dso because default guest module symbols are loaded
214 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
215 *method is used to avoid symbol missing when the first addr is
216 *in module instead of in guest kernel.
218 err
= perf_event__synthesize_modules(tool
, process_synthesized_event
,
221 pr_err("Couldn't record guest kernel [%d]'s reference"
222 " relocation symbol.\n", machine
->pid
);
225 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
226 * have no _text sometimes.
228 err
= perf_event__synthesize_kernel_mmap(tool
, process_synthesized_event
,
231 pr_err("Couldn't record guest kernel [%d]'s reference"
232 " relocation symbol.\n", machine
->pid
);
235 static struct perf_event_header finished_round_event
= {
236 .size
= sizeof(struct perf_event_header
),
237 .type
= PERF_RECORD_FINISHED_ROUND
,
240 static int record__mmap_read_all(struct record
*rec
)
242 u64 bytes_written
= rec
->bytes_written
;
246 for (i
= 0; i
< rec
->evlist
->nr_mmaps
; i
++) {
247 if (rec
->evlist
->mmap
[i
].base
) {
248 if (record__mmap_read(rec
, &rec
->evlist
->mmap
[i
]) != 0) {
256 * Mark the round finished in case we wrote
257 * at least one event.
259 if (bytes_written
!= rec
->bytes_written
)
260 rc
= record__write(rec
, &finished_round_event
, sizeof(finished_round_event
));
266 static void record__init_features(struct record
*rec
)
268 struct perf_session
*session
= rec
->session
;
271 for (feat
= HEADER_FIRST_FEATURE
; feat
< HEADER_LAST_FEATURE
; feat
++)
272 perf_header__set_feat(&session
->header
, feat
);
275 perf_header__clear_feat(&session
->header
, HEADER_BUILD_ID
);
277 if (!have_tracepoints(&rec
->evlist
->entries
))
278 perf_header__clear_feat(&session
->header
, HEADER_TRACING_DATA
);
280 if (!rec
->opts
.branch_stack
)
281 perf_header__clear_feat(&session
->header
, HEADER_BRANCH_STACK
);
/* errno reported by a failed workload exec, set from the SIGUSR1 handler. */
static volatile int workload_exec_errno;
287 * perf_evlist__prepare_workload will send a SIGUSR1
288 * if the fork fails, since we asked by setting its
289 * want_signal to true.
291 static void workload_exec_failed_signal(int signo __maybe_unused
,
293 void *ucontext __maybe_unused
)
295 workload_exec_errno
= info
->si_value
.sival_int
;
300 static int __cmd_record(struct record
*rec
, int argc
, const char **argv
)
304 unsigned long waking
= 0;
305 const bool forks
= argc
> 0;
306 struct machine
*machine
;
307 struct perf_tool
*tool
= &rec
->tool
;
308 struct record_opts
*opts
= &rec
->opts
;
309 struct perf_data_file
*file
= &rec
->file
;
310 struct perf_session
*session
;
311 bool disabled
= false;
313 rec
->progname
= argv
[0];
315 atexit(record__sig_exit
);
316 signal(SIGCHLD
, sig_handler
);
317 signal(SIGINT
, sig_handler
);
318 signal(SIGTERM
, sig_handler
);
320 session
= perf_session__new(file
, false, NULL
);
321 if (session
== NULL
) {
322 pr_err("Perf session creation failed.\n");
326 rec
->session
= session
;
328 record__init_features(rec
);
331 err
= perf_evlist__prepare_workload(rec
->evlist
, &opts
->target
,
333 workload_exec_failed_signal
);
335 pr_err("Couldn't run the workload!\n");
337 goto out_delete_session
;
341 if (record__open(rec
) != 0) {
346 if (!rec
->evlist
->nr_groups
)
347 perf_header__clear_feat(&session
->header
, HEADER_GROUP_DESC
);
350 err
= perf_header__write_pipe(file
->fd
);
354 err
= perf_session__write_header(session
, rec
->evlist
,
361 && !perf_header__has_feat(&session
->header
, HEADER_BUILD_ID
)) {
362 pr_err("Couldn't generate buildids. "
363 "Use --no-buildid to profile anyway.\n");
368 machine
= &session
->machines
.host
;
371 err
= perf_event__synthesize_attrs(tool
, session
,
372 process_synthesized_event
);
374 pr_err("Couldn't synthesize attrs.\n");
378 if (have_tracepoints(&rec
->evlist
->entries
)) {
380 * FIXME err <= 0 here actually means that
381 * there were no tracepoints so its not really
382 * an error, just that we don't need to
383 * synthesize anything. We really have to
384 * return this more properly and also
385 * propagate errors that now are calling die()
387 err
= perf_event__synthesize_tracing_data(tool
, file
->fd
, rec
->evlist
,
388 process_synthesized_event
);
390 pr_err("Couldn't record tracing data.\n");
393 rec
->bytes_written
+= err
;
397 err
= perf_event__synthesize_kernel_mmap(tool
, process_synthesized_event
,
400 pr_err("Couldn't record kernel reference relocation symbol\n"
401 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
402 "Check /proc/kallsyms permission or run as root.\n");
404 err
= perf_event__synthesize_modules(tool
, process_synthesized_event
,
407 pr_err("Couldn't record kernel module information.\n"
408 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
409 "Check /proc/modules permission or run as root.\n");
412 machines__process_guests(&session
->machines
,
413 perf_event__synthesize_guest_os
, tool
);
416 err
= __machine__synthesize_threads(machine
, tool
, &opts
->target
, rec
->evlist
->threads
,
417 process_synthesized_event
, opts
->sample_address
);
421 if (rec
->realtime_prio
) {
422 struct sched_param param
;
424 param
.sched_priority
= rec
->realtime_prio
;
425 if (sched_setscheduler(0, SCHED_FIFO
, ¶m
)) {
426 pr_err("Could not set realtime priority.\n");
433 * When perf is starting the traced process, all the events
434 * (apart from group members) have enable_on_exec=1 set,
435 * so don't spoil it by prematurely enabling them.
437 if (!target__none(&opts
->target
) && !opts
->initial_delay
)
438 perf_evlist__enable(rec
->evlist
);
444 perf_evlist__start_workload(rec
->evlist
);
446 if (opts
->initial_delay
) {
447 usleep(opts
->initial_delay
* 1000);
448 perf_evlist__enable(rec
->evlist
);
452 int hits
= rec
->samples
;
454 if (record__mmap_read_all(rec
) < 0) {
459 if (hits
== rec
->samples
) {
462 err
= poll(rec
->evlist
->pollfd
, rec
->evlist
->nr_fds
, -1);
464 * Propagate error, only if there's any. Ignore positive
465 * number of returned events and interrupt error.
467 if (err
> 0 || (err
< 0 && errno
== EINTR
))
473 * When perf is starting the traced process, at the end events
474 * die with the process and we wait for that. Thus no need to
475 * disable events in this case.
477 if (done
&& !disabled
&& !target__none(&opts
->target
)) {
478 perf_evlist__disable(rec
->evlist
);
483 if (forks
&& workload_exec_errno
) {
484 char msg
[STRERR_BUFSIZE
];
485 const char *emsg
= strerror_r(workload_exec_errno
, msg
, sizeof(msg
));
486 pr_err("Workload failed: %s\n", emsg
);
492 fprintf(stderr
, "[ perf record: Woken up %ld times to write data ]\n", waking
);
495 * Approximate RIP event size: 24 bytes.
498 "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64
" samples) ]\n",
499 (double)rec
->bytes_written
/ 1024.0 / 1024.0,
501 rec
->bytes_written
/ 24);
509 kill(rec
->evlist
->workload
.pid
, SIGTERM
);
515 else if (WIFEXITED(exit_status
))
516 status
= WEXITSTATUS(exit_status
);
517 else if (WIFSIGNALED(exit_status
))
518 signr
= WTERMSIG(exit_status
);
522 if (!err
&& !file
->is_pipe
) {
523 rec
->session
->header
.data_size
+= rec
->bytes_written
;
525 if (!rec
->no_buildid
)
526 process_buildids(rec
);
527 perf_session__write_header(rec
->session
, rec
->evlist
,
532 perf_session__delete(session
);
536 #define BRANCH_OPT(n, m) \
537 { .name = n, .mode = (m) }
539 #define BRANCH_END { .name = NULL }
546 static const struct branch_mode branch_modes
[] = {
547 BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER
),
548 BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL
),
549 BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV
),
550 BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY
),
551 BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL
),
552 BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN
),
553 BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL
),
554 BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX
),
555 BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX
),
556 BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX
),
557 BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND
),
562 parse_branch_stack(const struct option
*opt
, const char *str
, int unset
)
565 (PERF_SAMPLE_BRANCH_USER |\
566 PERF_SAMPLE_BRANCH_KERNEL |\
567 PERF_SAMPLE_BRANCH_HV)
569 uint64_t *mode
= (uint64_t *)opt
->value
;
570 const struct branch_mode
*br
;
571 char *s
, *os
= NULL
, *p
;
578 * cannot set it twice, -b + --branch-filter for instance
583 /* str may be NULL in case no arg is passed to -b */
585 /* because str is read-only */
586 s
= os
= strdup(str
);
595 for (br
= branch_modes
; br
->name
; br
++) {
596 if (!strcasecmp(s
, br
->name
))
600 ui__warning("unknown branch filter %s,"
601 " check man page\n", s
);
615 /* default to any branch */
616 if ((*mode
& ~ONLY_PLM
) == 0) {
617 *mode
= PERF_SAMPLE_BRANCH_ANY
;
#ifdef HAVE_DWARF_UNWIND_SUPPORT
/*
 * Parse a user-supplied stack dump size: round up to a u64 multiple and
 * reject 0, trailing junk, or values above round_down(USHRT_MAX, 8).
 * Returns 0 and stores into *_size on success, -1 on bad input.
 */
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;		/* trailing garbage */

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
652 int record_parse_callchain(const char *arg
, struct record_opts
*opts
)
654 char *tok
, *name
, *saveptr
= NULL
;
658 /* We need buffer that we know we can write to. */
659 buf
= malloc(strlen(arg
) + 1);
665 tok
= strtok_r((char *)buf
, ",", &saveptr
);
666 name
= tok
? : (char *)buf
;
669 /* Framepointer style */
670 if (!strncmp(name
, "fp", sizeof("fp"))) {
671 if (!strtok_r(NULL
, ",", &saveptr
)) {
672 opts
->call_graph
= CALLCHAIN_FP
;
675 pr_err("callchain: No more arguments "
676 "needed for -g fp\n");
679 #ifdef HAVE_DWARF_UNWIND_SUPPORT
681 } else if (!strncmp(name
, "dwarf", sizeof("dwarf"))) {
682 const unsigned long default_stack_dump_size
= 8192;
685 opts
->call_graph
= CALLCHAIN_DWARF
;
686 opts
->stack_dump_size
= default_stack_dump_size
;
688 tok
= strtok_r(NULL
, ",", &saveptr
);
690 unsigned long size
= 0;
692 ret
= get_stack_size(tok
, &size
);
693 opts
->stack_dump_size
= size
;
695 #endif /* HAVE_DWARF_UNWIND_SUPPORT */
697 pr_err("callchain: Unknown --call-graph option "
708 static void callchain_debug(struct record_opts
*opts
)
710 static const char *str
[CALLCHAIN_MAX
] = { "NONE", "FP", "DWARF" };
712 pr_debug("callchain: type %s\n", str
[opts
->call_graph
]);
714 if (opts
->call_graph
== CALLCHAIN_DWARF
)
715 pr_debug("callchain: stack dump size %d\n",
716 opts
->stack_dump_size
);
719 int record_parse_callchain_opt(const struct option
*opt
,
723 struct record_opts
*opts
= opt
->value
;
726 opts
->call_graph_enabled
= !unset
;
728 /* --no-call-graph */
730 opts
->call_graph
= CALLCHAIN_NONE
;
731 pr_debug("callchain: disabled\n");
735 ret
= record_parse_callchain(arg
, opts
);
737 callchain_debug(opts
);
742 int record_callchain_opt(const struct option
*opt
,
743 const char *arg __maybe_unused
,
744 int unset __maybe_unused
)
746 struct record_opts
*opts
= opt
->value
;
748 opts
->call_graph_enabled
= !unset
;
750 if (opts
->call_graph
== CALLCHAIN_NONE
)
751 opts
->call_graph
= CALLCHAIN_FP
;
753 callchain_debug(opts
);
757 static int perf_record_config(const char *var
, const char *value
, void *cb
)
759 struct record
*rec
= cb
;
761 if (!strcmp(var
, "record.call-graph"))
762 return record_parse_callchain(value
, &rec
->opts
);
764 return perf_default_config(var
, value
, cb
);
767 static const char * const record_usage
[] = {
768 "perf record [<options>] [<command>]",
769 "perf record [<options>] -- <command> [<options>]",
774 * XXX Ideally would be local to cmd_record() and passed to a record__new
775 * because we need to have access to it in record__exit, that is called
776 * after cmd_record() exits, but since record_options need to be accessible to
777 * builtin-script, leave it here.
779 * At least we don't ouch it in all the other functions here directly.
781 * Just say no to tons of global variables, sigh.
783 static struct record record
= {
786 .mmap_pages
= UINT_MAX
,
787 .user_freq
= UINT_MAX
,
788 .user_interval
= ULLONG_MAX
,
792 .default_per_cpu
= true,
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

/* Help text advertises dwarf mode only when unwind support is built in. */
#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif
806 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
807 * with it and switch to use the library functions in perf_evlist that came
808 * from builtin-record.c, i.e. use record_opts,
809 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
812 const struct option record_options
[] = {
813 OPT_CALLBACK('e', "event", &record
.evlist
, "event",
814 "event selector. use 'perf list' to list available events",
815 parse_events_option
),
816 OPT_CALLBACK(0, "filter", &record
.evlist
, "filter",
817 "event filter", parse_filter
),
818 OPT_STRING('p', "pid", &record
.opts
.target
.pid
, "pid",
819 "record events on existing process id"),
820 OPT_STRING('t', "tid", &record
.opts
.target
.tid
, "tid",
821 "record events on existing thread id"),
822 OPT_INTEGER('r', "realtime", &record
.realtime_prio
,
823 "collect data with this RT SCHED_FIFO priority"),
824 OPT_BOOLEAN(0, "no-buffering", &record
.opts
.no_buffering
,
825 "collect data without buffering"),
826 OPT_BOOLEAN('R', "raw-samples", &record
.opts
.raw_samples
,
827 "collect raw sample records from all opened counters"),
828 OPT_BOOLEAN('a', "all-cpus", &record
.opts
.target
.system_wide
,
829 "system-wide collection from all CPUs"),
830 OPT_STRING('C', "cpu", &record
.opts
.target
.cpu_list
, "cpu",
831 "list of cpus to monitor"),
832 OPT_U64('c', "count", &record
.opts
.user_interval
, "event period to sample"),
833 OPT_STRING('o', "output", &record
.file
.path
, "file",
835 OPT_BOOLEAN_SET('i', "no-inherit", &record
.opts
.no_inherit
,
836 &record
.opts
.no_inherit_set
,
837 "child tasks do not inherit counters"),
838 OPT_UINTEGER('F', "freq", &record
.opts
.user_freq
, "profile at this frequency"),
839 OPT_CALLBACK('m', "mmap-pages", &record
.opts
.mmap_pages
, "pages",
840 "number of mmap data pages",
841 perf_evlist__parse_mmap_pages
),
842 OPT_BOOLEAN(0, "group", &record
.opts
.group
,
843 "put the counters into a counter group"),
844 OPT_CALLBACK_NOOPT('g', NULL
, &record
.opts
,
845 NULL
, "enables call-graph recording" ,
846 &record_callchain_opt
),
847 OPT_CALLBACK(0, "call-graph", &record
.opts
,
848 "mode[,dump_size]", record_callchain_help
,
849 &record_parse_callchain_opt
),
850 OPT_INCR('v', "verbose", &verbose
,
851 "be more verbose (show counter open errors, etc)"),
852 OPT_BOOLEAN('q', "quiet", &quiet
, "don't print any message"),
853 OPT_BOOLEAN('s', "stat", &record
.opts
.inherit_stat
,
854 "per thread counts"),
855 OPT_BOOLEAN('d', "data", &record
.opts
.sample_address
,
857 OPT_BOOLEAN('T', "timestamp", &record
.opts
.sample_time
, "Sample timestamps"),
858 OPT_BOOLEAN('P', "period", &record
.opts
.period
, "Sample period"),
859 OPT_BOOLEAN('n', "no-samples", &record
.opts
.no_samples
,
861 OPT_BOOLEAN('N', "no-buildid-cache", &record
.no_buildid_cache
,
862 "do not update the buildid cache"),
863 OPT_BOOLEAN('B', "no-buildid", &record
.no_buildid
,
864 "do not collect buildids in perf.data"),
865 OPT_CALLBACK('G', "cgroup", &record
.evlist
, "name",
866 "monitor event in cgroup name only",
868 OPT_UINTEGER('D', "delay", &record
.opts
.initial_delay
,
869 "ms to wait before starting measurement after program start"),
870 OPT_STRING('u', "uid", &record
.opts
.target
.uid_str
, "user",
873 OPT_CALLBACK_NOOPT('b', "branch-any", &record
.opts
.branch_stack
,
874 "branch any", "sample any taken branches",
877 OPT_CALLBACK('j', "branch-filter", &record
.opts
.branch_stack
,
878 "branch filter mask", "branch stack filter modes",
880 OPT_BOOLEAN('W', "weight", &record
.opts
.sample_weight
,
881 "sample by weight (on special events only)"),
882 OPT_BOOLEAN(0, "transaction", &record
.opts
.sample_transaction
,
883 "sample transaction flags (special events only)"),
884 OPT_BOOLEAN(0, "per-thread", &record
.opts
.target
.per_thread
,
885 "use per-thread mmaps"),
889 int cmd_record(int argc
, const char **argv
, const char *prefix __maybe_unused
)
892 struct record
*rec
= &record
;
895 rec
->evlist
= perf_evlist__new();
896 if (rec
->evlist
== NULL
)
899 perf_config(perf_record_config
, rec
);
901 argc
= parse_options(argc
, argv
, record_options
, record_usage
,
902 PARSE_OPT_STOP_AT_NON_OPTION
);
903 if (!argc
&& target__none(&rec
->opts
.target
))
904 usage_with_options(record_usage
, record_options
);
906 if (nr_cgroups
&& !rec
->opts
.target
.system_wide
) {
907 ui__error("cgroup monitoring only available in"
908 " system-wide mode\n");
909 usage_with_options(record_usage
, record_options
);
914 if (symbol_conf
.kptr_restrict
)
916 "WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
917 "check /proc/sys/kernel/kptr_restrict.\n\n"
918 "Samples in kernel functions may not be resolved if a suitable vmlinux\n"
919 "file is not found in the buildid cache or in the vmlinux path.\n\n"
920 "Samples in kernel modules won't be resolved at all.\n\n"
921 "If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
922 "even with a suitable vmlinux or kallsyms file.\n\n");
924 if (rec
->no_buildid_cache
|| rec
->no_buildid
)
925 disable_buildid_cache();
927 if (rec
->evlist
->nr_entries
== 0 &&
928 perf_evlist__add_default(rec
->evlist
) < 0) {
929 pr_err("Not enough memory for event selector list\n");
930 goto out_symbol_exit
;
933 if (rec
->opts
.target
.tid
&& !rec
->opts
.no_inherit_set
)
934 rec
->opts
.no_inherit
= true;
936 err
= target__validate(&rec
->opts
.target
);
938 target__strerror(&rec
->opts
.target
, err
, errbuf
, BUFSIZ
);
939 ui__warning("%s", errbuf
);
942 err
= target__parse_uid(&rec
->opts
.target
);
944 int saved_errno
= errno
;
946 target__strerror(&rec
->opts
.target
, err
, errbuf
, BUFSIZ
);
947 ui__error("%s", errbuf
);
950 goto out_symbol_exit
;
954 if (perf_evlist__create_maps(rec
->evlist
, &rec
->opts
.target
) < 0)
955 usage_with_options(record_usage
, record_options
);
957 if (record_opts__config(&rec
->opts
)) {
959 goto out_symbol_exit
;
962 err
= __cmd_record(&record
, argc
, argv
);
964 perf_evlist__delete(rec
->evlist
);