perf record: Apply config to BPF objects before recording
tools/perf/builtin-record.c
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

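/*
 * State for one 'perf record' run: the output file, event list, session and
 * the bookkeeping used while streaming ring-buffer data to perf.data.
 */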
struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct auxtrace_record	*itr;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	unsigned long long	samples;
};

static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

static int record__mmap_read(struct record *rec, int idx)
{
	struct perf_mmap *md = &rec->evlist->mmap[idx];
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

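	/*
	 * The mmap region is a power-of-two ring buffer. If the new data
	 * wraps past the end of the buffer, write the chunk up to the end
	 * first, then fall through to write the rest from the start.
	 */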
	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_evlist__mmap_consume(rec->evlist, idx);
out:
	return rc;
}

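/*
 * Flags shared with signal handlers, hence volatile: 'done' terminates the
 * main mmap-read loop, and the auxtrace_* flags drive AUX area snapshot
 * mode via SIGUSR2.
 */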
static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;
static volatile int auxtrace_snapshot_enabled;
static volatile int auxtrace_snapshot_err;
static volatile int auxtrace_record__snapshot_started;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data_file *file = &rec->file;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data_file__is_pipe(file)) {
		off_t file_offset;
		int fd = perf_data_file__fd(file);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, event, event->header.size);
	record__write(rec, data1, len1);
	if (len2)
		record__write(rec, data2, len2);
	record__write(rec, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm =
				&rec->evlist->mmap[i].auxtrace_mmap;

		if (!mm->base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		auxtrace_snapshot_err = -1;
	} else {
		auxtrace_snapshot_err = auxtrace_record__snapshot_finish(rec->itr);
		if (!auxtrace_snapshot_err)
			auxtrace_snapshot_enabled = 1;
	}
}

#else

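/* Stubs used when perf is built without AUX area tracing support. */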
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct auxtrace_mmap *mm __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

#endif

static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

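	/*
	 * Open each event; on failure, perf_evsel__fallback() may pick a
	 * substitute event so the open can be retried before giving up.
	 */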
	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			strerror_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				strerror_r(errno, msg, sizeof(msg)));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	rec->samples++;

	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;

	if (file->size == 0)
		return 0;

	/*
	 * During this process it'll load the kernel map and replace
	 * dso->long_name with a real pathname it found. In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory):
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so no need to process samples.
	 */
	if (rec->buildid_all)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernels, when processing the record & report
	 * subcommands we arrange module mmaps prior to the guest kernel
	 * mmap and trigger a preload of the dso, because by default guest
	 * module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the
	 * first address is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s module"
		       " information.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

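/*
 * PERF_RECORD_FINISHED_ROUND tells the session layer that all events up to
 * this point in the file can be flushed and re-ordered safely.
 */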
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int record__mmap_read_all(struct record *rec)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm = &rec->evlist->mmap[i].auxtrace_mmap;

		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, i) != 0) {
				rc = -1;
				goto out;
			}
		}

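		/*
		 * In snapshot mode the AUX area is read only when a snapshot
		 * is requested (SIGUSR2); otherwise drain it on every pass.
		 */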
		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);

static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	if (rec->opts.auxtrace_snapshot_mode)
		signal(SIGUSR2, snapshot_sig_handler);
	else
		signal(SIGUSR2, SIG_IGN);

	session = perf_session__new(file, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data_file__fd(file);
	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

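	/*
	 * Apply any command-line config to the loaded BPF objects now that
	 * record__open() has set up the events, and before any data is
	 * written; see the "Apply config to BPF objects before recording"
	 * change description.
	 */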
	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_child;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints, so it's not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_child;
			}
			rec->bytes_written += err;
		}
	}

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out_delete_session;
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    opts->proc_map_timeout);
	if (err != 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		union perf_event *event;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before COMM event
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize COMM event to prevent it.
		 */
		perf_event__synthesize_comm(tool, event,
					    rec->evlist->workload.pid,
					    process_synthesized_event,
					    machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

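	/*
	 * With --delay, the events were opened disabled and enable_on_exec
	 * was not used, so enable them only after the requested delay.
	 */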
	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

	auxtrace_snapshot_enabled = 1;
	for (;;) {
		unsigned long long hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			auxtrace_snapshot_enabled = 0;
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!auxtrace_snapshot_err)
				record__read_auxtrace_snapshot(rec);
			if (auxtrace_snapshot_err) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate the error only if there is one. Ignore a
			 * positive number of returned events and an interrupt
			 * error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			auxtrace_snapshot_enabled = 0;
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}
	auxtrace_snapshot_enabled = 0;

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;
		file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);

		if (!rec->no_buildid) {
			process_buildids(rec);

			if (rec->buildid_all)
				dsos__hit_all(rec->session);
		}
		perf_session__write_header(rec->session, rec->evlist, fd, true);
	}

	if (!err && !quiet) {
		char samples[128];

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s ]\n",
			perf_data_file__size(file) / 1024.0 / 1024.0,
			file->path, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

static void callchain_debug(void)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };

	pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);

	if (callchain_param.record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain_param.dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	int ret;
	struct record_opts *record = (struct record_opts *)opt->value;

	record->callgraph_set = true;
	callchain_param.enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain_param.record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg, &callchain_param);
	if (!ret) {
		/* Enable data address sampling for DWARF unwind. */
		if (callchain_param.record_mode == CALLCHAIN_DWARF)
			record->sample_address = true;
		callchain_debug();
	}

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct record_opts *record = (struct record_opts *)opt->value;

	record->callgraph_set = true;
	callchain_param.enabled = true;

	if (callchain_param.record_mode == CALLCHAIN_NONE)
		callchain_param.record_mode = CALLCHAIN_FP;

	callchain_debug();
	return 0;
}

static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.build-id")) {
		if (!strcmp(value, "cache"))
			rec->no_buildid_cache = false;
		else if (!strcmp(value, "no-cache"))
			rec->no_buildid_cache = true;
		else if (!strcmp(value, "skip"))
			rec->no_buildid = true;
		else
			return -1;
		return 0;
	}
	if (!strcmp(var, "record.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */

	return perf_default_config(var, value, cb);
}

struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};

static int parse_clockid(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;
	const struct clockid_map *cm;
	const char *ostr = str;

	if (unset) {
		opts->use_clockid = 0;
		return 0;
	}

	/* no arg passed */
	if (!str)
		return 0;

	/* no setting it twice */
	if (opts->use_clockid)
		return -1;

	opts->use_clockid = true;

	/* if it's a number, we're done */
	if (sscanf(str, "%d", &opts->clockid) == 1)
		return 0;

	/* allow a "CLOCK_" prefix to the name */
	if (!strncasecmp(str, "CLOCK_", 6))
		str += 6;

	for (cm = clockids; cm->name; cm++) {
		if (!strcasecmp(str, cm->name)) {
			opts->clockid = cm->clockid;
			return 0;
		}
	}

	opts->use_clockid = false;
	ui__warning("unknown clockid %s, check man page\n", ostr);
	return -1;
}

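/*
 * Parse -m/--mmap-pages: "pages[,pages]", where the optional second value
 * sizes the AUX area tracing mmap.
 */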
static int record__parse_mmap_pages(const struct option *opt,
				    const char *str,
				    int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;
	char *s, *p;
	unsigned int mmap_pages;
	int ret;

	if (!str)
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	if (*s) {
		ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
		if (ret)
			goto out_free;
		opts->mmap_pages = mmap_pages;
	}

	if (!p) {
		ret = 0;
		goto out_free;
	}

	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
	if (ret)
		goto out_free;

	opts->auxtrace_mmap_pages = mmap_pages;

out_free:
	free(s);
	return ret;
}

static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;

/*
 * XXX Ideally this would be local to cmd_record() and passed to a
 * record__new(), because we need access to it in record__exit(), which is
 * called after cmd_record() exits. But since record_options needs to be
 * accessible to builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.proc_map_timeout    = 500,
	},
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};

const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop
 * messing with it and switch to using the library functions in perf_evlist
 * that came from builtin-record.c, i.e. record_opts,
 * perf_evlist__prepare_workload, etc., instead of fork+exec'ing 'perf record'
 * using pipes, etc.
 */
struct option __record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
			   NULL, "don't record events from perf itself",
			   exclude_perf),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		   "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		   "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		   "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
			&record.no_buildid_cache_set,
			"do not update the buildid cache"),
	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
			&record.no_buildid_set,
			"do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
			    "sample selected machine registers on interrupt,"
			    " use -I ? to list register names", parse_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
		     "clockid", "clockid to use for events, see clock_gettime()",
		     parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
		    "Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_END()
};

struct option *record_options = __record_options;

int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

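	/*
	 * Clang/LLVM and BPF prologue support are build-time features; mark
	 * the related options as unavailable when perf was built without
	 * them.
	 */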
#ifndef HAVE_LIBBPF_SUPPORT
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
	set_nobuild('\0', "clang-path", true);
	set_nobuild('\0', "clang-opt", true);
# undef set_nobuild
#endif

#ifndef HAVE_BPF_PROLOGUE
# if !defined (HAVE_DWARF_SUPPORT)
#  define REASON  "NO_DWARF=1"
# elif !defined (HAVE_LIBBPF_SUPPORT)
#  define REASON  "NO_LIBBPF=1"
# else
#  define REASON  "this architecture doesn't support BPF prologue"
# endif
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
	set_nobuild('\0', "vmlinux", true);
# undef set_nobuild
# undef REASON
#endif

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		usage_with_options_msg(record_usage, record_options,
			"cgroup monitoring only available in system-wide mode");

	}
	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		ui__error("kernel does not support recording context switch events\n");
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		return -EINVAL;
	}

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	err = -ENOMEM;

	symbol__init(NULL);

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out_symbol_exit;

	/*
	 * We take all buildids when the file contains AUX area tracing data
	 * because we do not decode the trace, as that would take too long.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
	return err;
}

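/*
 * SIGUSR2 handler for AUX area snapshot mode: kick off a snapshot and let
 * the main loop in __cmd_record() collect and write it out.
 */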
static void snapshot_sig_handler(int sig __maybe_unused)
{
	if (!auxtrace_snapshot_enabled)
		return;
	auxtrace_snapshot_enabled = 0;
	auxtrace_snapshot_err = auxtrace_record__snapshot_start(record.itr);
	auxtrace_record__snapshot_started = 1;
}