perf tools: Introduce per user view
[deliverable/linux.git] / tools / perf / builtin-record.c
1 /*
2 * builtin-record.c
3 *
4 * Builtin record command: Record the profile of a workload
5 * (or a CPU, or a PID) into the perf.data output file - for
6 * later analysis via perf report.
7 */
8 #define _FILE_OFFSET_BITS 64
9
10 #include "builtin.h"
11
12 #include "perf.h"
13
14 #include "util/build-id.h"
15 #include "util/util.h"
16 #include "util/parse-options.h"
17 #include "util/parse-events.h"
18
19 #include "util/header.h"
20 #include "util/event.h"
21 #include "util/evlist.h"
22 #include "util/evsel.h"
23 #include "util/debug.h"
24 #include "util/session.h"
25 #include "util/tool.h"
26 #include "util/symbol.h"
27 #include "util/cpumap.h"
28 #include "util/thread_map.h"
29
30 #include <unistd.h>
31 #include <sched.h>
32 #include <sys/mman.h>
33
/*
 * What to do when the output file already exists:
 * overwrite it (the default) or append to it (-A/--append).
 */
enum write_mode_t {
	WRITE_FORCE,
	WRITE_APPEND
};
38
/*
 * All the state for one 'perf record' invocation: the perf_tool
 * callbacks, the parsed command line options, the event list being
 * recorded and the bookkeeping for the output file.
 */
struct perf_record {
	struct perf_tool tool;		/* callbacks for synthesized events */
	struct perf_record_opts opts;	/* parsed recording options */
	u64 bytes_written;		/* payload bytes written to 'output' so far */
	const char *output_name;	/* output file name; NULL selects perf.data/pipe */
	struct perf_evlist *evlist;	/* events being recorded */
	struct perf_session *session;	/* session backing the output file */
	const char *progname;		/* argv[0], used by psignal() on child death */
	const char *uid_str;		/* -u/--uid argument, NULL if not given */
	int output;			/* output file descriptor */
	unsigned int page_size;		/* runtime page size (data offset in the mmaps) */
	int realtime_prio;		/* nonzero: SCHED_FIFO priority to request */
	enum write_mode_t write_mode;	/* overwrite vs. append to an existing file */
	bool no_buildid;		/* -B: do not collect build-ids in perf.data */
	bool no_buildid_cache;		/* -N: do not update the build-id cache */
	bool force;			/* -f: overwrite existing data file */
	bool file_new;			/* output file did not pre-exist / is truncated */
	bool append_file;		/* -A: append to the output file */
	long samples;			/* count of non-empty mmap reads (poll progress) */
	off_t post_processing_offset;	/* file offset where sample data starts */
};
60
/*
 * Account for 'size' bytes that some other code path (e.g.
 * perf_event__synthesize_tracing_data()) wrote directly to the output
 * fd, so that the header's data_size stays correct at exit time.
 */
static void advance_output(struct perf_record *rec, size_t size)
{
	rec->bytes_written += size;
}
65
66 static void write_output(struct perf_record *rec, void *buf, size_t size)
67 {
68 while (size) {
69 int ret = write(rec->output, buf, size);
70
71 if (ret < 0)
72 die("failed to write");
73
74 size -= ret;
75 buf += ret;
76
77 rec->bytes_written += ret;
78 }
79 }
80
/*
 * perf_tool callback: dump a synthesized event (mmap, comm, tracing
 * data, ...) straight into the output file.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __used,
				     struct machine *machine __used)
{
	/* Recover our perf_record from the embedded tool member. */
	struct perf_record *rec = container_of(tool, struct perf_record, tool);
	write_output(rec, event, event->header.size);
	return 0;
}
90
/*
 * Drain one kernel mmap ring buffer into the output file.
 *
 * The first page of md->base holds the ring-buffer control structure,
 * so the data area starts one page in. Data between md->prev (what we
 * consumed last time) and the kernel's head pointer is copied out; if
 * that span wraps around the end of the buffer it is written in two
 * chunks. Finally the tail pointer is advanced to tell the kernel the
 * space may be reused.
 */
static void perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	/* Data area starts one page after the control page. */
	unsigned char *data = md->base + rec->page_size;
	unsigned long size;
	void *buf;

	if (old == head)
		return;		/* nothing new in this buffer */

	rec->samples++;

	size = head - old;

	/* Span wraps past the end of the buffer: write the tail chunk first. */
	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		write_output(rec, buf, size);
	}

	/* Write the (remaining) contiguous chunk. */
	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	write_output(rec, buf, size);

	/* Publish consumption so the kernel can overwrite the space. */
	md->prev = old;
	perf_mmap__write_tail(md, old);
}
124
/* Async-signal state: polled from the record loop and exit handlers. */
static volatile int done = 0;		/* set on any handled signal: stop recording */
static volatile int signr = -1;		/* which signal stopped us, -1 if none */
static volatile int child_finished = 0;	/* SIGCHLD seen: workload already exited */

/*
 * Common handler for SIGCHLD/SIGINT/SIGUSR1: just record what happened
 * and let the main loop wind down.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}
137
/*
 * on_exit() handler: terminate and reap a forked workload if it is
 * still running, then, if we are exiting because of a signal, re-raise
 * it with the default disposition so our own exit status reflects it.
 * SIGUSR1 is excluded: it is used as a benign "stop recording" request.
 */
static void perf_record__sig_exit(int exit_status __used, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	if (signr == -1 || signr == SIGUSR1)
		return;

	/* Die of the same signal we caught, with default semantics. */
	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
158
159 static bool perf_evlist__equal(struct perf_evlist *evlist,
160 struct perf_evlist *other)
161 {
162 struct perf_evsel *pos, *pair;
163
164 if (evlist->nr_entries != other->nr_entries)
165 return false;
166
167 pair = list_entry(other->entries.next, struct perf_evsel, node);
168
169 list_for_each_entry(pos, &evlist->entries, node) {
170 if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr) != 0))
171 return false;
172 pair = list_entry(pair->node.next, struct perf_evsel, node);
173 }
174
175 return true;
176 }
177
/*
 * Open a counter fd for every event in the evlist on the configured
 * cpus/threads, with graceful fallbacks for older kernels (no
 * sample_id_all) and machines without a cycles PMU event (fall back to
 * the cpu-clock software counter). Then attach filters, mmap the ring
 * buffers and hook the evlist up to the session. Any unrecoverable
 * error exits the process.
 */
static void perf_record__open(struct perf_record *rec)
{
	struct perf_evsel *pos, *first;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	perf_evlist__config_attrs(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
		struct perf_event_attr *attr = &pos->attr;
		struct xyarray *group_fd = NULL;
		/*
		 * Check if parse_single_tracepoint_event has already asked for
		 * PERF_SAMPLE_TIME.
		 *
		 * XXX this is kludgy but short term fix for problems introduced by
		 * eac23d1c that broke 'perf script' by having different sample_types
		 * when using multiple tracepoint events when we use a perf binary
		 * that tries to use sample_id_all on an older kernel.
		 *
		 * We need to move counter creation to perf_session, support
		 * different sample_types, etc.
		 */
		bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;

		/* Group leaders own the fd array the members attach to. */
		if (opts->group && pos != first)
			group_fd = first->fd;
retry_sample_id:
		attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0;
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads,
				     opts->group, group_fd) < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES) {
				ui__error_paranoid();
				exit(EXIT_FAILURE);
			} else if (err == ENODEV && opts->cpu_list) {
				die("No such device - did you specify"
				    " an out-of-range profile CPU?\n");
			} else if (err == EINVAL && opts->sample_id_all_avail) {
				/*
				 * Old kernel, no attr->sample_id_type_all field:
				 * disable it and retry, dropping PERF_SAMPLE_TIME
				 * unless something explicitly asked for it.
				 */
				opts->sample_id_all_avail = false;
				if (!opts->sample_time && !opts->raw_samples && !time_needed)
					attr->sample_type &= ~PERF_SAMPLE_TIME;

				goto retry_sample_id;
			}

			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE
			    && attr->config == PERF_COUNT_HW_CPU_CYCLES) {

				if (verbose)
					ui__warning("The cycles event is not supported, "
						    "trying to fall back to cpu-clock-ticks\n");
				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}

			if (err == ENOENT) {
				ui__warning("The %s event is not supported.\n",
					    event_name(pos));
				exit(EXIT_FAILURE);
			}

			printf("\n");
			error("sys_perf_event_open() syscall returned with %d (%s).  /bin/dmesg may provide additional information.\n",
			      err, strerror(err));

#if defined(__i386__) || defined(__x86_64__)
			if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
				die("No hardware sampling interrupt available."
				    " No APIC? If so then you can boot the kernel"
				    " with the \"lapic\" boot parameter to"
				    " force-enable it.\n");
#endif

			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
		}
	}

	if (perf_evlist__set_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		exit(-1);
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM)
			die("Permission error mapping pages.\n"
			    "Consider increasing "
			    "/proc/sys/kernel/perf_event_mlock_kb,\n"
			    "or try again with a smaller value of -m/--mmap_pages.\n"
			    "(current value: %d)\n", opts->mmap_pages);
		else if (!is_power_of_2(opts->mmap_pages))
			die("--mmap_pages/-m value must be a power of two.");

		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
	}

	if (rec->file_new)
		session->evlist = evlist;
	else {
		/* Appending: the new events must match the recorded ones. */
		if (!perf_evlist__equal(session->evlist, evlist)) {
			fprintf(stderr, "incompatible append\n");
			exit(-1);
		}
 	}

	perf_session__update_sample_type(session);
}
300
/*
 * Post-process the recorded data: re-read all events between the end of
 * the header and the current end of file, marking the DSOs that got
 * hits so only their build-ids get written out. Returns 0 when there
 * is nothing to process, otherwise whatever the session pass returns.
 */
static int process_buildids(struct perf_record *rec)
{
	u64 size = lseek(rec->output, 0, SEEK_CUR);

	if (size == 0)
		return 0;

	rec->session->fd = rec->output;
	return __perf_session__process_events(rec->session, rec->post_processing_offset,
					      size - rec->post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}
313
/*
 * on_exit() handler: finalize the perf.data file (data size, build-ids,
 * header rewrite) and tear down the session/evlist. Nothing to do when
 * writing to a pipe - the header was streamed up front.
 */
static void perf_record__exit(int status __used, void *arg)
{
	struct perf_record *rec = arg;

	if (!rec->opts.pipe_output) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   rec->output, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}
330
/*
 * Synthesize module and kernel mmap events for one guest machine
 * (called for every machine in the session; the host is skipped).
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;

	if (machine__is_host(machine))
		return;

	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}
366
/*
 * Marker event written after each full sweep of the mmap buffers, so
 * report-time reordering knows a round of events is complete.
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
371
372 static void perf_record__mmap_read_all(struct perf_record *rec)
373 {
374 int i;
375
376 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
377 if (rec->evlist->mmap[i].base)
378 perf_record__mmap_read(rec, &rec->evlist->mmap[i]);
379 }
380
381 if (perf_header__has_feat(&rec->session->header, HEADER_TRACE_INFO))
382 write_output(rec, &finished_round_event, sizeof(finished_round_event));
383 }
384
385 static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
386 {
387 struct stat st;
388 int flags;
389 int err, output;
390 unsigned long waking = 0;
391 const bool forks = argc > 0;
392 struct machine *machine;
393 struct perf_tool *tool = &rec->tool;
394 struct perf_record_opts *opts = &rec->opts;
395 struct perf_evlist *evsel_list = rec->evlist;
396 const char *output_name = rec->output_name;
397 struct perf_session *session;
398
399 rec->progname = argv[0];
400
401 rec->page_size = sysconf(_SC_PAGE_SIZE);
402
403 on_exit(perf_record__sig_exit, rec);
404 signal(SIGCHLD, sig_handler);
405 signal(SIGINT, sig_handler);
406 signal(SIGUSR1, sig_handler);
407
408 if (!output_name) {
409 if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
410 opts->pipe_output = true;
411 else
412 rec->output_name = output_name = "perf.data";
413 }
414 if (output_name) {
415 if (!strcmp(output_name, "-"))
416 opts->pipe_output = true;
417 else if (!stat(output_name, &st) && st.st_size) {
418 if (rec->write_mode == WRITE_FORCE) {
419 char oldname[PATH_MAX];
420 snprintf(oldname, sizeof(oldname), "%s.old",
421 output_name);
422 unlink(oldname);
423 rename(output_name, oldname);
424 }
425 } else if (rec->write_mode == WRITE_APPEND) {
426 rec->write_mode = WRITE_FORCE;
427 }
428 }
429
430 flags = O_CREAT|O_RDWR;
431 if (rec->write_mode == WRITE_APPEND)
432 rec->file_new = 0;
433 else
434 flags |= O_TRUNC;
435
436 if (opts->pipe_output)
437 output = STDOUT_FILENO;
438 else
439 output = open(output_name, flags, S_IRUSR | S_IWUSR);
440 if (output < 0) {
441 perror("failed to create output file");
442 exit(-1);
443 }
444
445 rec->output = output;
446
447 session = perf_session__new(output_name, O_WRONLY,
448 rec->write_mode == WRITE_FORCE, false, NULL);
449 if (session == NULL) {
450 pr_err("Not enough memory for reading perf file header\n");
451 return -1;
452 }
453
454 rec->session = session;
455
456 if (!rec->no_buildid)
457 perf_header__set_feat(&session->header, HEADER_BUILD_ID);
458
459 if (!rec->file_new) {
460 err = perf_session__read_header(session, output);
461 if (err < 0)
462 goto out_delete_session;
463 }
464
465 if (have_tracepoints(&evsel_list->entries))
466 perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
467
468 perf_header__set_feat(&session->header, HEADER_HOSTNAME);
469 perf_header__set_feat(&session->header, HEADER_OSRELEASE);
470 perf_header__set_feat(&session->header, HEADER_ARCH);
471 perf_header__set_feat(&session->header, HEADER_CPUDESC);
472 perf_header__set_feat(&session->header, HEADER_NRCPUS);
473 perf_header__set_feat(&session->header, HEADER_EVENT_DESC);
474 perf_header__set_feat(&session->header, HEADER_CMDLINE);
475 perf_header__set_feat(&session->header, HEADER_VERSION);
476 perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
477 perf_header__set_feat(&session->header, HEADER_TOTAL_MEM);
478 perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY);
479 perf_header__set_feat(&session->header, HEADER_CPUID);
480
481 if (forks) {
482 err = perf_evlist__prepare_workload(evsel_list, opts, argv);
483 if (err < 0) {
484 pr_err("Couldn't run the workload!\n");
485 goto out_delete_session;
486 }
487 }
488
489 perf_record__open(rec);
490
491 /*
492 * perf_session__delete(session) will be called at perf_record__exit()
493 */
494 on_exit(perf_record__exit, rec);
495
496 if (opts->pipe_output) {
497 err = perf_header__write_pipe(output);
498 if (err < 0)
499 return err;
500 } else if (rec->file_new) {
501 err = perf_session__write_header(session, evsel_list,
502 output, false);
503 if (err < 0)
504 return err;
505 }
506
507 if (!!rec->no_buildid
508 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
509 pr_err("Couldn't generating buildids. "
510 "Use --no-buildid to profile anyway.\n");
511 return -1;
512 }
513
514 rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
515
516 machine = perf_session__find_host_machine(session);
517 if (!machine) {
518 pr_err("Couldn't find native kernel information.\n");
519 return -1;
520 }
521
522 if (opts->pipe_output) {
523 err = perf_event__synthesize_attrs(tool, session,
524 process_synthesized_event);
525 if (err < 0) {
526 pr_err("Couldn't synthesize attrs.\n");
527 return err;
528 }
529
530 err = perf_event__synthesize_event_types(tool, process_synthesized_event,
531 machine);
532 if (err < 0) {
533 pr_err("Couldn't synthesize event_types.\n");
534 return err;
535 }
536
537 if (have_tracepoints(&evsel_list->entries)) {
538 /*
539 * FIXME err <= 0 here actually means that
540 * there were no tracepoints so its not really
541 * an error, just that we don't need to
542 * synthesize anything. We really have to
543 * return this more properly and also
544 * propagate errors that now are calling die()
545 */
546 err = perf_event__synthesize_tracing_data(tool, output, evsel_list,
547 process_synthesized_event);
548 if (err <= 0) {
549 pr_err("Couldn't record tracing data.\n");
550 return err;
551 }
552 advance_output(rec, err);
553 }
554 }
555
556 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
557 machine, "_text");
558 if (err < 0)
559 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
560 machine, "_stext");
561 if (err < 0)
562 pr_err("Couldn't record kernel reference relocation symbol\n"
563 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
564 "Check /proc/kallsyms permission or run as root.\n");
565
566 err = perf_event__synthesize_modules(tool, process_synthesized_event,
567 machine);
568 if (err < 0)
569 pr_err("Couldn't record kernel module information.\n"
570 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
571 "Check /proc/modules permission or run as root.\n");
572
573 if (perf_guest)
574 perf_session__process_machines(session, tool,
575 perf_event__synthesize_guest_os);
576
577 if (!opts->system_wide)
578 perf_event__synthesize_thread_map(tool, evsel_list->threads,
579 process_synthesized_event,
580 machine);
581 else
582 perf_event__synthesize_threads(tool, process_synthesized_event,
583 machine);
584
585 if (rec->realtime_prio) {
586 struct sched_param param;
587
588 param.sched_priority = rec->realtime_prio;
589 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
590 pr_err("Could not set realtime priority.\n");
591 exit(-1);
592 }
593 }
594
595 perf_evlist__enable(evsel_list);
596
597 /*
598 * Let the child rip
599 */
600 if (forks)
601 perf_evlist__start_workload(evsel_list);
602
603 for (;;) {
604 int hits = rec->samples;
605
606 perf_record__mmap_read_all(rec);
607
608 if (hits == rec->samples) {
609 if (done)
610 break;
611 err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
612 waking++;
613 }
614
615 if (done)
616 perf_evlist__disable(evsel_list);
617 }
618
619 if (quiet || signr == SIGUSR1)
620 return 0;
621
622 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
623
624 /*
625 * Approximate RIP event size: 24 bytes.
626 */
627 fprintf(stderr,
628 "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
629 (double)rec->bytes_written / 1024.0 / 1024.0,
630 output_name,
631 rec->bytes_written / 24);
632
633 return 0;
634
635 out_delete_session:
636 perf_session__delete(session);
637 return err;
638 }
639
/* Usage strings shown by usage_with_options(). */
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
645
/*
 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
 * because we need to have access to it in perf_record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		.target_pid	     = -1,		/* -1: no existing pid to attach to */
		.target_tid	     = -1,		/* -1: no existing tid to attach to */
		.mmap_pages	     = UINT_MAX,	/* UINT_MAX: user gave no -m */
		.user_freq	     = UINT_MAX,	/* UINT_MAX: user gave no -F */
		.user_interval	     = ULLONG_MAX,	/* ULLONG_MAX: user gave no -c */
		.freq		     = 1000,		/* default sampling frequency (Hz) */
		.sample_id_all_avail = true,		/* assume modern kernel until EINVAL */
	},
	.write_mode = WRITE_FORCE,
	.file_new   = true,
};
669
/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_INTEGER('p', "pid", &record.opts.target_pid,
		    "record events on existing process id"),
	OPT_INTEGER('t', "tid", &record.opts.target_tid,
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('A', "append", &record.append_file,
			    "append to the output file to do incremental profiling"),
	OPT_STRING('C', "cpu", &record.opts.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN('f', "force", &record.force,
			"overwrite existing data file (deprecated)"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.output_name, "file",
		    "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('g', "call-graph", &record.opts.call_graph,
		    "do call-graph (stack chain/backtrace) recording"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.uid_str, "user", "user to profile"),
	OPT_END()
};
734
/*
 * Entry point for 'perf record': parse and validate the command line,
 * resolve the target (pid/tid/uid/cpus), build the evsel/cpu/thread
 * maps and hand over to __cmd_record(). Returns 0 on success or a
 * negative errno-style value.
 */
int cmd_record(int argc, const char **argv, const char *prefix __used)
{
	int err = -ENOMEM;
	struct perf_evsel *pos;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;

	perf_header__set_cmdline(argc, argv);

	evsel_list = perf_evlist__new(NULL, NULL);
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	/* Need something to record: a command, a target, or system-wide. */
	if (!argc && rec->opts.target_pid == -1 && rec->opts.target_tid == -1 &&
		!rec->opts.system_wide && !rec->opts.cpu_list && !rec->uid_str)
		usage_with_options(record_usage, record_options);

	if (rec->force && rec->append_file) {
		fprintf(stderr, "Can't overwrite and append at the same time."
				" You need to choose between -f and -A");
		usage_with_options(record_usage, record_options);
	} else if (rec->append_file) {
		rec->write_mode = WRITE_APPEND;
	} else {
		rec->write_mode = WRITE_FORCE;
	}

	if (nr_cgroups && !rec->opts.system_wide) {
		fprintf(stderr, "cgroup monitoring only available in"
			" system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	/* No -e given: fall back to the default event (cycles). */
	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	/* parse_target_uid() returns UINT_MAX - 1 on an invalid user. */
	rec->opts.uid = parse_target_uid(rec->uid_str, rec->opts.target_tid,
					 rec->opts.target_pid);
	if (rec->uid_str != NULL && rec->opts.uid == UINT_MAX - 1)
		goto out_free_fd;

	/* -p without -t: monitor the process' main thread. */
	if (rec->opts.target_pid != -1)
		rec->opts.target_tid = rec->opts.target_pid;

	if (perf_evlist__create_maps(evsel_list, rec->opts.target_pid,
				     rec->opts.target_tid, rec->opts.uid,
				     rec->opts.cpu_list) < 0)
		usage_with_options(record_usage, record_options);

	list_for_each_entry(pos, &evsel_list->entries, node) {
		if (perf_header__push_event(pos->attr.config, event_name(pos)))
			goto out_free_fd;
	}

	if (rec->opts.user_interval != ULLONG_MAX)
		rec->opts.default_interval = rec->opts.user_interval;
	if (rec->opts.user_freq != UINT_MAX)
		rec->opts.freq = rec->opts.user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (rec->opts.default_interval)
		rec->opts.freq = 0;
	else if (rec->opts.freq) {
		rec->opts.default_interval = rec->opts.freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}
This page took 0.047797 seconds and 5 git commands to generate.