tools/perf/util/machine.c
1 #include "callchain.h"
2 #include "debug.h"
3 #include "event.h"
4 #include "evsel.h"
5 #include "hist.h"
6 #include "machine.h"
7 #include "map.h"
8 #include "sort.h"
9 #include "strlist.h"
10 #include "thread.h"
11 #include "vdso.h"
12 #include <stdbool.h>
13 #include <symbol/kallsyms.h>
14 #include "unwind.h"
15
16 int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
17 {
18 map_groups__init(&machine->kmaps);
19 RB_CLEAR_NODE(&machine->rb_node);
20 INIT_LIST_HEAD(&machine->user_dsos);
21 INIT_LIST_HEAD(&machine->kernel_dsos);
22
23 machine->threads = RB_ROOT;
24 INIT_LIST_HEAD(&machine->dead_threads);
25 machine->last_match = NULL;
26
27 machine->vdso_info = NULL;
28
29 machine->kmaps.machine = machine;
30 machine->pid = pid;
31
32 machine->symbol_filter = NULL;
33 machine->id_hdr_size = 0;
34 machine->comm_exec = false;
35
36 machine->root_dir = strdup(root_dir);
37 if (machine->root_dir == NULL)
38 return -ENOMEM;
39
40 if (pid != HOST_KERNEL_ID) {
41 struct thread *thread = machine__findnew_thread(machine, -1,
42 pid);
43 char comm[64];
44
45 if (thread == NULL)
46 return -ENOMEM;
47
48 snprintf(comm, sizeof(comm), "[guest/%d]", pid);
49 thread__set_comm(thread, comm, 0);
50 }
51
52 machine->current_tid = NULL;
53
54 return 0;
55 }
56
57 struct machine *machine__new_host(void)
58 {
59 struct machine *machine = malloc(sizeof(*machine));
60
61 if (machine != NULL) {
62 machine__init(machine, "", HOST_KERNEL_ID);
63
64 if (machine__create_kernel_maps(machine) < 0)
65 goto out_delete;
66 }
67
68 return machine;
69 out_delete:
70 free(machine);
71 return NULL;
72 }
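
/*
 * Editor's note: illustrative sketch, not part of machine.c.  It shows the
 * typical lifecycle of the host machine object created above: allocate it
 * with machine__new_host(), use it, then release it with machine__delete().
 * The example_* name is hypothetical.
 */
#if 0	/* example only */
static int example_host_lifecycle(void)
{
	struct machine *host = machine__new_host();

	if (host == NULL)
		return -ENOMEM;

	/* ... resolve symbols, process events against 'host' ... */

	machine__delete(host);	/* calls machine__exit() and frees the object */
	return 0;
}
#endif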
73
74 static void dsos__delete(struct list_head *dsos)
75 {
76 struct dso *pos, *n;
77
78 list_for_each_entry_safe(pos, n, dsos, node) {
79 list_del(&pos->node);
80 dso__delete(pos);
81 }
82 }
83
84 void machine__delete_dead_threads(struct machine *machine)
85 {
86 struct thread *n, *t;
87
88 list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
89 list_del(&t->node);
90 thread__delete(t);
91 }
92 }
93
94 void machine__delete_threads(struct machine *machine)
95 {
96 struct rb_node *nd = rb_first(&machine->threads);
97
98 while (nd) {
99 struct thread *t = rb_entry(nd, struct thread, rb_node);
100
101 rb_erase(&t->rb_node, &machine->threads);
102 nd = rb_next(nd);
103 thread__delete(t);
104 }
105 }
106
107 void machine__exit(struct machine *machine)
108 {
109 map_groups__exit(&machine->kmaps);
110 dsos__delete(&machine->user_dsos);
111 dsos__delete(&machine->kernel_dsos);
112 vdso__exit(machine);
113 zfree(&machine->root_dir);
114 zfree(&machine->current_tid);
115 }
116
117 void machine__delete(struct machine *machine)
118 {
119 machine__exit(machine);
120 free(machine);
121 }
122
123 void machines__init(struct machines *machines)
124 {
125 machine__init(&machines->host, "", HOST_KERNEL_ID);
126 machines->guests = RB_ROOT;
127 machines->symbol_filter = NULL;
128 }
129
130 void machines__exit(struct machines *machines)
131 {
132 machine__exit(&machines->host);
133 /* XXX exit guest */
134 }
135
136 struct machine *machines__add(struct machines *machines, pid_t pid,
137 const char *root_dir)
138 {
139 struct rb_node **p = &machines->guests.rb_node;
140 struct rb_node *parent = NULL;
141 struct machine *pos, *machine = malloc(sizeof(*machine));
142
143 if (machine == NULL)
144 return NULL;
145
146 if (machine__init(machine, root_dir, pid) != 0) {
147 free(machine);
148 return NULL;
149 }
150
151 machine->symbol_filter = machines->symbol_filter;
152
153 while (*p != NULL) {
154 parent = *p;
155 pos = rb_entry(parent, struct machine, rb_node);
156 if (pid < pos->pid)
157 p = &(*p)->rb_left;
158 else
159 p = &(*p)->rb_right;
160 }
161
162 rb_link_node(&machine->rb_node, parent, p);
163 rb_insert_color(&machine->rb_node, &machines->guests);
164
165 return machine;
166 }
167
168 void machines__set_symbol_filter(struct machines *machines,
169 symbol_filter_t symbol_filter)
170 {
171 struct rb_node *nd;
172
173 machines->symbol_filter = symbol_filter;
174 machines->host.symbol_filter = symbol_filter;
175
176 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
177 struct machine *machine = rb_entry(nd, struct machine, rb_node);
178
179 machine->symbol_filter = symbol_filter;
180 }
181 }
182
183 void machines__set_comm_exec(struct machines *machines, bool comm_exec)
184 {
185 struct rb_node *nd;
186
187 machines->host.comm_exec = comm_exec;
188
189 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
190 struct machine *machine = rb_entry(nd, struct machine, rb_node);
191
192 machine->comm_exec = comm_exec;
193 }
194 }
195
196 struct machine *machines__find(struct machines *machines, pid_t pid)
197 {
198 struct rb_node **p = &machines->guests.rb_node;
199 struct rb_node *parent = NULL;
200 struct machine *machine;
201 struct machine *default_machine = NULL;
202
203 if (pid == HOST_KERNEL_ID)
204 return &machines->host;
205
206 while (*p != NULL) {
207 parent = *p;
208 machine = rb_entry(parent, struct machine, rb_node);
209 if (pid < machine->pid)
210 p = &(*p)->rb_left;
211 else if (pid > machine->pid)
212 p = &(*p)->rb_right;
213 else
214 return machine;
215 if (!machine->pid)
216 default_machine = machine;
217 }
218
219 return default_machine;
220 }
221
222 struct machine *machines__findnew(struct machines *machines, pid_t pid)
223 {
224 char path[PATH_MAX];
225 const char *root_dir = "";
226 struct machine *machine = machines__find(machines, pid);
227
228 if (machine && (machine->pid == pid))
229 goto out;
230
231 if ((pid != HOST_KERNEL_ID) &&
232 (pid != DEFAULT_GUEST_KERNEL_ID) &&
233 (symbol_conf.guestmount)) {
234 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
235 if (access(path, R_OK)) {
236 static struct strlist *seen;
237
238 if (!seen)
239 seen = strlist__new(true, NULL);
240
241 if (!strlist__has_entry(seen, path)) {
242 pr_err("Can't access file %s\n", path);
243 strlist__add(seen, path);
244 }
245 machine = NULL;
246 goto out;
247 }
248 root_dir = path;
249 }
250
251 machine = machines__add(machines, pid, root_dir);
252 out:
253 return machine;
254 }
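
/*
 * Editor's note: illustrative sketch, not part of machine.c.  It shows how a
 * 'struct machines' container is typically used with the helpers above:
 * initialize the host entry, look up (or lazily create) a guest machine by
 * pid via machines__findnew(), and tear the container down again.  The
 * example_* name and the guest pid are hypothetical.
 */
#if 0	/* example only */
static void example_machines_usage(void)
{
	struct machines machines;
	struct machine *guest;

	machines__init(&machines);

	guest = machines__findnew(&machines, 1234);	/* guest kernel, pid 1234 */
	if (guest == NULL)
		pr_debug("no guest machine for pid 1234\n");

	machines__exit(&machines);
}
#endif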
255
256 void machines__process_guests(struct machines *machines,
257 machine__process_t process, void *data)
258 {
259 struct rb_node *nd;
260
261 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
262 struct machine *pos = rb_entry(nd, struct machine, rb_node);
263 process(pos, data);
264 }
265 }
266
267 char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
268 {
269 if (machine__is_host(machine))
270 snprintf(bf, size, "[%s]", "kernel.kallsyms");
271 else if (machine__is_default_guest(machine))
272 snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
273 else {
274 snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
275 machine->pid);
276 }
277
278 return bf;
279 }
280
281 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
282 {
283 struct rb_node *node;
284 struct machine *machine;
285
286 machines->host.id_hdr_size = id_hdr_size;
287
288 for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
289 machine = rb_entry(node, struct machine, rb_node);
290 machine->id_hdr_size = id_hdr_size;
291 }
292
293 return;
294 }
295
296 static void machine__update_thread_pid(struct machine *machine,
297 struct thread *th, pid_t pid)
298 {
299 struct thread *leader;
300
301 if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
302 return;
303
304 th->pid_ = pid;
305
306 if (th->pid_ == th->tid)
307 return;
308
309 leader = machine__findnew_thread(machine, th->pid_, th->pid_);
310 if (!leader)
311 goto out_err;
312
313 if (!leader->mg)
314 leader->mg = map_groups__new();
315
316 if (!leader->mg)
317 goto out_err;
318
319 if (th->mg == leader->mg)
320 return;
321
322 if (th->mg) {
323 /*
324 * Maps are created from MMAP events which provide the pid and
325 * tid. Consequently there should never be any maps on a thread
326 * with an unknown pid. Just print an error if there are.
327 */
328 if (!map_groups__empty(th->mg))
329 pr_err("Discarding thread maps for %d:%d\n",
330 th->pid_, th->tid);
331 map_groups__delete(th->mg);
332 }
333
334 th->mg = map_groups__get(leader->mg);
335
336 return;
337
338 out_err:
339 pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
340 }
341
342 static struct thread *__machine__findnew_thread(struct machine *machine,
343 pid_t pid, pid_t tid,
344 bool create)
345 {
346 struct rb_node **p = &machine->threads.rb_node;
347 struct rb_node *parent = NULL;
348 struct thread *th;
349
350 /*
351 * Front-end cache - TID lookups come in blocks,
352 * so most of the time we don't have to look up
353 * the full rbtree:
354 */
355 th = machine->last_match;
356 if (th && th->tid == tid) {
357 machine__update_thread_pid(machine, th, pid);
358 return th;
359 }
360
361 while (*p != NULL) {
362 parent = *p;
363 th = rb_entry(parent, struct thread, rb_node);
364
365 if (th->tid == tid) {
366 machine->last_match = th;
367 machine__update_thread_pid(machine, th, pid);
368 return th;
369 }
370
371 if (tid < th->tid)
372 p = &(*p)->rb_left;
373 else
374 p = &(*p)->rb_right;
375 }
376
377 if (!create)
378 return NULL;
379
380 th = thread__new(pid, tid);
381 if (th != NULL) {
382 rb_link_node(&th->rb_node, parent, p);
383 rb_insert_color(&th->rb_node, &machine->threads);
384 machine->last_match = th;
385
386 /*
387 * We have to initialize map_groups separately
388 * after rb tree is updated.
389 *
390 * The reason is that we call machine__findnew_thread
391 * within thread__init_map_groups to find the thread
392 * leader and that would screw up the rb tree.
393 */
394 if (thread__init_map_groups(th, machine)) {
395 thread__delete(th);
396 return NULL;
397 }
398 }
399
400 return th;
401 }
402
403 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
404 pid_t tid)
405 {
406 return __machine__findnew_thread(machine, pid, tid, true);
407 }
408
409 struct thread *machine__find_thread(struct machine *machine, pid_t pid,
410 pid_t tid)
411 {
412 return __machine__findnew_thread(machine, pid, tid, false);
413 }
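
/*
 * Editor's note: illustrative sketch, not part of machine.c.  It highlights
 * the difference between the two wrappers above: machine__find_thread() only
 * looks up an existing thread and may return NULL, while
 * machine__findnew_thread() creates the thread (and its map groups) on a
 * miss.  The example_* name and the pid/tid values are hypothetical.
 */
#if 0	/* example only */
static void example_thread_lookup(struct machine *machine)
{
	struct thread *th;

	th = machine__find_thread(machine, 1000, 1001);
	if (th == NULL)
		pr_debug("thread 1000:1001 not seen yet\n");

	/* Creates the thread on demand, so this only fails on ENOMEM: */
	th = machine__findnew_thread(machine, 1000, 1001);
	if (th == NULL)
		pr_err("out of memory creating thread 1000:1001\n");
}
#endif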
414
415 struct comm *machine__thread_exec_comm(struct machine *machine,
416 struct thread *thread)
417 {
418 if (machine->comm_exec)
419 return thread__exec_comm(thread);
420 else
421 return thread__comm(thread);
422 }
423
424 int machine__process_comm_event(struct machine *machine, union perf_event *event,
425 struct perf_sample *sample)
426 {
427 struct thread *thread = machine__findnew_thread(machine,
428 event->comm.pid,
429 event->comm.tid);
430 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
431
432 if (exec)
433 machine->comm_exec = true;
434
435 if (dump_trace)
436 perf_event__fprintf_comm(event, stdout);
437
438 if (thread == NULL ||
439 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
440 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
441 return -1;
442 }
443
444 return 0;
445 }
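
/*
 * Editor's note: illustrative sketch, not part of machine.c.  It feeds a
 * hand-built PERF_RECORD_COMM event through the handler above, which is
 * roughly what tests or event synthesizers do.  Only fields the handler
 * actually reads (header.type/misc, comm.pid/tid/comm, sample.time) are
 * filled in; the example_* name and the pid value are hypothetical and
 * <string.h> is assumed to be available.
 */
#if 0	/* example only */
static int example_synth_comm(struct machine *machine)
{
	union perf_event event;
	struct perf_sample sample;

	memset(&event, 0, sizeof(event));
	memset(&sample, 0, sizeof(sample));

	event.header.type = PERF_RECORD_COMM;
	event.comm.pid = 1234;
	event.comm.tid = 1234;
	strncpy(event.comm.comm, "example", sizeof(event.comm.comm) - 1);

	return machine__process_comm_event(machine, &event, &sample);
}
#endif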
446
447 int machine__process_lost_event(struct machine *machine __maybe_unused,
448 union perf_event *event, struct perf_sample *sample __maybe_unused)
449 {
450 dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
451 event->lost.id, event->lost.lost);
452 return 0;
453 }
454
455 struct map *machine__new_module(struct machine *machine, u64 start,
456 const char *filename)
457 {
458 struct map *map;
459 struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
460
461 if (dso == NULL)
462 return NULL;
463
464 map = map__new2(start, dso, MAP__FUNCTION);
465 if (map == NULL)
466 return NULL;
467
468 if (machine__is_host(machine))
469 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
470 else
471 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
472 map_groups__insert(&machine->kmaps, map);
473 return map;
474 }
475
476 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
477 {
478 struct rb_node *nd;
479 size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
480 __dsos__fprintf(&machines->host.user_dsos, fp);
481
482 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
483 struct machine *pos = rb_entry(nd, struct machine, rb_node);
484 ret += __dsos__fprintf(&pos->kernel_dsos, fp);
485 ret += __dsos__fprintf(&pos->user_dsos, fp);
486 }
487
488 return ret;
489 }
490
491 size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
492 bool (skip)(struct dso *dso, int parm), int parm)
493 {
494 return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
495 __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
496 }
497
498 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
499 bool (skip)(struct dso *dso, int parm), int parm)
500 {
501 struct rb_node *nd;
502 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
503
504 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
505 struct machine *pos = rb_entry(nd, struct machine, rb_node);
506 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
507 }
508 return ret;
509 }
510
511 size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
512 {
513 int i;
514 size_t printed = 0;
515 struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
516
517 if (kdso->has_build_id) {
518 char filename[PATH_MAX];
519 if (dso__build_id_filename(kdso, filename, sizeof(filename)))
520 printed += fprintf(fp, "[0] %s\n", filename);
521 }
522
523 for (i = 0; i < vmlinux_path__nr_entries; ++i)
524 printed += fprintf(fp, "[%d] %s\n",
525 i + kdso->has_build_id, vmlinux_path[i]);
526
527 return printed;
528 }
529
530 size_t machine__fprintf(struct machine *machine, FILE *fp)
531 {
532 size_t ret = 0;
533 struct rb_node *nd;
534
535 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
536 struct thread *pos = rb_entry(nd, struct thread, rb_node);
537
538 ret += thread__fprintf(pos, fp);
539 }
540
541 return ret;
542 }
543
544 static struct dso *machine__get_kernel(struct machine *machine)
545 {
546 const char *vmlinux_name = NULL;
547 struct dso *kernel;
548
549 if (machine__is_host(machine)) {
550 vmlinux_name = symbol_conf.vmlinux_name;
551 if (!vmlinux_name)
552 vmlinux_name = "[kernel.kallsyms]";
553
554 kernel = dso__kernel_findnew(machine, vmlinux_name,
555 "[kernel]",
556 DSO_TYPE_KERNEL);
557 } else {
558 char bf[PATH_MAX];
559
560 if (machine__is_default_guest(machine))
561 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
562 if (!vmlinux_name)
563 vmlinux_name = machine__mmap_name(machine, bf,
564 sizeof(bf));
565
566 kernel = dso__kernel_findnew(machine, vmlinux_name,
567 "[guest.kernel]",
568 DSO_TYPE_GUEST_KERNEL);
569 }
570
571 if (kernel != NULL && (!kernel->has_build_id))
572 dso__read_running_kernel_build_id(kernel, machine);
573
574 return kernel;
575 }
576
577 struct process_args {
578 u64 start;
579 };
580
581 static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
582 size_t bufsz)
583 {
584 if (machine__is_default_guest(machine))
585 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
586 else
587 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
588 }
589
590 const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
591
592 /* Figure out the start address of kernel map from /proc/kallsyms.
593 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
594 * symbol_name if it's not that important.
595 */
596 static u64 machine__get_kernel_start_addr(struct machine *machine,
597 const char **symbol_name)
598 {
599 char filename[PATH_MAX];
600 int i;
601 const char *name;
602 u64 addr = 0;
603
604 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
605
606 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
607 return 0;
608
609 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
610 addr = kallsyms__get_function_start(filename, name);
611 if (addr)
612 break;
613 }
614
615 if (symbol_name)
616 *symbol_name = name;
617
618 return addr;
619 }
620
621 int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
622 {
623 enum map_type type;
624 u64 start = machine__get_kernel_start_addr(machine, NULL);
625
626 for (type = 0; type < MAP__NR_TYPES; ++type) {
627 struct kmap *kmap;
628
629 machine->vmlinux_maps[type] = map__new2(start, kernel, type);
630 if (machine->vmlinux_maps[type] == NULL)
631 return -1;
632
633 machine->vmlinux_maps[type]->map_ip =
634 machine->vmlinux_maps[type]->unmap_ip =
635 identity__map_ip;
636 kmap = map__kmap(machine->vmlinux_maps[type]);
637 kmap->kmaps = &machine->kmaps;
638 map_groups__insert(&machine->kmaps,
639 machine->vmlinux_maps[type]);
640 }
641
642 return 0;
643 }
644
645 void machine__destroy_kernel_maps(struct machine *machine)
646 {
647 enum map_type type;
648
649 for (type = 0; type < MAP__NR_TYPES; ++type) {
650 struct kmap *kmap;
651
652 if (machine->vmlinux_maps[type] == NULL)
653 continue;
654
655 kmap = map__kmap(machine->vmlinux_maps[type]);
656 map_groups__remove(&machine->kmaps,
657 machine->vmlinux_maps[type]);
658 if (kmap->ref_reloc_sym) {
659 /*
660 * ref_reloc_sym is shared among all maps, so free just
661 * on one of them.
662 */
663 if (type == MAP__FUNCTION) {
664 zfree((char **)&kmap->ref_reloc_sym->name);
665 zfree(&kmap->ref_reloc_sym);
666 } else
667 kmap->ref_reloc_sym = NULL;
668 }
669
670 map__delete(machine->vmlinux_maps[type]);
671 machine->vmlinux_maps[type] = NULL;
672 }
673 }
674
675 int machines__create_guest_kernel_maps(struct machines *machines)
676 {
677 int ret = 0;
678 struct dirent **namelist = NULL;
679 int i, items = 0;
680 char path[PATH_MAX];
681 pid_t pid;
682 char *endp;
683
684 if (symbol_conf.default_guest_vmlinux_name ||
685 symbol_conf.default_guest_modules ||
686 symbol_conf.default_guest_kallsyms) {
687 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
688 }
689
690 if (symbol_conf.guestmount) {
691 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
692 if (items <= 0)
693 return -ENOENT;
694 for (i = 0; i < items; i++) {
695 if (!isdigit(namelist[i]->d_name[0])) {
696 /* Filter out . and .. */
697 continue;
698 }
699 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
700 if ((*endp != '\0') ||
701 (endp == namelist[i]->d_name) ||
702 (errno == ERANGE)) {
703 pr_debug("invalid directory (%s). Skipping.\n",
704 namelist[i]->d_name);
705 continue;
706 }
707 sprintf(path, "%s/%s/proc/kallsyms",
708 symbol_conf.guestmount,
709 namelist[i]->d_name);
710 ret = access(path, R_OK);
711 if (ret) {
712 pr_debug("Can't access file %s\n", path);
713 goto failure;
714 }
715 machines__create_kernel_maps(machines, pid);
716 }
717 failure:
718 free(namelist);
719 }
720
721 return ret;
722 }
723
724 void machines__destroy_kernel_maps(struct machines *machines)
725 {
726 struct rb_node *next = rb_first(&machines->guests);
727
728 machine__destroy_kernel_maps(&machines->host);
729
730 while (next) {
731 struct machine *pos = rb_entry(next, struct machine, rb_node);
732
733 next = rb_next(&pos->rb_node);
734 rb_erase(&pos->rb_node, &machines->guests);
735 machine__delete(pos);
736 }
737 }
738
739 int machines__create_kernel_maps(struct machines *machines, pid_t pid)
740 {
741 struct machine *machine = machines__findnew(machines, pid);
742
743 if (machine == NULL)
744 return -1;
745
746 return machine__create_kernel_maps(machine);
747 }
748
749 int machine__load_kallsyms(struct machine *machine, const char *filename,
750 enum map_type type, symbol_filter_t filter)
751 {
752 struct map *map = machine->vmlinux_maps[type];
753 int ret = dso__load_kallsyms(map->dso, filename, map, filter);
754
755 if (ret > 0) {
756 dso__set_loaded(map->dso, type);
757 /*
758 * Since /proc/kallsyms will have multiple sections for the
759 * kernel, with modules between them, fixup the end of all
760 * sections.
761 */
762 __map_groups__fixup_end(&machine->kmaps, type);
763 }
764
765 return ret;
766 }
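
/*
 * Editor's note: illustrative sketch, not part of machine.c.  It shows the
 * usual pairing for resolving kernel symbols: build the kernel maps for a
 * machine, then load symbols into the MAP__FUNCTION map with
 * machine__load_kallsyms() above (machine__create_kernel_maps() is defined
 * further down in this file and declared in machine.h).  The example_* name
 * is hypothetical and no symbol filter is used.
 */
#if 0	/* example only */
static int example_load_kernel_symbols(struct machine *machine)
{
	if (machine__create_kernel_maps(machine) < 0)
		return -1;

	/* Returns the number of symbols loaded, or <= 0 on failure/empty. */
	if (machine__load_kallsyms(machine, "/proc/kallsyms",
				   MAP__FUNCTION, NULL) <= 0)
		return -1;

	return 0;
}
#endif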
767
768 int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
769 symbol_filter_t filter)
770 {
771 struct map *map = machine->vmlinux_maps[type];
772 int ret = dso__load_vmlinux_path(map->dso, map, filter);
773
774 if (ret > 0)
775 dso__set_loaded(map->dso, type);
776
777 return ret;
778 }
779
780 static void map_groups__fixup_end(struct map_groups *mg)
781 {
782 int i;
783 for (i = 0; i < MAP__NR_TYPES; ++i)
784 __map_groups__fixup_end(mg, i);
785 }
786
787 static char *get_kernel_version(const char *root_dir)
788 {
789 char version[PATH_MAX];
790 FILE *file;
791 char *name, *tmp;
792 const char *prefix = "Linux version ";
793
794 sprintf(version, "%s/proc/version", root_dir);
795 file = fopen(version, "r");
796 if (!file)
797 return NULL;
798
799 version[0] = '\0';
800 tmp = fgets(version, sizeof(version), file);
801 fclose(file);
802
803 name = strstr(version, prefix);
804 if (!name)
805 return NULL;
806 name += strlen(prefix);
807 tmp = strchr(name, ' ');
808 if (tmp)
809 *tmp = '\0';
810
811 return strdup(name);
812 }
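
/*
 * Editor's note: worked example (not part of machine.c) of what the helper
 * above extracts.  Given a /proc/version line such as
 *
 *     Linux version 3.16.0-rc5 (user@host) (gcc ...) #1 SMP ...
 *
 * get_kernel_version() skips the "Linux version " prefix and cuts at the
 * first space, returning a strdup()'d "3.16.0-rc5" that the caller must
 * free.  The version string shown is hypothetical.
 */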
813
814 static int map_groups__set_modules_path_dir(struct map_groups *mg,
815 const char *dir_name, int depth)
816 {
817 struct dirent *dent;
818 DIR *dir = opendir(dir_name);
819 int ret = 0;
820
821 if (!dir) {
822 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
823 return -1;
824 }
825
826 while ((dent = readdir(dir)) != NULL) {
827 char path[PATH_MAX];
828 struct stat st;
829
830 /* sshfs might return bad dent->d_type, so we have to stat */
831 snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
832 if (stat(path, &st))
833 continue;
834
835 if (S_ISDIR(st.st_mode)) {
836 if (!strcmp(dent->d_name, ".") ||
837 !strcmp(dent->d_name, ".."))
838 continue;
839
840 /* Do not follow top-level source and build symlinks */
841 if (depth == 0) {
842 if (!strcmp(dent->d_name, "source") ||
843 !strcmp(dent->d_name, "build"))
844 continue;
845 }
846
847 ret = map_groups__set_modules_path_dir(mg, path,
848 depth + 1);
849 if (ret < 0)
850 goto out;
851 } else {
852 char *dot = strrchr(dent->d_name, '.'),
853 dso_name[PATH_MAX];
854 struct map *map;
855 char *long_name;
856
857 if (dot == NULL || strcmp(dot, ".ko"))
858 continue;
859 snprintf(dso_name, sizeof(dso_name), "[%.*s]",
860 (int)(dot - dent->d_name), dent->d_name);
861
862 strxfrchar(dso_name, '-', '_');
863 map = map_groups__find_by_name(mg, MAP__FUNCTION,
864 dso_name);
865 if (map == NULL)
866 continue;
867
868 long_name = strdup(path);
869 if (long_name == NULL) {
870 ret = -1;
871 goto out;
872 }
873 dso__set_long_name(map->dso, long_name, true);
874 dso__kernel_module_get_build_id(map->dso, "");
875 }
876 }
877
878 out:
879 closedir(dir);
880 return ret;
881 }
882
883 static int machine__set_modules_path(struct machine *machine)
884 {
885 char *version;
886 char modules_path[PATH_MAX];
887
888 version = get_kernel_version(machine->root_dir);
889 if (!version)
890 return -1;
891
892 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
893 machine->root_dir, version);
894 free(version);
895
896 return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
897 }
898
899 static int machine__create_module(void *arg, const char *name, u64 start)
900 {
901 struct machine *machine = arg;
902 struct map *map;
903
904 map = machine__new_module(machine, start, name);
905 if (map == NULL)
906 return -1;
907
908 dso__kernel_module_get_build_id(map->dso, machine->root_dir);
909
910 return 0;
911 }
912
913 static int machine__create_modules(struct machine *machine)
914 {
915 const char *modules;
916 char path[PATH_MAX];
917
918 if (machine__is_default_guest(machine)) {
919 modules = symbol_conf.default_guest_modules;
920 } else {
921 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
922 modules = path;
923 }
924
925 if (symbol__restricted_filename(modules, "/proc/modules"))
926 return -1;
927
928 if (modules__parse(modules, machine, machine__create_module))
929 return -1;
930
931 if (!machine__set_modules_path(machine))
932 return 0;
933
934 pr_debug("Problems setting modules path maps, continuing anyway...\n");
935
936 return 0;
937 }
938
939 int machine__create_kernel_maps(struct machine *machine)
940 {
941 struct dso *kernel = machine__get_kernel(machine);
942 const char *name;
943 u64 addr = machine__get_kernel_start_addr(machine, &name);
944 if (!addr)
945 return -1;
946
947 if (kernel == NULL ||
948 __machine__create_kernel_maps(machine, kernel) < 0)
949 return -1;
950
951 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
952 if (machine__is_host(machine))
953 pr_debug("Problems creating module maps, "
954 "continuing anyway...\n");
955 else
956 pr_debug("Problems creating module maps for guest %d, "
957 "continuing anyway...\n", machine->pid);
958 }
959
960 /*
961 * Now that we have all the maps created, just set the ->end of them:
962 */
963 map_groups__fixup_end(&machine->kmaps);
964
965 if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
966 addr)) {
967 machine__destroy_kernel_maps(machine);
968 return -1;
969 }
970
971 return 0;
972 }
973
974 static void machine__set_kernel_mmap_len(struct machine *machine,
975 union perf_event *event)
976 {
977 int i;
978
979 for (i = 0; i < MAP__NR_TYPES; i++) {
980 machine->vmlinux_maps[i]->start = event->mmap.start;
981 machine->vmlinux_maps[i]->end = (event->mmap.start +
982 event->mmap.len);
983 /*
984 * Be a bit paranoid here, some perf.data file came with
985 * a zero sized synthesized MMAP event for the kernel.
986 */
987 if (machine->vmlinux_maps[i]->end == 0)
988 machine->vmlinux_maps[i]->end = ~0ULL;
989 }
990 }
991
992 static bool machine__uses_kcore(struct machine *machine)
993 {
994 struct dso *dso;
995
996 list_for_each_entry(dso, &machine->kernel_dsos, node) {
997 if (dso__is_kcore(dso))
998 return true;
999 }
1000
1001 return false;
1002 }
1003
1004 static int machine__process_kernel_mmap_event(struct machine *machine,
1005 union perf_event *event)
1006 {
1007 struct map *map;
1008 char kmmap_prefix[PATH_MAX];
1009 enum dso_kernel_type kernel_type;
1010 bool is_kernel_mmap;
1011
1012 /* If we have maps from kcore then we do not need or want any others */
1013 if (machine__uses_kcore(machine))
1014 return 0;
1015
1016 machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
1017 if (machine__is_host(machine))
1018 kernel_type = DSO_TYPE_KERNEL;
1019 else
1020 kernel_type = DSO_TYPE_GUEST_KERNEL;
1021
1022 is_kernel_mmap = memcmp(event->mmap.filename,
1023 kmmap_prefix,
1024 strlen(kmmap_prefix) - 1) == 0;
1025 if (event->mmap.filename[0] == '/' ||
1026 (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
1027
1028 char short_module_name[1024];
1029 char *name, *dot;
1030
1031 if (event->mmap.filename[0] == '/') {
1032 name = strrchr(event->mmap.filename, '/');
1033 if (name == NULL)
1034 goto out_problem;
1035
1036 ++name; /* skip / */
1037 dot = strrchr(name, '.');
1038 if (dot == NULL)
1039 goto out_problem;
1040 snprintf(short_module_name, sizeof(short_module_name),
1041 "[%.*s]", (int)(dot - name), name);
1042 strxfrchar(short_module_name, '-', '_');
1043 } else
1044 strcpy(short_module_name, event->mmap.filename);
1045
1046 map = machine__new_module(machine, event->mmap.start,
1047 event->mmap.filename);
1048 if (map == NULL)
1049 goto out_problem;
1050
1051 name = strdup(short_module_name);
1052 if (name == NULL)
1053 goto out_problem;
1054
1055 dso__set_short_name(map->dso, name, true);
1056 map->end = map->start + event->mmap.len;
1057 } else if (is_kernel_mmap) {
1058 const char *symbol_name = (event->mmap.filename +
1059 strlen(kmmap_prefix));
1060 /*
1061 * Should be there already, from the build-id table in
1062 * the header.
1063 */
1064 struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
1065 kmmap_prefix);
1066 if (kernel == NULL)
1067 goto out_problem;
1068
1069 kernel->kernel = kernel_type;
1070 if (__machine__create_kernel_maps(machine, kernel) < 0)
1071 goto out_problem;
1072
1073 machine__set_kernel_mmap_len(machine, event);
1074
1075 /*
1076 * Avoid using a zero address (kptr_restrict) for the ref reloc
1077 * symbol. Effectively having zero here means that at record
1078 * time /proc/sys/kernel/kptr_restrict was non zero.
1079 */
1080 if (event->mmap.pgoff != 0) {
1081 maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
1082 symbol_name,
1083 event->mmap.pgoff);
1084 }
1085
1086 if (machine__is_default_guest(machine)) {
1087 /*
1088 * preload dso of guest kernel and modules
1089 */
1090 dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
1091 NULL);
1092 }
1093 }
1094 return 0;
1095 out_problem:
1096 return -1;
1097 }
1098
1099 int machine__process_mmap2_event(struct machine *machine,
1100 union perf_event *event,
1101 struct perf_sample *sample __maybe_unused)
1102 {
1103 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1104 struct thread *thread;
1105 struct map *map;
1106 enum map_type type;
1107 int ret = 0;
1108
1109 if (dump_trace)
1110 perf_event__fprintf_mmap2(event, stdout);
1111
1112 if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1113 cpumode == PERF_RECORD_MISC_KERNEL) {
1114 ret = machine__process_kernel_mmap_event(machine, event);
1115 if (ret < 0)
1116 goto out_problem;
1117 return 0;
1118 }
1119
1120 thread = machine__findnew_thread(machine, event->mmap2.pid,
1121 event->mmap2.tid);
1122 if (thread == NULL)
1123 goto out_problem;
1124
1125 if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
1126 type = MAP__VARIABLE;
1127 else
1128 type = MAP__FUNCTION;
1129
1130 map = map__new(machine, event->mmap2.start,
1131 event->mmap2.len, event->mmap2.pgoff,
1132 event->mmap2.pid, event->mmap2.maj,
1133 event->mmap2.min, event->mmap2.ino,
1134 event->mmap2.ino_generation,
1135 event->mmap2.prot,
1136 event->mmap2.flags,
1137 event->mmap2.filename, type, thread);
1138
1139 if (map == NULL)
1140 goto out_problem;
1141
1142 thread__insert_map(thread, map);
1143 return 0;
1144
1145 out_problem:
1146 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1147 return 0;
1148 }
1149
1150 int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1151 struct perf_sample *sample __maybe_unused)
1152 {
1153 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1154 struct thread *thread;
1155 struct map *map;
1156 enum map_type type;
1157 int ret = 0;
1158
1159 if (dump_trace)
1160 perf_event__fprintf_mmap(event, stdout);
1161
1162 if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1163 cpumode == PERF_RECORD_MISC_KERNEL) {
1164 ret = machine__process_kernel_mmap_event(machine, event);
1165 if (ret < 0)
1166 goto out_problem;
1167 return 0;
1168 }
1169
1170 thread = machine__findnew_thread(machine, event->mmap.pid,
1171 event->mmap.tid);
1172 if (thread == NULL)
1173 goto out_problem;
1174
1175 if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
1176 type = MAP__VARIABLE;
1177 else
1178 type = MAP__FUNCTION;
1179
1180 map = map__new(machine, event->mmap.start,
1181 event->mmap.len, event->mmap.pgoff,
1182 event->mmap.pid, 0, 0, 0, 0, 0, 0,
1183 event->mmap.filename,
1184 type, thread);
1185
1186 if (map == NULL)
1187 goto out_problem;
1188
1189 thread__insert_map(thread, map);
1190 return 0;
1191
1192 out_problem:
1193 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1194 return 0;
1195 }
1196
1197 static void machine__remove_thread(struct machine *machine, struct thread *th)
1198 {
1199 machine->last_match = NULL;
1200 rb_erase(&th->rb_node, &machine->threads);
1201 /*
1202 * We may have references to this thread, for instance in some hist_entry
1203 * instances, so just move them to a separate list.
1204 */
1205 list_add_tail(&th->node, &machine->dead_threads);
1206 }
1207
1208 int machine__process_fork_event(struct machine *machine, union perf_event *event,
1209 struct perf_sample *sample)
1210 {
1211 struct thread *thread = machine__find_thread(machine,
1212 event->fork.pid,
1213 event->fork.tid);
1214 struct thread *parent = machine__findnew_thread(machine,
1215 event->fork.ppid,
1216 event->fork.ptid);
1217
1218 /* if a thread currently exists for the thread id remove it */
1219 if (thread != NULL)
1220 machine__remove_thread(machine, thread);
1221
1222 thread = machine__findnew_thread(machine, event->fork.pid,
1223 event->fork.tid);
1224 if (dump_trace)
1225 perf_event__fprintf_task(event, stdout);
1226
1227 if (thread == NULL || parent == NULL ||
1228 thread__fork(thread, parent, sample->time) < 0) {
1229 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1230 return -1;
1231 }
1232
1233 return 0;
1234 }
1235
1236 int machine__process_exit_event(struct machine *machine, union perf_event *event,
1237 struct perf_sample *sample __maybe_unused)
1238 {
1239 struct thread *thread = machine__find_thread(machine,
1240 event->fork.pid,
1241 event->fork.tid);
1242
1243 if (dump_trace)
1244 perf_event__fprintf_task(event, stdout);
1245
1246 if (thread != NULL)
1247 thread__exited(thread);
1248
1249 return 0;
1250 }
1251
1252 int machine__process_event(struct machine *machine, union perf_event *event,
1253 struct perf_sample *sample)
1254 {
1255 int ret;
1256
1257 switch (event->header.type) {
1258 case PERF_RECORD_COMM:
1259 ret = machine__process_comm_event(machine, event, sample); break;
1260 case PERF_RECORD_MMAP:
1261 ret = machine__process_mmap_event(machine, event, sample); break;
1262 case PERF_RECORD_MMAP2:
1263 ret = machine__process_mmap2_event(machine, event, sample); break;
1264 case PERF_RECORD_FORK:
1265 ret = machine__process_fork_event(machine, event, sample); break;
1266 case PERF_RECORD_EXIT:
1267 ret = machine__process_exit_event(machine, event, sample); break;
1268 case PERF_RECORD_LOST:
1269 ret = machine__process_lost_event(machine, event, sample); break;
1270 default:
1271 ret = -1;
1272 break;
1273 }
1274
1275 return ret;
1276 }
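
/*
 * Editor's note: illustrative sketch, not part of machine.c.  Consumers that
 * already have a decoded event and sample (e.g. from a perf.data reader)
 * typically just hand them to the dispatcher above and treat a negative
 * return as "skip this event".  The example_* name is hypothetical.
 */
#if 0	/* example only */
static void example_dispatch(struct machine *machine, union perf_event *event,
			     struct perf_sample *sample)
{
	if (machine__process_event(machine, event, sample) < 0)
		pr_debug("skipping unhandled event type %u\n",
			 event->header.type);
}
#endif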
1277
1278 static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
1279 {
1280 if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
1281 return 1;
1282 return 0;
1283 }
1284
1285 static void ip__resolve_ams(struct machine *machine, struct thread *thread,
1286 struct addr_map_symbol *ams,
1287 u64 ip)
1288 {
1289 struct addr_location al;
1290
1291 memset(&al, 0, sizeof(al));
1292 /*
1293 * We cannot use the header.misc hint to determine whether a
1294 * branch stack address is user, kernel, guest, hypervisor.
1295 * Branches may straddle the kernel/user/hypervisor boundaries.
1296 * Thus, we have to try consecutively until we find a match;
1297 * otherwise the symbol is unknown.
1298 */
1299 thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al);
1300
1301 ams->addr = ip;
1302 ams->al_addr = al.addr;
1303 ams->sym = al.sym;
1304 ams->map = al.map;
1305 }
1306
1307 static void ip__resolve_data(struct machine *machine, struct thread *thread,
1308 u8 m, struct addr_map_symbol *ams, u64 addr)
1309 {
1310 struct addr_location al;
1311
1312 memset(&al, 0, sizeof(al));
1313
1314 thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,
1315 &al);
1316 ams->addr = addr;
1317 ams->al_addr = al.addr;
1318 ams->sym = al.sym;
1319 ams->map = al.map;
1320 }
1321
1322 struct mem_info *sample__resolve_mem(struct perf_sample *sample,
1323 struct addr_location *al)
1324 {
1325 struct mem_info *mi = zalloc(sizeof(*mi));
1326
1327 if (!mi)
1328 return NULL;
1329
1330 ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip);
1331 ip__resolve_data(al->machine, al->thread, al->cpumode,
1332 &mi->daddr, sample->addr);
1333 mi->data_src.val = sample->data_src;
1334
1335 return mi;
1336 }
1337
1338 struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
1339 struct addr_location *al)
1340 {
1341 unsigned int i;
1342 const struct branch_stack *bs = sample->branch_stack;
1343 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
1344
1345 if (!bi)
1346 return NULL;
1347
1348 for (i = 0; i < bs->nr; i++) {
1349 ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to);
1350 ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from);
1351 bi[i].flags = bs->entries[i].flags;
1352 }
1353 return bi;
1354 }
1355
1356 static int machine__resolve_callchain_sample(struct machine *machine,
1357 struct thread *thread,
1358 struct ip_callchain *chain,
1359 struct symbol **parent,
1360 struct addr_location *root_al,
1361 int max_stack)
1362 {
1363 u8 cpumode = PERF_RECORD_MISC_USER;
1364 int chain_nr = min(max_stack, (int)chain->nr);
1365 int i;
1366 int j;
1367 int err;
1368 int skip_idx __maybe_unused;
1369
1370 callchain_cursor_reset(&callchain_cursor);
1371
1372 if (chain->nr > PERF_MAX_STACK_DEPTH) {
1373 pr_warning("corrupted callchain. skipping...\n");
1374 return 0;
1375 }
1376
1377 /*
1378 * Based on DWARF debug information, some architectures skip
1379 * a callchain entry saved by the kernel.
1380 */
1381 skip_idx = arch_skip_callchain_idx(machine, thread, chain);
1382
1383 for (i = 0; i < chain_nr; i++) {
1384 u64 ip;
1385 struct addr_location al;
1386
1387 if (callchain_param.order == ORDER_CALLEE)
1388 j = i;
1389 else
1390 j = chain->nr - i - 1;
1391
1392 #ifdef HAVE_SKIP_CALLCHAIN_IDX
1393 if (j == skip_idx)
1394 continue;
1395 #endif
1396 ip = chain->ips[j];
1397
1398 if (ip >= PERF_CONTEXT_MAX) {
1399 switch (ip) {
1400 case PERF_CONTEXT_HV:
1401 cpumode = PERF_RECORD_MISC_HYPERVISOR;
1402 break;
1403 case PERF_CONTEXT_KERNEL:
1404 cpumode = PERF_RECORD_MISC_KERNEL;
1405 break;
1406 case PERF_CONTEXT_USER:
1407 cpumode = PERF_RECORD_MISC_USER;
1408 break;
1409 default:
1410 pr_debug("invalid callchain context: "
1411 "%"PRId64"\n", (s64) ip);
1412 /*
1413 * It seems the callchain is corrupted.
1414 * Discard all.
1415 */
1416 callchain_cursor_reset(&callchain_cursor);
1417 return 0;
1418 }
1419 continue;
1420 }
1421
1422 al.filtered = 0;
1423 thread__find_addr_location(thread, machine, cpumode,
1424 MAP__FUNCTION, ip, &al);
1425 if (al.sym != NULL) {
1426 if (sort__has_parent && !*parent &&
1427 symbol__match_regex(al.sym, &parent_regex))
1428 *parent = al.sym;
1429 else if (have_ignore_callees && root_al &&
1430 symbol__match_regex(al.sym, &ignore_callees_regex)) {
1431 /* Treat this symbol as the root,
1432 forgetting its callees. */
1433 *root_al = al;
1434 callchain_cursor_reset(&callchain_cursor);
1435 }
1436 }
1437
1438 err = callchain_cursor_append(&callchain_cursor,
1439 ip, al.map, al.sym);
1440 if (err)
1441 return err;
1442 }
1443
1444 return 0;
1445 }
1446
1447 static int unwind_entry(struct unwind_entry *entry, void *arg)
1448 {
1449 struct callchain_cursor *cursor = arg;
1450 return callchain_cursor_append(cursor, entry->ip,
1451 entry->map, entry->sym);
1452 }
1453
1454 int machine__resolve_callchain(struct machine *machine,
1455 struct perf_evsel *evsel,
1456 struct thread *thread,
1457 struct perf_sample *sample,
1458 struct symbol **parent,
1459 struct addr_location *root_al,
1460 int max_stack)
1461 {
1462 int ret;
1463
1464 ret = machine__resolve_callchain_sample(machine, thread,
1465 sample->callchain, parent,
1466 root_al, max_stack);
1467 if (ret)
1468 return ret;
1469
1470 /* Can we do dwarf post unwind? */
1471 if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
1472 (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
1473 return 0;
1474
1475 /* Bail out if nothing was captured. */
1476 if ((!sample->user_regs.regs) ||
1477 (!sample->user_stack.size))
1478 return 0;
1479
1480 return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
1481 thread, sample, max_stack);
1482
1483 }
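
/*
 * Editor's note: illustrative sketch, not part of machine.c.  A caller with a
 * sample that carries a callchain resolves it as below; on success the
 * resolved frames end up in the global callchain_cursor (see callchain.h).
 * The example_* name is hypothetical and the default maximum stack depth is
 * used.
 */
#if 0	/* example only */
static int example_resolve_chain(struct machine *machine,
				 struct perf_evsel *evsel,
				 struct thread *thread,
				 struct perf_sample *sample)
{
	struct symbol *parent = NULL;

	return machine__resolve_callchain(machine, evsel, thread, sample,
					  &parent, NULL /* root_al */,
					  PERF_MAX_STACK_DEPTH);
}
#endif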
1484
1485 int machine__for_each_thread(struct machine *machine,
1486 int (*fn)(struct thread *thread, void *p),
1487 void *priv)
1488 {
1489 struct rb_node *nd;
1490 struct thread *thread;
1491 int rc = 0;
1492
1493 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
1494 thread = rb_entry(nd, struct thread, rb_node);
1495 rc = fn(thread, priv);
1496 if (rc != 0)
1497 return rc;
1498 }
1499
1500 list_for_each_entry(thread, &machine->dead_threads, node) {
1501 rc = fn(thread, priv);
1502 if (rc != 0)
1503 return rc;
1504 }
1505 return rc;
1506 }
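
/*
 * Editor's note: illustrative sketch, not part of machine.c.  It shows a
 * minimal callback for machine__for_each_thread() above, counting both live
 * and dead threads.  The example_* names are hypothetical.
 */
#if 0	/* example only */
static int example_count_thread(struct thread *thread __maybe_unused, void *priv)
{
	int *count = priv;

	(*count)++;
	return 0;	/* a non-zero return would stop the iteration early */
}

static int example_nr_threads(struct machine *machine)
{
	int count = 0;

	machine__for_each_thread(machine, example_count_thread, &count);
	return count;
}
#endif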
1507
1508 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1509 struct target *target, struct thread_map *threads,
1510 perf_event__handler_t process, bool data_mmap)
1511 {
1512 if (target__has_task(target))
1513 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
1514 else if (target__has_cpu(target))
1515 return perf_event__synthesize_threads(tool, process, machine, data_mmap);
1516 /* command specified */
1517 return 0;
1518 }
1519
1520 pid_t machine__get_current_tid(struct machine *machine, int cpu)
1521 {
1522 if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
1523 return -1;
1524
1525 return machine->current_tid[cpu];
1526 }
1527
1528 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
1529 pid_t tid)
1530 {
1531 struct thread *thread;
1532
1533 if (cpu < 0)
1534 return -EINVAL;
1535
1536 if (!machine->current_tid) {
1537 int i;
1538
1539 machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
1540 if (!machine->current_tid)
1541 return -ENOMEM;
1542 for (i = 0; i < MAX_NR_CPUS; i++)
1543 machine->current_tid[i] = -1;
1544 }
1545
1546 if (cpu >= MAX_NR_CPUS) {
1547 pr_err("Requested CPU %d too large. ", cpu);
1548 pr_err("Consider raising MAX_NR_CPUS\n");
1549 return -EINVAL;
1550 }
1551
1552 machine->current_tid[cpu] = tid;
1553
1554 thread = machine__findnew_thread(machine, pid, tid);
1555 if (!thread)
1556 return -ENOMEM;
1557
1558 thread->cpu = cpu;
1559
1560 return 0;
1561 }
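
/*
 * Editor's note: illustrative sketch, not part of machine.c.  It pairs the
 * two per-CPU helpers above: record which tid is currently running on a CPU,
 * then query it back.  The CPU/pid/tid values and the example_* name are
 * hypothetical.
 */
#if 0	/* example only */
static void example_current_tid(struct machine *machine)
{
	if (machine__set_current_tid(machine, 2, 1000, 1001) < 0)
		return;

	/* Returns 1001 now, or -1 for CPUs that were never set. */
	pr_debug("cpu 2 is running tid %d\n",
		 machine__get_current_tid(machine, 2));
}
#endif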