13 #include <symbol/kallsyms.h>
16 int machine__init(struct machine
*machine
, const char *root_dir
, pid_t pid
)
18 map_groups__init(&machine
->kmaps
);
19 RB_CLEAR_NODE(&machine
->rb_node
);
20 INIT_LIST_HEAD(&machine
->user_dsos
);
21 INIT_LIST_HEAD(&machine
->kernel_dsos
);
23 machine
->threads
= RB_ROOT
;
24 INIT_LIST_HEAD(&machine
->dead_threads
);
25 machine
->last_match
= NULL
;
27 machine
->vdso_info
= NULL
;
29 machine
->kmaps
.machine
= machine
;
32 machine
->symbol_filter
= NULL
;
33 machine
->id_hdr_size
= 0;
34 machine
->comm_exec
= false;
36 machine
->root_dir
= strdup(root_dir
);
37 if (machine
->root_dir
== NULL
)
40 if (pid
!= HOST_KERNEL_ID
) {
41 struct thread
*thread
= machine__findnew_thread(machine
, -1,
48 snprintf(comm
, sizeof(comm
), "[guest/%d]", pid
);
49 thread__set_comm(thread
, comm
, 0);
52 machine
->current_tid
= NULL
;
57 struct machine
*machine__new_host(void)
59 struct machine
*machine
= malloc(sizeof(*machine
));
61 if (machine
!= NULL
) {
62 machine__init(machine
, "", HOST_KERNEL_ID
);
64 if (machine__create_kernel_maps(machine
) < 0)
74 static void dsos__delete(struct list_head
*dsos
)
78 list_for_each_entry_safe(pos
, n
, dsos
, node
) {
84 void machine__delete_dead_threads(struct machine
*machine
)
88 list_for_each_entry_safe(t
, n
, &machine
->dead_threads
, node
) {
94 void machine__delete_threads(struct machine
*machine
)
96 struct rb_node
*nd
= rb_first(&machine
->threads
);
99 struct thread
*t
= rb_entry(nd
, struct thread
, rb_node
);
101 rb_erase(&t
->rb_node
, &machine
->threads
);
107 void machine__exit(struct machine
*machine
)
109 map_groups__exit(&machine
->kmaps
);
110 dsos__delete(&machine
->user_dsos
);
111 dsos__delete(&machine
->kernel_dsos
);
113 zfree(&machine
->root_dir
);
114 zfree(&machine
->current_tid
);
/*
 * machine__delete - destroy and free a heap-allocated machine.
 * @machine: machine obtained via malloc() (see machine__new_host() /
 *           machines__add(), which both malloc the struct).
 *
 * Runs machine__exit() to release owned resources, then frees the
 * object itself; @machine must not be used afterwards.
 */
void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}
123 void machines__init(struct machines
*machines
)
125 machine__init(&machines
->host
, "", HOST_KERNEL_ID
);
126 machines
->guests
= RB_ROOT
;
127 machines
->symbol_filter
= NULL
;
130 void machines__exit(struct machines
*machines
)
132 machine__exit(&machines
->host
);
136 struct machine
*machines__add(struct machines
*machines
, pid_t pid
,
137 const char *root_dir
)
139 struct rb_node
**p
= &machines
->guests
.rb_node
;
140 struct rb_node
*parent
= NULL
;
141 struct machine
*pos
, *machine
= malloc(sizeof(*machine
));
146 if (machine__init(machine
, root_dir
, pid
) != 0) {
151 machine
->symbol_filter
= machines
->symbol_filter
;
155 pos
= rb_entry(parent
, struct machine
, rb_node
);
162 rb_link_node(&machine
->rb_node
, parent
, p
);
163 rb_insert_color(&machine
->rb_node
, &machines
->guests
);
168 void machines__set_symbol_filter(struct machines
*machines
,
169 symbol_filter_t symbol_filter
)
173 machines
->symbol_filter
= symbol_filter
;
174 machines
->host
.symbol_filter
= symbol_filter
;
176 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
177 struct machine
*machine
= rb_entry(nd
, struct machine
, rb_node
);
179 machine
->symbol_filter
= symbol_filter
;
183 void machines__set_comm_exec(struct machines
*machines
, bool comm_exec
)
187 machines
->host
.comm_exec
= comm_exec
;
189 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
190 struct machine
*machine
= rb_entry(nd
, struct machine
, rb_node
);
192 machine
->comm_exec
= comm_exec
;
196 struct machine
*machines__find(struct machines
*machines
, pid_t pid
)
198 struct rb_node
**p
= &machines
->guests
.rb_node
;
199 struct rb_node
*parent
= NULL
;
200 struct machine
*machine
;
201 struct machine
*default_machine
= NULL
;
203 if (pid
== HOST_KERNEL_ID
)
204 return &machines
->host
;
208 machine
= rb_entry(parent
, struct machine
, rb_node
);
209 if (pid
< machine
->pid
)
211 else if (pid
> machine
->pid
)
216 default_machine
= machine
;
219 return default_machine
;
222 struct machine
*machines__findnew(struct machines
*machines
, pid_t pid
)
225 const char *root_dir
= "";
226 struct machine
*machine
= machines__find(machines
, pid
);
228 if (machine
&& (machine
->pid
== pid
))
231 if ((pid
!= HOST_KERNEL_ID
) &&
232 (pid
!= DEFAULT_GUEST_KERNEL_ID
) &&
233 (symbol_conf
.guestmount
)) {
234 sprintf(path
, "%s/%d", symbol_conf
.guestmount
, pid
);
235 if (access(path
, R_OK
)) {
236 static struct strlist
*seen
;
239 seen
= strlist__new(true, NULL
);
241 if (!strlist__has_entry(seen
, path
)) {
242 pr_err("Can't access file %s\n", path
);
243 strlist__add(seen
, path
);
251 machine
= machines__add(machines
, pid
, root_dir
);
256 void machines__process_guests(struct machines
*machines
,
257 machine__process_t process
, void *data
)
261 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
262 struct machine
*pos
= rb_entry(nd
, struct machine
, rb_node
);
267 char *machine__mmap_name(struct machine
*machine
, char *bf
, size_t size
)
269 if (machine__is_host(machine
))
270 snprintf(bf
, size
, "[%s]", "kernel.kallsyms");
271 else if (machine__is_default_guest(machine
))
272 snprintf(bf
, size
, "[%s]", "guest.kernel.kallsyms");
274 snprintf(bf
, size
, "[%s.%d]", "guest.kernel.kallsyms",
281 void machines__set_id_hdr_size(struct machines
*machines
, u16 id_hdr_size
)
283 struct rb_node
*node
;
284 struct machine
*machine
;
286 machines
->host
.id_hdr_size
= id_hdr_size
;
288 for (node
= rb_first(&machines
->guests
); node
; node
= rb_next(node
)) {
289 machine
= rb_entry(node
, struct machine
, rb_node
);
290 machine
->id_hdr_size
= id_hdr_size
;
296 static void machine__update_thread_pid(struct machine
*machine
,
297 struct thread
*th
, pid_t pid
)
299 struct thread
*leader
;
301 if (pid
== th
->pid_
|| pid
== -1 || th
->pid_
!= -1)
306 if (th
->pid_
== th
->tid
)
309 leader
= machine__findnew_thread(machine
, th
->pid_
, th
->pid_
);
314 leader
->mg
= map_groups__new();
319 if (th
->mg
== leader
->mg
)
324 * Maps are created from MMAP events which provide the pid and
325 * tid. Consequently there never should be any maps on a thread
326 * with an unknown pid. Just print an error if there are.
328 if (!map_groups__empty(th
->mg
))
329 pr_err("Discarding thread maps for %d:%d\n",
331 map_groups__delete(th
->mg
);
334 th
->mg
= map_groups__get(leader
->mg
);
339 pr_err("Failed to join map groups for %d:%d\n", th
->pid_
, th
->tid
);
342 static struct thread
*__machine__findnew_thread(struct machine
*machine
,
343 pid_t pid
, pid_t tid
,
346 struct rb_node
**p
= &machine
->threads
.rb_node
;
347 struct rb_node
*parent
= NULL
;
351 * Front-end cache - TID lookups come in blocks,
352 * so most of the time we dont have to look up
355 th
= machine
->last_match
;
356 if (th
&& th
->tid
== tid
) {
357 machine__update_thread_pid(machine
, th
, pid
);
363 th
= rb_entry(parent
, struct thread
, rb_node
);
365 if (th
->tid
== tid
) {
366 machine
->last_match
= th
;
367 machine__update_thread_pid(machine
, th
, pid
);
380 th
= thread__new(pid
, tid
);
382 rb_link_node(&th
->rb_node
, parent
, p
);
383 rb_insert_color(&th
->rb_node
, &machine
->threads
);
384 machine
->last_match
= th
;
387 * We have to initialize map_groups separately
388 * after rb tree is updated.
390 * The reason is that we call machine__findnew_thread
391 * within thread__init_map_groups to find the thread
392 * leader and that would screwed the rb tree.
394 if (thread__init_map_groups(th
, machine
)) {
403 struct thread
*machine__findnew_thread(struct machine
*machine
, pid_t pid
,
406 return __machine__findnew_thread(machine
, pid
, tid
, true);
409 struct thread
*machine__find_thread(struct machine
*machine
, pid_t pid
,
412 return __machine__findnew_thread(machine
, pid
, tid
, false);
415 struct comm
*machine__thread_exec_comm(struct machine
*machine
,
416 struct thread
*thread
)
418 if (machine
->comm_exec
)
419 return thread__exec_comm(thread
);
421 return thread__comm(thread
);
424 int machine__process_comm_event(struct machine
*machine
, union perf_event
*event
,
425 struct perf_sample
*sample
)
427 struct thread
*thread
= machine__findnew_thread(machine
,
430 bool exec
= event
->header
.misc
& PERF_RECORD_MISC_COMM_EXEC
;
433 machine
->comm_exec
= true;
436 perf_event__fprintf_comm(event
, stdout
);
438 if (thread
== NULL
||
439 __thread__set_comm(thread
, event
->comm
.comm
, sample
->time
, exec
)) {
440 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
/*
 * Handle a PERF_RECORD_LOST event: just dump the id and the number of
 * lost samples when -D/dump mode is active.
 * NOTE(review): this source is extraction-garbled; the function body's
 * closing statements (original lines 452-453, presumably "return 0;")
 * are missing from this view — confirm against the full file.
 */
447 int machine__process_lost_event(struct machine
*machine __maybe_unused
,
448 union perf_event
*event
, struct perf_sample
*sample __maybe_unused
)
450 dump_printf(": id:%" PRIu64
": lost:%" PRIu64
"\n",
451 event
->lost
.id
, event
->lost
.lost
);
455 struct map
*machine__new_module(struct machine
*machine
, u64 start
,
456 const char *filename
)
459 struct dso
*dso
= __dsos__findnew(&machine
->kernel_dsos
, filename
);
464 map
= map__new2(start
, dso
, MAP__FUNCTION
);
468 if (machine__is_host(machine
))
469 dso
->symtab_type
= DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE
;
471 dso
->symtab_type
= DSO_BINARY_TYPE__GUEST_KMODULE
;
472 map_groups__insert(&machine
->kmaps
, map
);
476 size_t machines__fprintf_dsos(struct machines
*machines
, FILE *fp
)
479 size_t ret
= __dsos__fprintf(&machines
->host
.kernel_dsos
, fp
) +
480 __dsos__fprintf(&machines
->host
.user_dsos
, fp
);
482 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
483 struct machine
*pos
= rb_entry(nd
, struct machine
, rb_node
);
484 ret
+= __dsos__fprintf(&pos
->kernel_dsos
, fp
);
485 ret
+= __dsos__fprintf(&pos
->user_dsos
, fp
);
491 size_t machine__fprintf_dsos_buildid(struct machine
*machine
, FILE *fp
,
492 bool (skip
)(struct dso
*dso
, int parm
), int parm
)
494 return __dsos__fprintf_buildid(&machine
->kernel_dsos
, fp
, skip
, parm
) +
495 __dsos__fprintf_buildid(&machine
->user_dsos
, fp
, skip
, parm
);
498 size_t machines__fprintf_dsos_buildid(struct machines
*machines
, FILE *fp
,
499 bool (skip
)(struct dso
*dso
, int parm
), int parm
)
502 size_t ret
= machine__fprintf_dsos_buildid(&machines
->host
, fp
, skip
, parm
);
504 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
505 struct machine
*pos
= rb_entry(nd
, struct machine
, rb_node
);
506 ret
+= machine__fprintf_dsos_buildid(pos
, fp
, skip
, parm
);
511 size_t machine__fprintf_vmlinux_path(struct machine
*machine
, FILE *fp
)
515 struct dso
*kdso
= machine
->vmlinux_maps
[MAP__FUNCTION
]->dso
;
517 if (kdso
->has_build_id
) {
518 char filename
[PATH_MAX
];
519 if (dso__build_id_filename(kdso
, filename
, sizeof(filename
)))
520 printed
+= fprintf(fp
, "[0] %s\n", filename
);
523 for (i
= 0; i
< vmlinux_path__nr_entries
; ++i
)
524 printed
+= fprintf(fp
, "[%d] %s\n",
525 i
+ kdso
->has_build_id
, vmlinux_path
[i
]);
530 size_t machine__fprintf(struct machine
*machine
, FILE *fp
)
535 for (nd
= rb_first(&machine
->threads
); nd
; nd
= rb_next(nd
)) {
536 struct thread
*pos
= rb_entry(nd
, struct thread
, rb_node
);
538 ret
+= thread__fprintf(pos
, fp
);
544 static struct dso
*machine__get_kernel(struct machine
*machine
)
546 const char *vmlinux_name
= NULL
;
549 if (machine__is_host(machine
)) {
550 vmlinux_name
= symbol_conf
.vmlinux_name
;
552 vmlinux_name
= "[kernel.kallsyms]";
554 kernel
= dso__kernel_findnew(machine
, vmlinux_name
,
560 if (machine__is_default_guest(machine
))
561 vmlinux_name
= symbol_conf
.default_guest_vmlinux_name
;
563 vmlinux_name
= machine__mmap_name(machine
, bf
,
566 kernel
= dso__kernel_findnew(machine
, vmlinux_name
,
568 DSO_TYPE_GUEST_KERNEL
);
571 if (kernel
!= NULL
&& (!kernel
->has_build_id
))
572 dso__read_running_kernel_build_id(kernel
, machine
);
577 struct process_args
{
581 static void machine__get_kallsyms_filename(struct machine
*machine
, char *buf
,
584 if (machine__is_default_guest(machine
))
585 scnprintf(buf
, bufsz
, "%s", symbol_conf
.default_guest_kallsyms
);
587 scnprintf(buf
, bufsz
, "%s/proc/kallsyms", machine
->root_dir
);
590 const char *ref_reloc_sym_names
[] = {"_text", "_stext", NULL
};
592 /* Figure out the start address of kernel map from /proc/kallsyms.
593 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
594 * symbol_name if it's not that important.
596 static u64
machine__get_kernel_start_addr(struct machine
*machine
,
597 const char **symbol_name
)
599 char filename
[PATH_MAX
];
604 machine__get_kallsyms_filename(machine
, filename
, PATH_MAX
);
606 if (symbol__restricted_filename(filename
, "/proc/kallsyms"))
609 for (i
= 0; (name
= ref_reloc_sym_names
[i
]) != NULL
; i
++) {
610 addr
= kallsyms__get_function_start(filename
, name
);
621 int __machine__create_kernel_maps(struct machine
*machine
, struct dso
*kernel
)
624 u64 start
= machine__get_kernel_start_addr(machine
, NULL
);
626 for (type
= 0; type
< MAP__NR_TYPES
; ++type
) {
629 machine
->vmlinux_maps
[type
] = map__new2(start
, kernel
, type
);
630 if (machine
->vmlinux_maps
[type
] == NULL
)
633 machine
->vmlinux_maps
[type
]->map_ip
=
634 machine
->vmlinux_maps
[type
]->unmap_ip
=
636 kmap
= map__kmap(machine
->vmlinux_maps
[type
]);
637 kmap
->kmaps
= &machine
->kmaps
;
638 map_groups__insert(&machine
->kmaps
,
639 machine
->vmlinux_maps
[type
]);
645 void machine__destroy_kernel_maps(struct machine
*machine
)
649 for (type
= 0; type
< MAP__NR_TYPES
; ++type
) {
652 if (machine
->vmlinux_maps
[type
] == NULL
)
655 kmap
= map__kmap(machine
->vmlinux_maps
[type
]);
656 map_groups__remove(&machine
->kmaps
,
657 machine
->vmlinux_maps
[type
]);
658 if (kmap
->ref_reloc_sym
) {
660 * ref_reloc_sym is shared among all maps, so free just
663 if (type
== MAP__FUNCTION
) {
664 zfree((char **)&kmap
->ref_reloc_sym
->name
);
665 zfree(&kmap
->ref_reloc_sym
);
667 kmap
->ref_reloc_sym
= NULL
;
670 map__delete(machine
->vmlinux_maps
[type
]);
671 machine
->vmlinux_maps
[type
] = NULL
;
675 int machines__create_guest_kernel_maps(struct machines
*machines
)
678 struct dirent
**namelist
= NULL
;
684 if (symbol_conf
.default_guest_vmlinux_name
||
685 symbol_conf
.default_guest_modules
||
686 symbol_conf
.default_guest_kallsyms
) {
687 machines__create_kernel_maps(machines
, DEFAULT_GUEST_KERNEL_ID
);
690 if (symbol_conf
.guestmount
) {
691 items
= scandir(symbol_conf
.guestmount
, &namelist
, NULL
, NULL
);
694 for (i
= 0; i
< items
; i
++) {
695 if (!isdigit(namelist
[i
]->d_name
[0])) {
696 /* Filter out . and .. */
699 pid
= (pid_t
)strtol(namelist
[i
]->d_name
, &endp
, 10);
700 if ((*endp
!= '\0') ||
701 (endp
== namelist
[i
]->d_name
) ||
703 pr_debug("invalid directory (%s). Skipping.\n",
704 namelist
[i
]->d_name
);
707 sprintf(path
, "%s/%s/proc/kallsyms",
708 symbol_conf
.guestmount
,
709 namelist
[i
]->d_name
);
710 ret
= access(path
, R_OK
);
712 pr_debug("Can't access file %s\n", path
);
715 machines__create_kernel_maps(machines
, pid
);
724 void machines__destroy_kernel_maps(struct machines
*machines
)
726 struct rb_node
*next
= rb_first(&machines
->guests
);
728 machine__destroy_kernel_maps(&machines
->host
);
731 struct machine
*pos
= rb_entry(next
, struct machine
, rb_node
);
733 next
= rb_next(&pos
->rb_node
);
734 rb_erase(&pos
->rb_node
, &machines
->guests
);
735 machine__delete(pos
);
739 int machines__create_kernel_maps(struct machines
*machines
, pid_t pid
)
741 struct machine
*machine
= machines__findnew(machines
, pid
);
746 return machine__create_kernel_maps(machine
);
749 int machine__load_kallsyms(struct machine
*machine
, const char *filename
,
750 enum map_type type
, symbol_filter_t filter
)
752 struct map
*map
= machine
->vmlinux_maps
[type
];
753 int ret
= dso__load_kallsyms(map
->dso
, filename
, map
, filter
);
756 dso__set_loaded(map
->dso
, type
);
758 * Since /proc/kallsyms will have multiple sessions for the
759 * kernel, with modules between them, fixup the end of all
762 __map_groups__fixup_end(&machine
->kmaps
, type
);
768 int machine__load_vmlinux_path(struct machine
*machine
, enum map_type type
,
769 symbol_filter_t filter
)
771 struct map
*map
= machine
->vmlinux_maps
[type
];
772 int ret
= dso__load_vmlinux_path(map
->dso
, map
, filter
);
775 dso__set_loaded(map
->dso
, type
);
780 static void map_groups__fixup_end(struct map_groups
*mg
)
783 for (i
= 0; i
< MAP__NR_TYPES
; ++i
)
784 __map_groups__fixup_end(mg
, i
);
787 static char *get_kernel_version(const char *root_dir
)
789 char version
[PATH_MAX
];
792 const char *prefix
= "Linux version ";
794 sprintf(version
, "%s/proc/version", root_dir
);
795 file
= fopen(version
, "r");
800 tmp
= fgets(version
, sizeof(version
), file
);
803 name
= strstr(version
, prefix
);
806 name
+= strlen(prefix
);
807 tmp
= strchr(name
, ' ');
814 static int map_groups__set_modules_path_dir(struct map_groups
*mg
,
815 const char *dir_name
, int depth
)
818 DIR *dir
= opendir(dir_name
);
822 pr_debug("%s: cannot open %s dir\n", __func__
, dir_name
);
826 while ((dent
= readdir(dir
)) != NULL
) {
830 /*sshfs might return bad dent->d_type, so we have to stat*/
831 snprintf(path
, sizeof(path
), "%s/%s", dir_name
, dent
->d_name
);
835 if (S_ISDIR(st
.st_mode
)) {
836 if (!strcmp(dent
->d_name
, ".") ||
837 !strcmp(dent
->d_name
, ".."))
840 /* Do not follow top-level source and build symlinks */
842 if (!strcmp(dent
->d_name
, "source") ||
843 !strcmp(dent
->d_name
, "build"))
847 ret
= map_groups__set_modules_path_dir(mg
, path
,
852 char *dot
= strrchr(dent
->d_name
, '.'),
857 if (dot
== NULL
|| strcmp(dot
, ".ko"))
859 snprintf(dso_name
, sizeof(dso_name
), "[%.*s]",
860 (int)(dot
- dent
->d_name
), dent
->d_name
);
862 strxfrchar(dso_name
, '-', '_');
863 map
= map_groups__find_by_name(mg
, MAP__FUNCTION
,
868 long_name
= strdup(path
);
869 if (long_name
== NULL
) {
873 dso__set_long_name(map
->dso
, long_name
, true);
874 dso__kernel_module_get_build_id(map
->dso
, "");
883 static int machine__set_modules_path(struct machine
*machine
)
886 char modules_path
[PATH_MAX
];
888 version
= get_kernel_version(machine
->root_dir
);
892 snprintf(modules_path
, sizeof(modules_path
), "%s/lib/modules/%s",
893 machine
->root_dir
, version
);
896 return map_groups__set_modules_path_dir(&machine
->kmaps
, modules_path
, 0);
899 static int machine__create_module(void *arg
, const char *name
, u64 start
)
901 struct machine
*machine
= arg
;
904 map
= machine__new_module(machine
, start
, name
);
908 dso__kernel_module_get_build_id(map
->dso
, machine
->root_dir
);
913 static int machine__create_modules(struct machine
*machine
)
918 if (machine__is_default_guest(machine
)) {
919 modules
= symbol_conf
.default_guest_modules
;
921 snprintf(path
, PATH_MAX
, "%s/proc/modules", machine
->root_dir
);
925 if (symbol__restricted_filename(modules
, "/proc/modules"))
928 if (modules__parse(modules
, machine
, machine__create_module
))
931 if (!machine__set_modules_path(machine
))
934 pr_debug("Problems setting modules path maps, continuing anyway...\n");
939 int machine__create_kernel_maps(struct machine
*machine
)
941 struct dso
*kernel
= machine__get_kernel(machine
);
943 u64 addr
= machine__get_kernel_start_addr(machine
, &name
);
947 if (kernel
== NULL
||
948 __machine__create_kernel_maps(machine
, kernel
) < 0)
951 if (symbol_conf
.use_modules
&& machine__create_modules(machine
) < 0) {
952 if (machine__is_host(machine
))
953 pr_debug("Problems creating module maps, "
954 "continuing anyway...\n");
956 pr_debug("Problems creating module maps for guest %d, "
957 "continuing anyway...\n", machine
->pid
);
961 * Now that we have all the maps created, just set the ->end of them:
963 map_groups__fixup_end(&machine
->kmaps
);
965 if (maps__set_kallsyms_ref_reloc_sym(machine
->vmlinux_maps
, name
,
967 machine__destroy_kernel_maps(machine
);
974 static void machine__set_kernel_mmap_len(struct machine
*machine
,
975 union perf_event
*event
)
979 for (i
= 0; i
< MAP__NR_TYPES
; i
++) {
980 machine
->vmlinux_maps
[i
]->start
= event
->mmap
.start
;
981 machine
->vmlinux_maps
[i
]->end
= (event
->mmap
.start
+
984 * Be a bit paranoid here, some perf.data file came with
985 * a zero sized synthesized MMAP event for the kernel.
987 if (machine
->vmlinux_maps
[i
]->end
== 0)
988 machine
->vmlinux_maps
[i
]->end
= ~0ULL;
992 static bool machine__uses_kcore(struct machine
*machine
)
996 list_for_each_entry(dso
, &machine
->kernel_dsos
, node
) {
997 if (dso__is_kcore(dso
))
1004 static int machine__process_kernel_mmap_event(struct machine
*machine
,
1005 union perf_event
*event
)
1008 char kmmap_prefix
[PATH_MAX
];
1009 enum dso_kernel_type kernel_type
;
1010 bool is_kernel_mmap
;
1012 /* If we have maps from kcore then we do not need or want any others */
1013 if (machine__uses_kcore(machine
))
1016 machine__mmap_name(machine
, kmmap_prefix
, sizeof(kmmap_prefix
));
1017 if (machine__is_host(machine
))
1018 kernel_type
= DSO_TYPE_KERNEL
;
1020 kernel_type
= DSO_TYPE_GUEST_KERNEL
;
1022 is_kernel_mmap
= memcmp(event
->mmap
.filename
,
1024 strlen(kmmap_prefix
) - 1) == 0;
1025 if (event
->mmap
.filename
[0] == '/' ||
1026 (!is_kernel_mmap
&& event
->mmap
.filename
[0] == '[')) {
1028 char short_module_name
[1024];
1031 if (event
->mmap
.filename
[0] == '/') {
1032 name
= strrchr(event
->mmap
.filename
, '/');
1036 ++name
; /* skip / */
1037 dot
= strrchr(name
, '.');
1040 snprintf(short_module_name
, sizeof(short_module_name
),
1041 "[%.*s]", (int)(dot
- name
), name
);
1042 strxfrchar(short_module_name
, '-', '_');
1044 strcpy(short_module_name
, event
->mmap
.filename
);
1046 map
= machine__new_module(machine
, event
->mmap
.start
,
1047 event
->mmap
.filename
);
1051 name
= strdup(short_module_name
);
1055 dso__set_short_name(map
->dso
, name
, true);
1056 map
->end
= map
->start
+ event
->mmap
.len
;
1057 } else if (is_kernel_mmap
) {
1058 const char *symbol_name
= (event
->mmap
.filename
+
1059 strlen(kmmap_prefix
));
1061 * Should be there already, from the build-id table in
1064 struct dso
*kernel
= __dsos__findnew(&machine
->kernel_dsos
,
1069 kernel
->kernel
= kernel_type
;
1070 if (__machine__create_kernel_maps(machine
, kernel
) < 0)
1073 machine__set_kernel_mmap_len(machine
, event
);
1076 * Avoid using a zero address (kptr_restrict) for the ref reloc
1077 * symbol. Effectively having zero here means that at record
1078 * time /proc/sys/kernel/kptr_restrict was non zero.
1080 if (event
->mmap
.pgoff
!= 0) {
1081 maps__set_kallsyms_ref_reloc_sym(machine
->vmlinux_maps
,
1086 if (machine__is_default_guest(machine
)) {
1088 * preload dso of guest kernel and modules
1090 dso__load(kernel
, machine
->vmlinux_maps
[MAP__FUNCTION
],
1099 int machine__process_mmap2_event(struct machine
*machine
,
1100 union perf_event
*event
,
1101 struct perf_sample
*sample __maybe_unused
)
1103 u8 cpumode
= event
->header
.misc
& PERF_RECORD_MISC_CPUMODE_MASK
;
1104 struct thread
*thread
;
1110 perf_event__fprintf_mmap2(event
, stdout
);
1112 if (cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
||
1113 cpumode
== PERF_RECORD_MISC_KERNEL
) {
1114 ret
= machine__process_kernel_mmap_event(machine
, event
);
1120 thread
= machine__findnew_thread(machine
, event
->mmap2
.pid
,
1125 if (event
->header
.misc
& PERF_RECORD_MISC_MMAP_DATA
)
1126 type
= MAP__VARIABLE
;
1128 type
= MAP__FUNCTION
;
1130 map
= map__new(machine
, event
->mmap2
.start
,
1131 event
->mmap2
.len
, event
->mmap2
.pgoff
,
1132 event
->mmap2
.pid
, event
->mmap2
.maj
,
1133 event
->mmap2
.min
, event
->mmap2
.ino
,
1134 event
->mmap2
.ino_generation
,
1137 event
->mmap2
.filename
, type
, thread
);
1142 thread__insert_map(thread
, map
);
1146 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1150 int machine__process_mmap_event(struct machine
*machine
, union perf_event
*event
,
1151 struct perf_sample
*sample __maybe_unused
)
1153 u8 cpumode
= event
->header
.misc
& PERF_RECORD_MISC_CPUMODE_MASK
;
1154 struct thread
*thread
;
1160 perf_event__fprintf_mmap(event
, stdout
);
1162 if (cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
||
1163 cpumode
== PERF_RECORD_MISC_KERNEL
) {
1164 ret
= machine__process_kernel_mmap_event(machine
, event
);
1170 thread
= machine__findnew_thread(machine
, event
->mmap
.pid
,
1175 if (event
->header
.misc
& PERF_RECORD_MISC_MMAP_DATA
)
1176 type
= MAP__VARIABLE
;
1178 type
= MAP__FUNCTION
;
1180 map
= map__new(machine
, event
->mmap
.start
,
1181 event
->mmap
.len
, event
->mmap
.pgoff
,
1182 event
->mmap
.pid
, 0, 0, 0, 0, 0, 0,
1183 event
->mmap
.filename
,
1189 thread__insert_map(thread
, map
);
1193 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1197 static void machine__remove_thread(struct machine
*machine
, struct thread
*th
)
1199 machine
->last_match
= NULL
;
1200 rb_erase(&th
->rb_node
, &machine
->threads
);
1202 * We may have references to this thread, for instance in some hist_entry
1203 * instances, so just move them to a separate list.
1205 list_add_tail(&th
->node
, &machine
->dead_threads
);
1208 int machine__process_fork_event(struct machine
*machine
, union perf_event
*event
,
1209 struct perf_sample
*sample
)
1211 struct thread
*thread
= machine__find_thread(machine
,
1214 struct thread
*parent
= machine__findnew_thread(machine
,
1218 /* if a thread currently exists for the thread id remove it */
1220 machine__remove_thread(machine
, thread
);
1222 thread
= machine__findnew_thread(machine
, event
->fork
.pid
,
1225 perf_event__fprintf_task(event
, stdout
);
1227 if (thread
== NULL
|| parent
== NULL
||
1228 thread__fork(thread
, parent
, sample
->time
) < 0) {
1229 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1236 int machine__process_exit_event(struct machine
*machine
, union perf_event
*event
,
1237 struct perf_sample
*sample __maybe_unused
)
1239 struct thread
*thread
= machine__find_thread(machine
,
1244 perf_event__fprintf_task(event
, stdout
);
1247 thread__exited(thread
);
1252 int machine__process_event(struct machine
*machine
, union perf_event
*event
,
1253 struct perf_sample
*sample
)
1257 switch (event
->header
.type
) {
1258 case PERF_RECORD_COMM
:
1259 ret
= machine__process_comm_event(machine
, event
, sample
); break;
1260 case PERF_RECORD_MMAP
:
1261 ret
= machine__process_mmap_event(machine
, event
, sample
); break;
1262 case PERF_RECORD_MMAP2
:
1263 ret
= machine__process_mmap2_event(machine
, event
, sample
); break;
1264 case PERF_RECORD_FORK
:
1265 ret
= machine__process_fork_event(machine
, event
, sample
); break;
1266 case PERF_RECORD_EXIT
:
1267 ret
= machine__process_exit_event(machine
, event
, sample
); break;
1268 case PERF_RECORD_LOST
:
1269 ret
= machine__process_lost_event(machine
, event
, sample
); break;
/*
 * Test whether a symbol has a name matching @regex (regexec() returns 0
 * on match, hence the negation).
 * NOTE(review): this source is extraction-garbled; the return statements
 * following the condition (original lines 1281-1282) are missing from
 * this view — confirm against the full file.
 */
1278 static bool symbol__match_regex(struct symbol
*sym
, regex_t
*regex
)
1280 if (sym
->name
&& !regexec(regex
, sym
->name
, 0, NULL
, 0))
1285 static void ip__resolve_ams(struct machine
*machine
, struct thread
*thread
,
1286 struct addr_map_symbol
*ams
,
1289 struct addr_location al
;
1291 memset(&al
, 0, sizeof(al
));
1293 * We cannot use the header.misc hint to determine whether a
1294 * branch stack address is user, kernel, guest, hypervisor.
1295 * Branches may straddle the kernel/user/hypervisor boundaries.
1296 * Thus, we have to try consecutively until we find a match
1297 * or else, the symbol is unknown
1299 thread__find_cpumode_addr_location(thread
, machine
, MAP__FUNCTION
, ip
, &al
);
1302 ams
->al_addr
= al
.addr
;
1307 static void ip__resolve_data(struct machine
*machine
, struct thread
*thread
,
1308 u8 m
, struct addr_map_symbol
*ams
, u64 addr
)
1310 struct addr_location al
;
1312 memset(&al
, 0, sizeof(al
));
1314 thread__find_addr_location(thread
, machine
, m
, MAP__VARIABLE
, addr
,
1317 ams
->al_addr
= al
.addr
;
1322 struct mem_info
*sample__resolve_mem(struct perf_sample
*sample
,
1323 struct addr_location
*al
)
1325 struct mem_info
*mi
= zalloc(sizeof(*mi
));
1330 ip__resolve_ams(al
->machine
, al
->thread
, &mi
->iaddr
, sample
->ip
);
1331 ip__resolve_data(al
->machine
, al
->thread
, al
->cpumode
,
1332 &mi
->daddr
, sample
->addr
);
1333 mi
->data_src
.val
= sample
->data_src
;
1338 struct branch_info
*sample__resolve_bstack(struct perf_sample
*sample
,
1339 struct addr_location
*al
)
1342 const struct branch_stack
*bs
= sample
->branch_stack
;
1343 struct branch_info
*bi
= calloc(bs
->nr
, sizeof(struct branch_info
));
1348 for (i
= 0; i
< bs
->nr
; i
++) {
1349 ip__resolve_ams(al
->machine
, al
->thread
, &bi
[i
].to
, bs
->entries
[i
].to
);
1350 ip__resolve_ams(al
->machine
, al
->thread
, &bi
[i
].from
, bs
->entries
[i
].from
);
1351 bi
[i
].flags
= bs
->entries
[i
].flags
;
1356 static int machine__resolve_callchain_sample(struct machine
*machine
,
1357 struct thread
*thread
,
1358 struct ip_callchain
*chain
,
1359 struct symbol
**parent
,
1360 struct addr_location
*root_al
,
1363 u8 cpumode
= PERF_RECORD_MISC_USER
;
1364 int chain_nr
= min(max_stack
, (int)chain
->nr
);
1368 int skip_idx __maybe_unused
;
1370 callchain_cursor_reset(&callchain_cursor
);
1372 if (chain
->nr
> PERF_MAX_STACK_DEPTH
) {
1373 pr_warning("corrupted callchain. skipping...\n");
1378 * Based on DWARF debug information, some architectures skip
1379 * a callchain entry saved by the kernel.
1381 skip_idx
= arch_skip_callchain_idx(machine
, thread
, chain
);
1383 for (i
= 0; i
< chain_nr
; i
++) {
1385 struct addr_location al
;
1387 if (callchain_param
.order
== ORDER_CALLEE
)
1390 j
= chain
->nr
- i
- 1;
1392 #ifdef HAVE_SKIP_CALLCHAIN_IDX
1398 if (ip
>= PERF_CONTEXT_MAX
) {
1400 case PERF_CONTEXT_HV
:
1401 cpumode
= PERF_RECORD_MISC_HYPERVISOR
;
1403 case PERF_CONTEXT_KERNEL
:
1404 cpumode
= PERF_RECORD_MISC_KERNEL
;
1406 case PERF_CONTEXT_USER
:
1407 cpumode
= PERF_RECORD_MISC_USER
;
1410 pr_debug("invalid callchain context: "
1411 "%"PRId64
"\n", (s64
) ip
);
1413 * It seems the callchain is corrupted.
1416 callchain_cursor_reset(&callchain_cursor
);
1423 thread__find_addr_location(thread
, machine
, cpumode
,
1424 MAP__FUNCTION
, ip
, &al
);
1425 if (al
.sym
!= NULL
) {
1426 if (sort__has_parent
&& !*parent
&&
1427 symbol__match_regex(al
.sym
, &parent_regex
))
1429 else if (have_ignore_callees
&& root_al
&&
1430 symbol__match_regex(al
.sym
, &ignore_callees_regex
)) {
1431 /* Treat this symbol as the root,
1432 forgetting its callees. */
1434 callchain_cursor_reset(&callchain_cursor
);
1438 err
= callchain_cursor_append(&callchain_cursor
,
1439 ip
, al
.map
, al
.sym
);
1447 static int unwind_entry(struct unwind_entry
*entry
, void *arg
)
1449 struct callchain_cursor
*cursor
= arg
;
1450 return callchain_cursor_append(cursor
, entry
->ip
,
1451 entry
->map
, entry
->sym
);
1454 int machine__resolve_callchain(struct machine
*machine
,
1455 struct perf_evsel
*evsel
,
1456 struct thread
*thread
,
1457 struct perf_sample
*sample
,
1458 struct symbol
**parent
,
1459 struct addr_location
*root_al
,
1464 ret
= machine__resolve_callchain_sample(machine
, thread
,
1465 sample
->callchain
, parent
,
1466 root_al
, max_stack
);
1470 /* Can we do dwarf post unwind? */
1471 if (!((evsel
->attr
.sample_type
& PERF_SAMPLE_REGS_USER
) &&
1472 (evsel
->attr
.sample_type
& PERF_SAMPLE_STACK_USER
)))
1475 /* Bail out if nothing was captured. */
1476 if ((!sample
->user_regs
.regs
) ||
1477 (!sample
->user_stack
.size
))
1480 return unwind__get_entries(unwind_entry
, &callchain_cursor
, machine
,
1481 thread
, sample
, max_stack
);
1485 int machine__for_each_thread(struct machine
*machine
,
1486 int (*fn
)(struct thread
*thread
, void *p
),
1490 struct thread
*thread
;
1493 for (nd
= rb_first(&machine
->threads
); nd
; nd
= rb_next(nd
)) {
1494 thread
= rb_entry(nd
, struct thread
, rb_node
);
1495 rc
= fn(thread
, priv
);
1500 list_for_each_entry(thread
, &machine
->dead_threads
, node
) {
1501 rc
= fn(thread
, priv
);
1508 int __machine__synthesize_threads(struct machine
*machine
, struct perf_tool
*tool
,
1509 struct target
*target
, struct thread_map
*threads
,
1510 perf_event__handler_t process
, bool data_mmap
)
1512 if (target__has_task(target
))
1513 return perf_event__synthesize_thread_map(tool
, threads
, process
, machine
, data_mmap
);
1514 else if (target__has_cpu(target
))
1515 return perf_event__synthesize_threads(tool
, process
, machine
, data_mmap
);
1516 /* command specified */
/*
 * Return the tid last recorded for @cpu via machine__set_current_tid().
 * Guards against out-of-range cpu numbers and an unallocated
 * current_tid array.
 * NOTE(review): this source is extraction-garbled; the early-return for
 * the guard (original line 1523, presumably "return -1;") is missing
 * from this view — confirm against the full file.
 */
1520 pid_t
machine__get_current_tid(struct machine
*machine
, int cpu
)
1522 if (cpu
< 0 || cpu
>= MAX_NR_CPUS
|| !machine
->current_tid
)
1525 return machine
->current_tid
[cpu
];
1528 int machine__set_current_tid(struct machine
*machine
, int cpu
, pid_t pid
,
1531 struct thread
*thread
;
1536 if (!machine
->current_tid
) {
1539 machine
->current_tid
= calloc(MAX_NR_CPUS
, sizeof(pid_t
));
1540 if (!machine
->current_tid
)
1542 for (i
= 0; i
< MAX_NR_CPUS
; i
++)
1543 machine
->current_tid
[i
] = -1;
1546 if (cpu
>= MAX_NR_CPUS
) {
1547 pr_err("Requested CPU %d too large. ", cpu
);
1548 pr_err("Consider raising MAX_NR_CPUS\n");
1552 machine
->current_tid
[cpu
] = tid
;
1554 thread
= machine__findnew_thread(machine
, pid
, tid
);