13 #include <symbol/kallsyms.h>
15 #include "linux/hash.h"
17 static void dsos__init(struct dsos
*dsos
)
19 INIT_LIST_HEAD(&dsos
->head
);
23 int machine__init(struct machine
*machine
, const char *root_dir
, pid_t pid
)
25 map_groups__init(&machine
->kmaps
, machine
);
26 RB_CLEAR_NODE(&machine
->rb_node
);
27 dsos__init(&machine
->user_dsos
);
28 dsos__init(&machine
->kernel_dsos
);
30 machine
->threads
= RB_ROOT
;
31 INIT_LIST_HEAD(&machine
->dead_threads
);
32 machine
->last_match
= NULL
;
34 machine
->vdso_info
= NULL
;
38 machine
->symbol_filter
= NULL
;
39 machine
->id_hdr_size
= 0;
40 machine
->comm_exec
= false;
41 machine
->kernel_start
= 0;
43 machine
->root_dir
= strdup(root_dir
);
44 if (machine
->root_dir
== NULL
)
47 if (pid
!= HOST_KERNEL_ID
) {
48 struct thread
*thread
= machine__findnew_thread(machine
, -1,
55 snprintf(comm
, sizeof(comm
), "[guest/%d]", pid
);
56 thread__set_comm(thread
, comm
, 0);
59 machine
->current_tid
= NULL
;
64 struct machine
*machine__new_host(void)
66 struct machine
*machine
= malloc(sizeof(*machine
));
68 if (machine
!= NULL
) {
69 machine__init(machine
, "", HOST_KERNEL_ID
);
71 if (machine__create_kernel_maps(machine
) < 0)
81 static void dsos__delete(struct dsos
*dsos
)
85 list_for_each_entry_safe(pos
, n
, &dsos
->head
, node
) {
86 RB_CLEAR_NODE(&pos
->rb_node
);
92 void machine__delete_dead_threads(struct machine
*machine
)
96 list_for_each_entry_safe(t
, n
, &machine
->dead_threads
, node
) {
102 void machine__delete_threads(struct machine
*machine
)
104 struct rb_node
*nd
= rb_first(&machine
->threads
);
107 struct thread
*t
= rb_entry(nd
, struct thread
, rb_node
);
109 rb_erase(&t
->rb_node
, &machine
->threads
);
115 void machine__exit(struct machine
*machine
)
117 map_groups__exit(&machine
->kmaps
);
118 dsos__delete(&machine
->user_dsos
);
119 dsos__delete(&machine
->kernel_dsos
);
121 zfree(&machine
->root_dir
);
122 zfree(&machine
->current_tid
);
/* Counterpart to machine__new_host / machines__add: exit, then free. */
void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}
131 void machines__init(struct machines
*machines
)
133 machine__init(&machines
->host
, "", HOST_KERNEL_ID
);
134 machines
->guests
= RB_ROOT
;
135 machines
->symbol_filter
= NULL
;
138 void machines__exit(struct machines
*machines
)
140 machine__exit(&machines
->host
);
144 struct machine
*machines__add(struct machines
*machines
, pid_t pid
,
145 const char *root_dir
)
147 struct rb_node
**p
= &machines
->guests
.rb_node
;
148 struct rb_node
*parent
= NULL
;
149 struct machine
*pos
, *machine
= malloc(sizeof(*machine
));
154 if (machine__init(machine
, root_dir
, pid
) != 0) {
159 machine
->symbol_filter
= machines
->symbol_filter
;
163 pos
= rb_entry(parent
, struct machine
, rb_node
);
170 rb_link_node(&machine
->rb_node
, parent
, p
);
171 rb_insert_color(&machine
->rb_node
, &machines
->guests
);
176 void machines__set_symbol_filter(struct machines
*machines
,
177 symbol_filter_t symbol_filter
)
181 machines
->symbol_filter
= symbol_filter
;
182 machines
->host
.symbol_filter
= symbol_filter
;
184 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
185 struct machine
*machine
= rb_entry(nd
, struct machine
, rb_node
);
187 machine
->symbol_filter
= symbol_filter
;
191 void machines__set_comm_exec(struct machines
*machines
, bool comm_exec
)
195 machines
->host
.comm_exec
= comm_exec
;
197 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
198 struct machine
*machine
= rb_entry(nd
, struct machine
, rb_node
);
200 machine
->comm_exec
= comm_exec
;
204 struct machine
*machines__find(struct machines
*machines
, pid_t pid
)
206 struct rb_node
**p
= &machines
->guests
.rb_node
;
207 struct rb_node
*parent
= NULL
;
208 struct machine
*machine
;
209 struct machine
*default_machine
= NULL
;
211 if (pid
== HOST_KERNEL_ID
)
212 return &machines
->host
;
216 machine
= rb_entry(parent
, struct machine
, rb_node
);
217 if (pid
< machine
->pid
)
219 else if (pid
> machine
->pid
)
224 default_machine
= machine
;
227 return default_machine
;
230 struct machine
*machines__findnew(struct machines
*machines
, pid_t pid
)
233 const char *root_dir
= "";
234 struct machine
*machine
= machines__find(machines
, pid
);
236 if (machine
&& (machine
->pid
== pid
))
239 if ((pid
!= HOST_KERNEL_ID
) &&
240 (pid
!= DEFAULT_GUEST_KERNEL_ID
) &&
241 (symbol_conf
.guestmount
)) {
242 sprintf(path
, "%s/%d", symbol_conf
.guestmount
, pid
);
243 if (access(path
, R_OK
)) {
244 static struct strlist
*seen
;
247 seen
= strlist__new(true, NULL
);
249 if (!strlist__has_entry(seen
, path
)) {
250 pr_err("Can't access file %s\n", path
);
251 strlist__add(seen
, path
);
259 machine
= machines__add(machines
, pid
, root_dir
);
264 void machines__process_guests(struct machines
*machines
,
265 machine__process_t process
, void *data
)
269 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
270 struct machine
*pos
= rb_entry(nd
, struct machine
, rb_node
);
275 char *machine__mmap_name(struct machine
*machine
, char *bf
, size_t size
)
277 if (machine__is_host(machine
))
278 snprintf(bf
, size
, "[%s]", "kernel.kallsyms");
279 else if (machine__is_default_guest(machine
))
280 snprintf(bf
, size
, "[%s]", "guest.kernel.kallsyms");
282 snprintf(bf
, size
, "[%s.%d]", "guest.kernel.kallsyms",
289 void machines__set_id_hdr_size(struct machines
*machines
, u16 id_hdr_size
)
291 struct rb_node
*node
;
292 struct machine
*machine
;
294 machines
->host
.id_hdr_size
= id_hdr_size
;
296 for (node
= rb_first(&machines
->guests
); node
; node
= rb_next(node
)) {
297 machine
= rb_entry(node
, struct machine
, rb_node
);
298 machine
->id_hdr_size
= id_hdr_size
;
304 static void machine__update_thread_pid(struct machine
*machine
,
305 struct thread
*th
, pid_t pid
)
307 struct thread
*leader
;
309 if (pid
== th
->pid_
|| pid
== -1 || th
->pid_
!= -1)
314 if (th
->pid_
== th
->tid
)
317 leader
= machine__findnew_thread(machine
, th
->pid_
, th
->pid_
);
322 leader
->mg
= map_groups__new(machine
);
327 if (th
->mg
== leader
->mg
)
332 * Maps are created from MMAP events which provide the pid and
333 * tid. Consequently there never should be any maps on a thread
334 * with an unknown pid. Just print an error if there are.
336 if (!map_groups__empty(th
->mg
))
337 pr_err("Discarding thread maps for %d:%d\n",
339 map_groups__delete(th
->mg
);
342 th
->mg
= map_groups__get(leader
->mg
);
347 pr_err("Failed to join map groups for %d:%d\n", th
->pid_
, th
->tid
);
350 static struct thread
*__machine__findnew_thread(struct machine
*machine
,
351 pid_t pid
, pid_t tid
,
354 struct rb_node
**p
= &machine
->threads
.rb_node
;
355 struct rb_node
*parent
= NULL
;
359 * Front-end cache - TID lookups come in blocks,
360 * so most of the time we dont have to look up
363 th
= machine
->last_match
;
364 if (th
&& th
->tid
== tid
) {
365 machine__update_thread_pid(machine
, th
, pid
);
371 th
= rb_entry(parent
, struct thread
, rb_node
);
373 if (th
->tid
== tid
) {
374 machine
->last_match
= th
;
375 machine__update_thread_pid(machine
, th
, pid
);
388 th
= thread__new(pid
, tid
);
390 rb_link_node(&th
->rb_node
, parent
, p
);
391 rb_insert_color(&th
->rb_node
, &machine
->threads
);
394 * We have to initialize map_groups separately
395 * after rb tree is updated.
397 * The reason is that we call machine__findnew_thread
398 * within thread__init_map_groups to find the thread
399 * leader and that would screwed the rb tree.
401 if (thread__init_map_groups(th
, machine
)) {
402 rb_erase(&th
->rb_node
, &machine
->threads
);
407 machine
->last_match
= th
;
413 struct thread
*machine__findnew_thread(struct machine
*machine
, pid_t pid
,
416 return __machine__findnew_thread(machine
, pid
, tid
, true);
419 struct thread
*machine__find_thread(struct machine
*machine
, pid_t pid
,
422 return __machine__findnew_thread(machine
, pid
, tid
, false);
425 struct comm
*machine__thread_exec_comm(struct machine
*machine
,
426 struct thread
*thread
)
428 if (machine
->comm_exec
)
429 return thread__exec_comm(thread
);
431 return thread__comm(thread
);
434 int machine__process_comm_event(struct machine
*machine
, union perf_event
*event
,
435 struct perf_sample
*sample
)
437 struct thread
*thread
= machine__findnew_thread(machine
,
440 bool exec
= event
->header
.misc
& PERF_RECORD_MISC_COMM_EXEC
;
443 machine
->comm_exec
= true;
446 perf_event__fprintf_comm(event
, stdout
);
448 if (thread
== NULL
||
449 __thread__set_comm(thread
, event
->comm
.comm
, sample
->time
, exec
)) {
450 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
457 int machine__process_lost_event(struct machine
*machine __maybe_unused
,
458 union perf_event
*event
, struct perf_sample
*sample __maybe_unused
)
460 dump_printf(": id:%" PRIu64
": lost:%" PRIu64
"\n",
461 event
->lost
.id
, event
->lost
.lost
);
465 struct map
*machine__new_module(struct machine
*machine
, u64 start
,
466 const char *filename
)
469 struct dso
*dso
= __dsos__findnew(&machine
->kernel_dsos
, filename
);
475 map
= map__new2(start
, dso
, MAP__FUNCTION
);
479 if (machine__is_host(machine
))
480 dso
->symtab_type
= DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE
;
482 dso
->symtab_type
= DSO_BINARY_TYPE__GUEST_KMODULE
;
484 /* _KMODULE_COMP should be next to _KMODULE */
485 if (is_kernel_module(filename
, &compressed
) && compressed
)
488 map_groups__insert(&machine
->kmaps
, map
);
492 size_t machines__fprintf_dsos(struct machines
*machines
, FILE *fp
)
495 size_t ret
= __dsos__fprintf(&machines
->host
.kernel_dsos
.head
, fp
) +
496 __dsos__fprintf(&machines
->host
.user_dsos
.head
, fp
);
498 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
499 struct machine
*pos
= rb_entry(nd
, struct machine
, rb_node
);
500 ret
+= __dsos__fprintf(&pos
->kernel_dsos
.head
, fp
);
501 ret
+= __dsos__fprintf(&pos
->user_dsos
.head
, fp
);
507 size_t machine__fprintf_dsos_buildid(struct machine
*m
, FILE *fp
,
508 bool (skip
)(struct dso
*dso
, int parm
), int parm
)
510 return __dsos__fprintf_buildid(&m
->kernel_dsos
.head
, fp
, skip
, parm
) +
511 __dsos__fprintf_buildid(&m
->user_dsos
.head
, fp
, skip
, parm
);
514 size_t machines__fprintf_dsos_buildid(struct machines
*machines
, FILE *fp
,
515 bool (skip
)(struct dso
*dso
, int parm
), int parm
)
518 size_t ret
= machine__fprintf_dsos_buildid(&machines
->host
, fp
, skip
, parm
);
520 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
521 struct machine
*pos
= rb_entry(nd
, struct machine
, rb_node
);
522 ret
+= machine__fprintf_dsos_buildid(pos
, fp
, skip
, parm
);
527 size_t machine__fprintf_vmlinux_path(struct machine
*machine
, FILE *fp
)
531 struct dso
*kdso
= machine
->vmlinux_maps
[MAP__FUNCTION
]->dso
;
533 if (kdso
->has_build_id
) {
534 char filename
[PATH_MAX
];
535 if (dso__build_id_filename(kdso
, filename
, sizeof(filename
)))
536 printed
+= fprintf(fp
, "[0] %s\n", filename
);
539 for (i
= 0; i
< vmlinux_path__nr_entries
; ++i
)
540 printed
+= fprintf(fp
, "[%d] %s\n",
541 i
+ kdso
->has_build_id
, vmlinux_path
[i
]);
546 size_t machine__fprintf(struct machine
*machine
, FILE *fp
)
551 for (nd
= rb_first(&machine
->threads
); nd
; nd
= rb_next(nd
)) {
552 struct thread
*pos
= rb_entry(nd
, struct thread
, rb_node
);
554 ret
+= thread__fprintf(pos
, fp
);
560 static struct dso
*machine__get_kernel(struct machine
*machine
)
562 const char *vmlinux_name
= NULL
;
565 if (machine__is_host(machine
)) {
566 vmlinux_name
= symbol_conf
.vmlinux_name
;
568 vmlinux_name
= "[kernel.kallsyms]";
570 kernel
= dso__kernel_findnew(machine
, vmlinux_name
,
576 if (machine__is_default_guest(machine
))
577 vmlinux_name
= symbol_conf
.default_guest_vmlinux_name
;
579 vmlinux_name
= machine__mmap_name(machine
, bf
,
582 kernel
= dso__kernel_findnew(machine
, vmlinux_name
,
584 DSO_TYPE_GUEST_KERNEL
);
587 if (kernel
!= NULL
&& (!kernel
->has_build_id
))
588 dso__read_running_kernel_build_id(kernel
, machine
);
593 struct process_args
{
597 static void machine__get_kallsyms_filename(struct machine
*machine
, char *buf
,
600 if (machine__is_default_guest(machine
))
601 scnprintf(buf
, bufsz
, "%s", symbol_conf
.default_guest_kallsyms
);
603 scnprintf(buf
, bufsz
, "%s/proc/kallsyms", machine
->root_dir
);
606 const char *ref_reloc_sym_names
[] = {"_text", "_stext", NULL
};
608 /* Figure out the start address of kernel map from /proc/kallsyms.
609 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
610 * symbol_name if it's not that important.
612 static u64
machine__get_running_kernel_start(struct machine
*machine
,
613 const char **symbol_name
)
615 char filename
[PATH_MAX
];
620 machine__get_kallsyms_filename(machine
, filename
, PATH_MAX
);
622 if (symbol__restricted_filename(filename
, "/proc/kallsyms"))
625 for (i
= 0; (name
= ref_reloc_sym_names
[i
]) != NULL
; i
++) {
626 addr
= kallsyms__get_function_start(filename
, name
);
637 int __machine__create_kernel_maps(struct machine
*machine
, struct dso
*kernel
)
640 u64 start
= machine__get_running_kernel_start(machine
, NULL
);
642 for (type
= 0; type
< MAP__NR_TYPES
; ++type
) {
645 machine
->vmlinux_maps
[type
] = map__new2(start
, kernel
, type
);
646 if (machine
->vmlinux_maps
[type
] == NULL
)
649 machine
->vmlinux_maps
[type
]->map_ip
=
650 machine
->vmlinux_maps
[type
]->unmap_ip
=
652 kmap
= map__kmap(machine
->vmlinux_maps
[type
]);
653 kmap
->kmaps
= &machine
->kmaps
;
654 map_groups__insert(&machine
->kmaps
,
655 machine
->vmlinux_maps
[type
]);
661 void machine__destroy_kernel_maps(struct machine
*machine
)
665 for (type
= 0; type
< MAP__NR_TYPES
; ++type
) {
668 if (machine
->vmlinux_maps
[type
] == NULL
)
671 kmap
= map__kmap(machine
->vmlinux_maps
[type
]);
672 map_groups__remove(&machine
->kmaps
,
673 machine
->vmlinux_maps
[type
]);
674 if (kmap
->ref_reloc_sym
) {
676 * ref_reloc_sym is shared among all maps, so free just
679 if (type
== MAP__FUNCTION
) {
680 zfree((char **)&kmap
->ref_reloc_sym
->name
);
681 zfree(&kmap
->ref_reloc_sym
);
683 kmap
->ref_reloc_sym
= NULL
;
686 map__delete(machine
->vmlinux_maps
[type
]);
687 machine
->vmlinux_maps
[type
] = NULL
;
691 int machines__create_guest_kernel_maps(struct machines
*machines
)
694 struct dirent
**namelist
= NULL
;
700 if (symbol_conf
.default_guest_vmlinux_name
||
701 symbol_conf
.default_guest_modules
||
702 symbol_conf
.default_guest_kallsyms
) {
703 machines__create_kernel_maps(machines
, DEFAULT_GUEST_KERNEL_ID
);
706 if (symbol_conf
.guestmount
) {
707 items
= scandir(symbol_conf
.guestmount
, &namelist
, NULL
, NULL
);
710 for (i
= 0; i
< items
; i
++) {
711 if (!isdigit(namelist
[i
]->d_name
[0])) {
712 /* Filter out . and .. */
715 pid
= (pid_t
)strtol(namelist
[i
]->d_name
, &endp
, 10);
716 if ((*endp
!= '\0') ||
717 (endp
== namelist
[i
]->d_name
) ||
719 pr_debug("invalid directory (%s). Skipping.\n",
720 namelist
[i
]->d_name
);
723 sprintf(path
, "%s/%s/proc/kallsyms",
724 symbol_conf
.guestmount
,
725 namelist
[i
]->d_name
);
726 ret
= access(path
, R_OK
);
728 pr_debug("Can't access file %s\n", path
);
731 machines__create_kernel_maps(machines
, pid
);
740 void machines__destroy_kernel_maps(struct machines
*machines
)
742 struct rb_node
*next
= rb_first(&machines
->guests
);
744 machine__destroy_kernel_maps(&machines
->host
);
747 struct machine
*pos
= rb_entry(next
, struct machine
, rb_node
);
749 next
= rb_next(&pos
->rb_node
);
750 rb_erase(&pos
->rb_node
, &machines
->guests
);
751 machine__delete(pos
);
755 int machines__create_kernel_maps(struct machines
*machines
, pid_t pid
)
757 struct machine
*machine
= machines__findnew(machines
, pid
);
762 return machine__create_kernel_maps(machine
);
765 int machine__load_kallsyms(struct machine
*machine
, const char *filename
,
766 enum map_type type
, symbol_filter_t filter
)
768 struct map
*map
= machine
->vmlinux_maps
[type
];
769 int ret
= dso__load_kallsyms(map
->dso
, filename
, map
, filter
);
772 dso__set_loaded(map
->dso
, type
);
774 * Since /proc/kallsyms will have multiple sessions for the
775 * kernel, with modules between them, fixup the end of all
778 __map_groups__fixup_end(&machine
->kmaps
, type
);
784 int machine__load_vmlinux_path(struct machine
*machine
, enum map_type type
,
785 symbol_filter_t filter
)
787 struct map
*map
= machine
->vmlinux_maps
[type
];
788 int ret
= dso__load_vmlinux_path(map
->dso
, map
, filter
);
791 dso__set_loaded(map
->dso
, type
);
796 static void map_groups__fixup_end(struct map_groups
*mg
)
799 for (i
= 0; i
< MAP__NR_TYPES
; ++i
)
800 __map_groups__fixup_end(mg
, i
);
803 static char *get_kernel_version(const char *root_dir
)
805 char version
[PATH_MAX
];
808 const char *prefix
= "Linux version ";
810 sprintf(version
, "%s/proc/version", root_dir
);
811 file
= fopen(version
, "r");
816 tmp
= fgets(version
, sizeof(version
), file
);
819 name
= strstr(version
, prefix
);
822 name
+= strlen(prefix
);
823 tmp
= strchr(name
, ' ');
830 static int map_groups__set_modules_path_dir(struct map_groups
*mg
,
831 const char *dir_name
, int depth
)
834 DIR *dir
= opendir(dir_name
);
838 pr_debug("%s: cannot open %s dir\n", __func__
, dir_name
);
842 while ((dent
= readdir(dir
)) != NULL
) {
846 /*sshfs might return bad dent->d_type, so we have to stat*/
847 snprintf(path
, sizeof(path
), "%s/%s", dir_name
, dent
->d_name
);
851 if (S_ISDIR(st
.st_mode
)) {
852 if (!strcmp(dent
->d_name
, ".") ||
853 !strcmp(dent
->d_name
, ".."))
856 /* Do not follow top-level source and build symlinks */
858 if (!strcmp(dent
->d_name
, "source") ||
859 !strcmp(dent
->d_name
, "build"))
863 ret
= map_groups__set_modules_path_dir(mg
, path
,
868 char *dot
= strrchr(dent
->d_name
, '.'),
876 /* On some system, modules are compressed like .ko.gz */
877 if (is_supported_compression(dot
+ 1) &&
878 is_kmodule_extension(dot
- 2))
881 snprintf(dso_name
, sizeof(dso_name
), "[%.*s]",
882 (int)(dot
- dent
->d_name
), dent
->d_name
);
884 strxfrchar(dso_name
, '-', '_');
885 map
= map_groups__find_by_name(mg
, MAP__FUNCTION
,
890 long_name
= strdup(path
);
891 if (long_name
== NULL
) {
895 dso__set_long_name(map
->dso
, long_name
, true);
896 dso__kernel_module_get_build_id(map
->dso
, "");
905 static int machine__set_modules_path(struct machine
*machine
)
908 char modules_path
[PATH_MAX
];
910 version
= get_kernel_version(machine
->root_dir
);
914 snprintf(modules_path
, sizeof(modules_path
), "%s/lib/modules/%s",
915 machine
->root_dir
, version
);
918 return map_groups__set_modules_path_dir(&machine
->kmaps
, modules_path
, 0);
921 static int machine__create_module(void *arg
, const char *name
, u64 start
)
923 struct machine
*machine
= arg
;
926 map
= machine__new_module(machine
, start
, name
);
930 dso__kernel_module_get_build_id(map
->dso
, machine
->root_dir
);
935 static int machine__create_modules(struct machine
*machine
)
940 if (machine__is_default_guest(machine
)) {
941 modules
= symbol_conf
.default_guest_modules
;
943 snprintf(path
, PATH_MAX
, "%s/proc/modules", machine
->root_dir
);
947 if (symbol__restricted_filename(modules
, "/proc/modules"))
950 if (modules__parse(modules
, machine
, machine__create_module
))
953 if (!machine__set_modules_path(machine
))
956 pr_debug("Problems setting modules path maps, continuing anyway...\n");
961 int machine__create_kernel_maps(struct machine
*machine
)
963 struct dso
*kernel
= machine__get_kernel(machine
);
965 u64 addr
= machine__get_running_kernel_start(machine
, &name
);
969 if (kernel
== NULL
||
970 __machine__create_kernel_maps(machine
, kernel
) < 0)
973 if (symbol_conf
.use_modules
&& machine__create_modules(machine
) < 0) {
974 if (machine__is_host(machine
))
975 pr_debug("Problems creating module maps, "
976 "continuing anyway...\n");
978 pr_debug("Problems creating module maps for guest %d, "
979 "continuing anyway...\n", machine
->pid
);
983 * Now that we have all the maps created, just set the ->end of them:
985 map_groups__fixup_end(&machine
->kmaps
);
987 if (maps__set_kallsyms_ref_reloc_sym(machine
->vmlinux_maps
, name
,
989 machine__destroy_kernel_maps(machine
);
996 static void machine__set_kernel_mmap_len(struct machine
*machine
,
997 union perf_event
*event
)
1001 for (i
= 0; i
< MAP__NR_TYPES
; i
++) {
1002 machine
->vmlinux_maps
[i
]->start
= event
->mmap
.start
;
1003 machine
->vmlinux_maps
[i
]->end
= (event
->mmap
.start
+
1006 * Be a bit paranoid here, some perf.data file came with
1007 * a zero sized synthesized MMAP event for the kernel.
1009 if (machine
->vmlinux_maps
[i
]->end
== 0)
1010 machine
->vmlinux_maps
[i
]->end
= ~0ULL;
1014 static bool machine__uses_kcore(struct machine
*machine
)
1018 list_for_each_entry(dso
, &machine
->kernel_dsos
.head
, node
) {
1019 if (dso__is_kcore(dso
))
1026 static int machine__process_kernel_mmap_event(struct machine
*machine
,
1027 union perf_event
*event
)
1030 char kmmap_prefix
[PATH_MAX
];
1031 enum dso_kernel_type kernel_type
;
1032 bool is_kernel_mmap
;
1034 /* If we have maps from kcore then we do not need or want any others */
1035 if (machine__uses_kcore(machine
))
1038 machine__mmap_name(machine
, kmmap_prefix
, sizeof(kmmap_prefix
));
1039 if (machine__is_host(machine
))
1040 kernel_type
= DSO_TYPE_KERNEL
;
1042 kernel_type
= DSO_TYPE_GUEST_KERNEL
;
1044 is_kernel_mmap
= memcmp(event
->mmap
.filename
,
1046 strlen(kmmap_prefix
) - 1) == 0;
1047 if (event
->mmap
.filename
[0] == '/' ||
1048 (!is_kernel_mmap
&& event
->mmap
.filename
[0] == '[')) {
1050 char short_module_name
[1024];
1053 if (event
->mmap
.filename
[0] == '/') {
1054 name
= strrchr(event
->mmap
.filename
, '/');
1058 ++name
; /* skip / */
1059 dot
= strrchr(name
, '.');
1062 /* On some system, modules are compressed like .ko.gz */
1063 if (is_supported_compression(dot
+ 1))
1065 if (!is_kmodule_extension(dot
+ 1))
1067 snprintf(short_module_name
, sizeof(short_module_name
),
1068 "[%.*s]", (int)(dot
- name
), name
);
1069 strxfrchar(short_module_name
, '-', '_');
1071 strcpy(short_module_name
, event
->mmap
.filename
);
1073 map
= machine__new_module(machine
, event
->mmap
.start
,
1074 event
->mmap
.filename
);
1078 name
= strdup(short_module_name
);
1082 dso__set_short_name(map
->dso
, name
, true);
1083 map
->end
= map
->start
+ event
->mmap
.len
;
1084 } else if (is_kernel_mmap
) {
1085 const char *symbol_name
= (event
->mmap
.filename
+
1086 strlen(kmmap_prefix
));
1088 * Should be there already, from the build-id table in
1091 struct dso
*kernel
= NULL
;
1094 list_for_each_entry(dso
, &machine
->kernel_dsos
.head
, node
) {
1095 if (is_kernel_module(dso
->long_name
, NULL
))
1103 kernel
= __dsos__findnew(&machine
->kernel_dsos
,
1108 kernel
->kernel
= kernel_type
;
1109 if (__machine__create_kernel_maps(machine
, kernel
) < 0)
1112 if (strstr(kernel
->long_name
, "vmlinux"))
1113 dso__set_short_name(kernel
, "[kernel.vmlinux]", false);
1115 machine__set_kernel_mmap_len(machine
, event
);
1118 * Avoid using a zero address (kptr_restrict) for the ref reloc
1119 * symbol. Effectively having zero here means that at record
1120 * time /proc/sys/kernel/kptr_restrict was non zero.
1122 if (event
->mmap
.pgoff
!= 0) {
1123 maps__set_kallsyms_ref_reloc_sym(machine
->vmlinux_maps
,
1128 if (machine__is_default_guest(machine
)) {
1130 * preload dso of guest kernel and modules
1132 dso__load(kernel
, machine
->vmlinux_maps
[MAP__FUNCTION
],
1141 int machine__process_mmap2_event(struct machine
*machine
,
1142 union perf_event
*event
,
1143 struct perf_sample
*sample __maybe_unused
)
1145 u8 cpumode
= event
->header
.misc
& PERF_RECORD_MISC_CPUMODE_MASK
;
1146 struct thread
*thread
;
1152 perf_event__fprintf_mmap2(event
, stdout
);
1154 if (cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
||
1155 cpumode
== PERF_RECORD_MISC_KERNEL
) {
1156 ret
= machine__process_kernel_mmap_event(machine
, event
);
1162 thread
= machine__findnew_thread(machine
, event
->mmap2
.pid
,
1167 if (event
->header
.misc
& PERF_RECORD_MISC_MMAP_DATA
)
1168 type
= MAP__VARIABLE
;
1170 type
= MAP__FUNCTION
;
1172 map
= map__new(machine
, event
->mmap2
.start
,
1173 event
->mmap2
.len
, event
->mmap2
.pgoff
,
1174 event
->mmap2
.pid
, event
->mmap2
.maj
,
1175 event
->mmap2
.min
, event
->mmap2
.ino
,
1176 event
->mmap2
.ino_generation
,
1179 event
->mmap2
.filename
, type
, thread
);
1184 thread__insert_map(thread
, map
);
1188 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1192 int machine__process_mmap_event(struct machine
*machine
, union perf_event
*event
,
1193 struct perf_sample
*sample __maybe_unused
)
1195 u8 cpumode
= event
->header
.misc
& PERF_RECORD_MISC_CPUMODE_MASK
;
1196 struct thread
*thread
;
1202 perf_event__fprintf_mmap(event
, stdout
);
1204 if (cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
||
1205 cpumode
== PERF_RECORD_MISC_KERNEL
) {
1206 ret
= machine__process_kernel_mmap_event(machine
, event
);
1212 thread
= machine__findnew_thread(machine
, event
->mmap
.pid
,
1217 if (event
->header
.misc
& PERF_RECORD_MISC_MMAP_DATA
)
1218 type
= MAP__VARIABLE
;
1220 type
= MAP__FUNCTION
;
1222 map
= map__new(machine
, event
->mmap
.start
,
1223 event
->mmap
.len
, event
->mmap
.pgoff
,
1224 event
->mmap
.pid
, 0, 0, 0, 0, 0, 0,
1225 event
->mmap
.filename
,
1231 thread__insert_map(thread
, map
);
1235 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1239 static void machine__remove_thread(struct machine
*machine
, struct thread
*th
)
1241 machine
->last_match
= NULL
;
1242 rb_erase(&th
->rb_node
, &machine
->threads
);
1244 * We may have references to this thread, for instance in some hist_entry
1245 * instances, so just move them to a separate list.
1247 list_add_tail(&th
->node
, &machine
->dead_threads
);
1250 int machine__process_fork_event(struct machine
*machine
, union perf_event
*event
,
1251 struct perf_sample
*sample
)
1253 struct thread
*thread
= machine__find_thread(machine
,
1256 struct thread
*parent
= machine__findnew_thread(machine
,
1260 /* if a thread currently exists for the thread id remove it */
1262 machine__remove_thread(machine
, thread
);
1264 thread
= machine__findnew_thread(machine
, event
->fork
.pid
,
1267 perf_event__fprintf_task(event
, stdout
);
1269 if (thread
== NULL
|| parent
== NULL
||
1270 thread__fork(thread
, parent
, sample
->time
) < 0) {
1271 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1278 int machine__process_exit_event(struct machine
*machine
, union perf_event
*event
,
1279 struct perf_sample
*sample __maybe_unused
)
1281 struct thread
*thread
= machine__find_thread(machine
,
1286 perf_event__fprintf_task(event
, stdout
);
1289 thread__exited(thread
);
1294 int machine__process_event(struct machine
*machine
, union perf_event
*event
,
1295 struct perf_sample
*sample
)
1299 switch (event
->header
.type
) {
1300 case PERF_RECORD_COMM
:
1301 ret
= machine__process_comm_event(machine
, event
, sample
); break;
1302 case PERF_RECORD_MMAP
:
1303 ret
= machine__process_mmap_event(machine
, event
, sample
); break;
1304 case PERF_RECORD_MMAP2
:
1305 ret
= machine__process_mmap2_event(machine
, event
, sample
); break;
1306 case PERF_RECORD_FORK
:
1307 ret
= machine__process_fork_event(machine
, event
, sample
); break;
1308 case PERF_RECORD_EXIT
:
1309 ret
= machine__process_exit_event(machine
, event
, sample
); break;
1310 case PERF_RECORD_LOST
:
1311 ret
= machine__process_lost_event(machine
, event
, sample
); break;
1320 static bool symbol__match_regex(struct symbol
*sym
, regex_t
*regex
)
1322 if (sym
->name
&& !regexec(regex
, sym
->name
, 0, NULL
, 0))
1327 static void ip__resolve_ams(struct thread
*thread
,
1328 struct addr_map_symbol
*ams
,
1331 struct addr_location al
;
1333 memset(&al
, 0, sizeof(al
));
1335 * We cannot use the header.misc hint to determine whether a
1336 * branch stack address is user, kernel, guest, hypervisor.
1337 * Branches may straddle the kernel/user/hypervisor boundaries.
1338 * Thus, we have to try consecutively until we find a match
1339 * or else, the symbol is unknown
1341 thread__find_cpumode_addr_location(thread
, MAP__FUNCTION
, ip
, &al
);
1344 ams
->al_addr
= al
.addr
;
1349 static void ip__resolve_data(struct thread
*thread
,
1350 u8 m
, struct addr_map_symbol
*ams
, u64 addr
)
1352 struct addr_location al
;
1354 memset(&al
, 0, sizeof(al
));
1356 thread__find_addr_location(thread
, m
, MAP__VARIABLE
, addr
, &al
);
1357 if (al
.map
== NULL
) {
1359 * some shared data regions have execute bit set which puts
1360 * their mapping in the MAP__FUNCTION type array.
1361 * Check there as a fallback option before dropping the sample.
1363 thread__find_addr_location(thread
, m
, MAP__FUNCTION
, addr
, &al
);
1367 ams
->al_addr
= al
.addr
;
1372 struct mem_info
*sample__resolve_mem(struct perf_sample
*sample
,
1373 struct addr_location
*al
)
1375 struct mem_info
*mi
= zalloc(sizeof(*mi
));
1380 ip__resolve_ams(al
->thread
, &mi
->iaddr
, sample
->ip
);
1381 ip__resolve_data(al
->thread
, al
->cpumode
, &mi
->daddr
, sample
->addr
);
1382 mi
->data_src
.val
= sample
->data_src
;
1387 static int add_callchain_ip(struct thread
*thread
,
1388 struct symbol
**parent
,
1389 struct addr_location
*root_al
,
1390 bool branch_history
,
1393 struct addr_location al
;
1398 thread__find_cpumode_addr_location(thread
, MAP__FUNCTION
,
1401 u8 cpumode
= PERF_RECORD_MISC_USER
;
1403 if (ip
>= PERF_CONTEXT_MAX
) {
1405 case PERF_CONTEXT_HV
:
1406 cpumode
= PERF_RECORD_MISC_HYPERVISOR
;
1408 case PERF_CONTEXT_KERNEL
:
1409 cpumode
= PERF_RECORD_MISC_KERNEL
;
1411 case PERF_CONTEXT_USER
:
1412 cpumode
= PERF_RECORD_MISC_USER
;
1415 pr_debug("invalid callchain context: "
1416 "%"PRId64
"\n", (s64
) ip
);
1418 * It seems the callchain is corrupted.
1421 callchain_cursor_reset(&callchain_cursor
);
1426 thread__find_addr_location(thread
, cpumode
, MAP__FUNCTION
,
1430 if (al
.sym
!= NULL
) {
1431 if (sort__has_parent
&& !*parent
&&
1432 symbol__match_regex(al
.sym
, &parent_regex
))
1434 else if (have_ignore_callees
&& root_al
&&
1435 symbol__match_regex(al
.sym
, &ignore_callees_regex
)) {
1436 /* Treat this symbol as the root,
1437 forgetting its callees. */
1439 callchain_cursor_reset(&callchain_cursor
);
1443 return callchain_cursor_append(&callchain_cursor
, al
.addr
, al
.map
, al
.sym
);
1446 struct branch_info
*sample__resolve_bstack(struct perf_sample
*sample
,
1447 struct addr_location
*al
)
1450 const struct branch_stack
*bs
= sample
->branch_stack
;
1451 struct branch_info
*bi
= calloc(bs
->nr
, sizeof(struct branch_info
));
1456 for (i
= 0; i
< bs
->nr
; i
++) {
1457 ip__resolve_ams(al
->thread
, &bi
[i
].to
, bs
->entries
[i
].to
);
1458 ip__resolve_ams(al
->thread
, &bi
[i
].from
, bs
->entries
[i
].from
);
1459 bi
[i
].flags
= bs
->entries
[i
].flags
;
1466 #define NO_ENTRY 0xff
1468 #define PERF_MAX_BRANCH_DEPTH 127
1471 static int remove_loops(struct branch_entry
*l
, int nr
)
1474 unsigned char chash
[CHASHSZ
];
1476 memset(chash
, NO_ENTRY
, sizeof(chash
));
1478 BUG_ON(PERF_MAX_BRANCH_DEPTH
> 255);
1480 for (i
= 0; i
< nr
; i
++) {
1481 int h
= hash_64(l
[i
].from
, CHASHBITS
) % CHASHSZ
;
1483 /* no collision handling for now */
1484 if (chash
[h
] == NO_ENTRY
) {
1486 } else if (l
[chash
[h
]].from
== l
[i
].from
) {
1487 bool is_loop
= true;
1488 /* check if it is a real loop */
1490 for (j
= chash
[h
]; j
< i
&& i
+ off
< nr
; j
++, off
++)
1491 if (l
[j
].from
!= l
[i
+ off
].from
) {
1496 memmove(l
+ i
, l
+ i
+ off
,
1497 (nr
- (i
+ off
)) * sizeof(*l
));
1505 static int thread__resolve_callchain_sample(struct thread
*thread
,
1506 struct ip_callchain
*chain
,
1507 struct branch_stack
*branch
,
1508 struct symbol
**parent
,
1509 struct addr_location
*root_al
,
1512 int chain_nr
= min(max_stack
, (int)chain
->nr
);
1518 * Based on DWARF debug information, some architectures skip
1519 * a callchain entry saved by the kernel.
1521 if (chain
->nr
< PERF_MAX_STACK_DEPTH
)
1522 skip_idx
= arch_skip_callchain_idx(thread
, chain
);
1524 callchain_cursor_reset(&callchain_cursor
);
1527 * Add branches to call stack for easier browsing. This gives
1528 * more context for a sample than just the callers.
1530 * This uses individual histograms of paths compared to the
1531 * aggregated histograms the normal LBR mode uses.
1533 * Limitations for now:
1534 * - No extra filters
1535 * - No annotations (should annotate somehow)
1538 if (branch
&& callchain_param
.branch_callstack
) {
1539 int nr
= min(max_stack
, (int)branch
->nr
);
1540 struct branch_entry be
[nr
];
1542 if (branch
->nr
> PERF_MAX_BRANCH_DEPTH
) {
1543 pr_warning("corrupted branch chain. skipping...\n");
1547 for (i
= 0; i
< nr
; i
++) {
1548 if (callchain_param
.order
== ORDER_CALLEE
) {
1549 be
[i
] = branch
->entries
[i
];
1551 * Check for overlap into the callchain.
1552 * The return address is one off compared to
1553 * the branch entry. To adjust for this
1554 * assume the calling instruction is not longer
1557 if (i
== skip_idx
||
1558 chain
->ips
[first_call
] >= PERF_CONTEXT_MAX
)
1560 else if (be
[i
].from
< chain
->ips
[first_call
] &&
1561 be
[i
].from
>= chain
->ips
[first_call
] - 8)
1564 be
[i
] = branch
->entries
[branch
->nr
- i
- 1];
1567 nr
= remove_loops(be
, nr
);
1569 for (i
= 0; i
< nr
; i
++) {
1570 err
= add_callchain_ip(thread
, parent
, root_al
,
1573 err
= add_callchain_ip(thread
, parent
, root_al
,
1584 if (chain
->nr
> PERF_MAX_STACK_DEPTH
) {
1585 pr_warning("corrupted callchain. skipping...\n");
1589 for (i
= first_call
; i
< chain_nr
; i
++) {
1592 if (callchain_param
.order
== ORDER_CALLEE
)
1595 j
= chain
->nr
- i
- 1;
1597 #ifdef HAVE_SKIP_CALLCHAIN_IDX
1603 err
= add_callchain_ip(thread
, parent
, root_al
, false, ip
);
1606 return (err
< 0) ? err
: 0;
1612 static int unwind_entry(struct unwind_entry
*entry
, void *arg
)
1614 struct callchain_cursor
*cursor
= arg
;
1615 return callchain_cursor_append(cursor
, entry
->ip
,
1616 entry
->map
, entry
->sym
);
1619 int thread__resolve_callchain(struct thread
*thread
,
1620 struct perf_evsel
*evsel
,
1621 struct perf_sample
*sample
,
1622 struct symbol
**parent
,
1623 struct addr_location
*root_al
,
1626 int ret
= thread__resolve_callchain_sample(thread
, sample
->callchain
,
1627 sample
->branch_stack
,
1628 parent
, root_al
, max_stack
);
1632 /* Can we do dwarf post unwind? */
1633 if (!((evsel
->attr
.sample_type
& PERF_SAMPLE_REGS_USER
) &&
1634 (evsel
->attr
.sample_type
& PERF_SAMPLE_STACK_USER
)))
1637 /* Bail out if nothing was captured. */
1638 if ((!sample
->user_regs
.regs
) ||
1639 (!sample
->user_stack
.size
))
1642 return unwind__get_entries(unwind_entry
, &callchain_cursor
,
1643 thread
, sample
, max_stack
);
1647 int machine__for_each_thread(struct machine
*machine
,
1648 int (*fn
)(struct thread
*thread
, void *p
),
1652 struct thread
*thread
;
1655 for (nd
= rb_first(&machine
->threads
); nd
; nd
= rb_next(nd
)) {
1656 thread
= rb_entry(nd
, struct thread
, rb_node
);
1657 rc
= fn(thread
, priv
);
1662 list_for_each_entry(thread
, &machine
->dead_threads
, node
) {
1663 rc
= fn(thread
, priv
);
1670 int __machine__synthesize_threads(struct machine
*machine
, struct perf_tool
*tool
,
1671 struct target
*target
, struct thread_map
*threads
,
1672 perf_event__handler_t process
, bool data_mmap
)
1674 if (target__has_task(target
))
1675 return perf_event__synthesize_thread_map(tool
, threads
, process
, machine
, data_mmap
);
1676 else if (target__has_cpu(target
))
1677 return perf_event__synthesize_threads(tool
, process
, machine
, data_mmap
);
1678 /* command specified */
1682 pid_t
machine__get_current_tid(struct machine
*machine
, int cpu
)
1684 if (cpu
< 0 || cpu
>= MAX_NR_CPUS
|| !machine
->current_tid
)
1687 return machine
->current_tid
[cpu
];
1690 int machine__set_current_tid(struct machine
*machine
, int cpu
, pid_t pid
,
1693 struct thread
*thread
;
1698 if (!machine
->current_tid
) {
1701 machine
->current_tid
= calloc(MAX_NR_CPUS
, sizeof(pid_t
));
1702 if (!machine
->current_tid
)
1704 for (i
= 0; i
< MAX_NR_CPUS
; i
++)
1705 machine
->current_tid
[i
] = -1;
1708 if (cpu
>= MAX_NR_CPUS
) {
1709 pr_err("Requested CPU %d too large. ", cpu
);
1710 pr_err("Consider raising MAX_NR_CPUS\n");
1714 machine
->current_tid
[cpu
] = tid
;
1716 thread
= machine__findnew_thread(machine
, pid
, tid
);
1725 int machine__get_kernel_start(struct machine
*machine
)
1727 struct map
*map
= machine__kernel_map(machine
, MAP__FUNCTION
);
1731 * The only addresses above 2^63 are kernel addresses of a 64-bit
1732 * kernel. Note that addresses are unsigned so that on a 32-bit system
1733 * all addresses including kernel addresses are less than 2^32. In
1734 * that case (32-bit system), if the kernel mapping is unknown, all
1735 * addresses will be assumed to be in user space - see
1736 * machine__kernel_ip().
1738 machine
->kernel_start
= 1ULL << 63;
1740 err
= map__load(map
, machine
->symbol_filter
);
1742 machine
->kernel_start
= map
->start
;