#include <symbol/kallsyms.h>
#include "linux/hash.h"

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	pthread_rwlock_init(&dsos->lock, NULL);
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine->threads = RB_ROOT;
	pthread_rwlock_init(&machine->threads_lock, NULL);
	machine->nr_threads = 0;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;

	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;

	return 0;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

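/*
 * Usage sketch (illustrative only, not part of the original file): a
 * typical lifecycle pairs machine__new_host() with machine__delete():
 *
 *	struct machine *m = machine__new_host();
 *	if (m != NULL) {
 *		... resolve symbols, process events ...
 *		machine__delete(m);
 *	}
 */
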
static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	pthread_rwlock_wrlock(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		list_del_init(&pos->node);
		dso__put(pos);
	}

	pthread_rwlock_unlock(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	pthread_rwlock_destroy(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;

	pthread_rwlock_wrlock(&machine->threads_lock);
	nd = rb_first(&machine->threads);
	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);
		__machine__remove_thread(machine, t, false);
	}
	pthread_rwlock_unlock(&machine->threads_lock);
}

void machine__exit(struct machine *machine)
{
	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
	pthread_rwlock_destroy(&machine->threads_lock);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		goto out_put;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		machine->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &machine->threads);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		machine->last_match = th;
		++machine->nr_threads;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct thread *th;

	pthread_rwlock_wrlock(&machine->threads_lock);
	th = __machine__findnew_thread(machine, pid, tid);
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

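/*
 * Illustrative caller (not part of the original file): per the comment
 * above ____machine__findnew_thread(), the returned thread holds a
 * reference that the caller must eventually drop:
 *
 *	struct thread *th = machine__findnew_thread(machine, pid, tid);
 *	if (th != NULL) {
 *		... use th ...
 *		thread__put(th);
 *	}
 */
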
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct thread *th;

	pthread_rwlock_rdlock(&machine->threads_lock);
	th = ____machine__findnew_thread(machine, pid, tid, false);
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	pthread_rwlock_wrlock(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		if (machine__is_host(machine))
			dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
		else
			dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

		/* _KMODULE_COMP should be next to _KMODULE */
		if (m->kmod && m->comp)
			dso->symtab_type++;

		dso__set_short_name(dso, strdup(m->name), true);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	pthread_rwlock_unlock(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);
}

struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	free(m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&machine->threads_lock);

	ret = fprintf(fp, "Threads: %u\n", machine->nr_threads);

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	pthread_rwlock_unlock(&machine->threads_lock);

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}

int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	int type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	/* In case of renewal of the kernel map, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		map = __machine__kernel_map(machine, type);
		kmap = map__kmap(map);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps, map);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map = __machine__kernel_map(machine, type);

		if (map == NULL)
			continue;

		kmap = map__kmap(map);
		map_groups__remove(&machine->kmaps, map);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__put(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				continue;
			}
			machines__create_kernel_maps(machines, pid);
		}
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int __machine__load_kallsyms(struct machine *machine, const char *filename,
			     enum map_type type, bool no_kcore, symbol_filter_t filter)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, no_kcore, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sessions for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	return __machine__load_kallsyms(machine, filename, type, false, filter);
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	char *long_name;
	struct map *map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_running_kernel_start(machine, &name);
	int ret;

	if (!addr || kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	dso__put(kernel);
	if (ret < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end   = (event->mmap.start +
						   event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		pthread_rwlock_rdlock(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * correct cpumode to is_kernel_module, we should
			 * record the cpumode when we add this dso to the
			 * linked list.
			 *
			 * However we don't really need passing correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto kernel_dsos
			 * list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */
			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		pthread_rwlock_unlock(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine), NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	thread__insert_map(thread, map);
	thread__put(thread);
	map__put(map);
	return 0;

out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem_map;

	thread__insert_map(thread, map);
	thread__put(thread);
	map__put(map);
	return 0;

out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	if (machine->last_match == th)
		machine->last_match = NULL;

	BUG_ON(atomic_read(&th->refcnt) == 0);
	if (lock)
		pthread_rwlock_wrlock(&machine->threads_lock);
	rb_erase_init(&th->rb_node, &machine->threads);
	RB_CLEAR_NODE(&th->rb_node);
	--machine->nr_threads;
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
	if (lock)
		pthread_rwlock_unlock(&machine->threads_lock);
	thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip)
{
	struct addr_location al;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;
	return callchain_cursor_append(cursor, al.addr, al.map, al.sym);
}

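/*
 * For illustration (values hypothetical): a raw callchain mixes context
 * markers and addresses, e.g. { PERF_CONTEXT_KERNEL, k1, k2,
 * PERF_CONTEXT_USER, u1, u2 }. add_callchain_ip() consumes each marker
 * to switch *cpumode and appends only the real addresses to the cursor.
 */
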
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}

	return bi;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				memmove(l + i, l + i + off,
					(nr - (i + off)) * sizeof(*l));
				nr -= off;
			}
		}
	}
	return nr;
}

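/*
 * Worked example (hypothetical): for from-addresses A B C A B C D,
 * the second A hashes to the same chash slot as the first, the three
 * entries starting at the collision repeat with off = 3, so one loop
 * iteration is removed and the sequence becomes A B C D with nr = 4.
 */
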
/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success get LBR callchain information
 * 0 no available LBR callchain information, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	u64 ip;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;
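
		/*
		 * Worked example (hypothetical numbers): with i = 2 kernel
		 * entries before PERF_CONTEXT_USER and lbr_nr = 4 LBR
		 * entries, mix_chain_nr = 2 + 1 + 4 + 1 = 8: two kernel
		 * ips, the PERF_CONTEXT_USER marker, the newest branch
		 * target entries[0].to, and four LBR "from" addresses.
		 */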

		if (mix_chain_nr > (int)sysctl_perf_event_max_stack + PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted callchain. skipping...\n");
			return 0;
		}

		for (j = 0; j < mix_chain_nr; j++) {
			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1)
					ip = lbr_stack->entries[j - i - 2].from;
				else
					ip = lbr_stack->entries[0].to;
			} else {
				if (j < lbr_nr)
					ip = lbr_stack->entries[lbr_nr - j - 1].from;
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else
					ip = lbr_stack->entries[0].to;
			}

			err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}

static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	int skip_idx = -1;
	int first_call = 0;

	if (perf_evsel__has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	if (chain->nr < sysctl_perf_event_max_stack)
		skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */
	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];
				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}
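
		/*
		 * Worked example (hypothetical): if chain->ips[first_call]
		 * is 0x1008 and be[i].from is 0x1004, the branch source
		 * lies within 8 bytes below the return address
		 * (0x1000 <= 0x1004 < 0x1008), so the two records describe
		 * the same call and first_call is advanced past it.
		 */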

		nr = remove_loops(be, nr);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent, root_al,
					       NULL, be[i].to);
			if (!err)
				err = add_callchain_ip(thread, cursor, parent,
						       root_al, NULL,
						       be[i].from);
			if (err)
				return err;
		}
		chain_nr -= nr;
	}

check_calls:
	if (chain->nr > sysctl_perf_event_max_stack && (int)chain->nr > max_stack) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = first_call; i < chain_nr; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;

	if (symbol_conf.hide_unresolved && entry->sym == NULL)
		return 0;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}

int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(&callchain_cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}

*machine
,
1988 int (*fn
)(struct thread
*thread
, void *p
),
1992 struct thread
*thread
;
1995 for (nd
= rb_first(&machine
->threads
); nd
; nd
= rb_next(nd
)) {
1996 thread
= rb_entry(nd
, struct thread
, rb_node
);
1997 rc
= fn(thread
, priv
);
2002 list_for_each_entry(thread
, &machine
->dead_threads
, node
) {
2003 rc
= fn(thread
, priv
);
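/*
 * Illustrative callback (hypothetical, not part of the original file);
 * a non-zero return from the callback stops the walk early:
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	machine__for_each_thread(machine, count_thread, &n);
 */
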
int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int proc_map_timeout)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap, proc_map_timeout);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}

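/*
 * Usage note (a plausible pattern, not mandated by this file): callers
 * tracking per-cpu context switches can record them with
 * machine__set_current_tid(machine, cpu, pid, tid) as events arrive,
 * then map a later cpu back to a tid via
 * machine__get_current_tid(machine, cpu); -1 means no tid is known.
 */
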
int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map, machine->symbol_filter);
		if (map->start)
			machine->kernel_start = map->start;
	}
	return err;
}

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return dsos__findnew(&machine->dsos, filename);
}

char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map, NULL);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);
	return sym->name;
}
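
/*
 * Illustrative use (hypothetical values): given a kernel address in
 * *addrp, machine__resolve_kernel_addr() rewrites *addrp to the start
 * of the enclosing symbol and points *modp at the module short name
 * (or NULL for vmlinux), returning the symbol name:
 *
 *	unsigned long long addr = ip;
 *	char *mod;
 *	char *sym = machine__resolve_kernel_addr(machine, &addr, &mod);
 */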