/* tools/perf/util/dso.c */
#include <asm/bug.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "symbol.h"
#include "dso.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h"
#include "debug.h"

char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS] = 'k',
		[DSO_BINARY_TYPE__VMLINUX] = 'v',
		[DSO_BINARY_TYPE__JAVA_JIT] = 'j',
		[DSO_BINARY_TYPE__DEBUGLINK] = 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}

int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[BUILD_ID_SIZE * 2 + 1];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK: {
		char *debuglink;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		debuglink = filename + len;
		while (debuglink != filename && *debuglink != '/')
			debuglink--;
		if (*debuglink == '/')
			debuglink++;
		ret = filename__read_debuglink(filename, debuglink,
					       size - (debuglink - filename));
		}
		break;
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		/* skip the locally configured cache if a symfs is given */
		if (symbol_conf.symfs[0] ||
		    (dso__build_id_filename(dso, filename, size) == NULL))
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}
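
/*
 * Illustrative sketch (not part of the perf code itself): how a caller
 * might use dso__read_binary_type_filename() to probe one candidate
 * debuginfo location.  The dso pointer is assumed to come from the caller
 * and to already have a build id.
 *
 *	char path[PATH_MAX];
 *
 *	if (!dso__read_binary_type_filename(dso, DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
 *					    (char *)"", path, sizeof(path)))
 *		pr_debug("would try debuginfo at %s\n", path);
 */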

static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
} compressions[] = {
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file },
#endif
	{ NULL, NULL },
};

bool is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return true;
	}
	return false;
}

bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}

bool decompress_to_file(const char *ext, const char *filename, int output_fd)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return !compressions[i].decompress(filename,
							   output_fd);
	}
	return false;
}

bool dso__needs_decompress(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}
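
/*
 * Illustrative sketch combining the helpers above; not part of the perf
 * API.  The "ext" string and the temporary file handling are assumptions
 * about the caller.
 *
 *	if (dso__needs_decompress(dso) && is_supported_compression(ext)) {
 *		char tmp[] = "/tmp/perf-kmod-XXXXXX";
 *		int fd = mkstemp(tmp);
 *
 *		if (fd >= 0 && decompress_to_file(ext, dso->long_name, fd))
 *			pr_debug("decompressed %s into %s\n",
 *				 dso->long_name, tmp);
 *	}
 */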

/*
 * Parses the kernel module path given in @path and updates the @m argument
 * as follows:
 *
 *    @comp - true if @path contains a supported compression suffix,
 *            false otherwise
 *    @kmod - true if @path contains a '.ko' suffix in the right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed
 *            base name of the kernel module without suffixes, otherwise the
 *            strdup-ed base name of @path
 *    @ext  - if (@alloc_ext && @comp) is true, it contains the strdup-ed
 *            compression suffix
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name, bool alloc_ext)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for a module name. For example,
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than the '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such a name
	 * belongs to the kernel itself, not a kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	if (is_supported_compression(ext + 1)) {
		m->comp = true;
		ext -= 3;
	}

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strxfrchar(m->name, '-', '_');
	}

	if (alloc_ext && m->comp) {
		m->ext = strdup(ext + 4);
		if (!m->ext) {
			free((void *) m->name);
			return -ENOMEM;
		}
	}

	return 0;
}
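
/*
 * Illustrative sketch of parsing a compressed module path with the helper
 * above (assuming zlib support is built in, so "gz" is accepted):
 *
 *	struct kmod_path m;
 *
 *	if (!__kmod_path__parse(&m, "/lib/modules/4.2/fs/ext4/ext4.ko.gz",
 *				true, true)) {
 *		(here m.kmod == true, m.comp == true,
 *		 m.name == "[ext4]" and m.ext == "gz")
 *		free((void *)m.name);
 *		free((void *)m.ext);
 *	}
 */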

/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);

static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}

static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX)) {
		free(name);
		return -EINVAL;
	}

	fd = do_open(name);
	free(name);
	return fd;
}

static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 *
 * Open @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd = __open_dso(dso, machine);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}

static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}

static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}

static bool may_cache_fd(void)
{
	static rlim_t limit;

	if (!limit)
		limit = get_fd_limit();

	if (limit == RLIM_INFINITY)
		return true;

	return limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close the LRU dso if we crossed the allowed limit
 * for opened dso file descriptors. The limit is half
 * of the current RLIMIT_NOFILE.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}
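
/*
 * Worked example of the limit above (numbers purely illustrative): with a
 * soft RLIMIT_NOFILE of 1024, get_fd_limit() returns 512, so may_cache_fd()
 * keeps returning true until dso__data_open_cnt reaches 512; from then on
 * each new open_dso() makes check_data_close() call close_first_dso() on
 * the oldest entry of dso__data_open, keeping roughly half of the fd budget
 * free for the rest of the tool.
 */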

/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}

static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find dso's file, open it and
 * return the file descriptor. It must be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}
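
/*
 * Illustrative sketch of the get/put pairing required by the interface
 * above (dso__type() further down follows the same pattern):
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *
 *	if (fd >= 0) {
 *		... read from fd while dso__data_open_lock is held ...
 *		dso__data_put_fd(dso);
 *	}
 */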

bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}

static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}

static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}

static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}

static ssize_t
dso_cache__memcpy(struct dso_cache *cache, u64 offset,
		  u8 *data, u64 size)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size = min(cache->size - cache_offset, size);

	memcpy(data, cache->data + cache_offset, cache_size);
	return cache_size;
}

static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;
	struct dso_cache *old;
	ssize_t ret;

	do {
		u64 cache_offset;

		cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
		if (!cache)
			return -ENOMEM;

		pthread_mutex_lock(&dso__data_open_lock);

		/*
		 * dso->data.fd might be closed if another thread opened
		 * another file (dso) due to the open file limit
		 * (RLIMIT_NOFILE).
		 */
		try_to_open_dso(dso, machine);

		if (dso->data.fd < 0) {
			ret = -errno;
			dso->data.status = DSO_DATA_STATUS_ERROR;
			break;
		}

		cache_offset = offset & DSO__DATA_CACHE_MASK;

		ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
		if (ret <= 0)
			break;

		cache->offset = cache_offset;
		cache->size = ret;
	} while (0);

	pthread_mutex_unlock(&dso__data_open_lock);

	if (ret > 0) {
		old = dso_cache__insert(dso, cache);
		if (old) {
			/* we lose the race */
			free(cache);
			cache = old;
		}

		ret = dso_cache__memcpy(cache, offset, data, size);
	}

	if (ret <= 0)
		free(cache);

	return ret;
}

static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;

	cache = dso_cache__find(dso, offset);
	if (cache)
		return dso_cache__memcpy(cache, offset, data, size);
	else
		return dso_cache__read(dso, machine, offset, data, size);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
 * in the rb_tree. Any read of already cached data is served
 * from the cached data.
 */
static ssize_t cached_read(struct dso *dso, struct machine *machine,
			   u64 offset, u8 *data, ssize_t size)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_read(dso, machine, offset, p, size);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r += ret;
		p += ret;
		offset += ret;
		size -= ret;

	} while (size);

	return r;
}
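
/*
 * Worked example of the chunking above, assuming the usual 4 KiB
 * DSO__DATA_CACHE_SIZE from dso.h: a 16 byte read at offset 0x1ff8 first
 * hits the cache node at offset 0x1000 (0x1ff8 & DSO__DATA_CACHE_MASK) and
 * copies 8 bytes, then cached_read() loops and dso_cache_read() serves the
 * remaining 8 bytes from (or populates) the node at offset 0x2000.
 */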

static int data_file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if another thread opened another
	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       strerror_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (data_file_size(dso, machine))
		return -1;

	/* For now just estimate that the dso data size is close to the file size */
	return dso->data.file_size;
}

static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
				u64 offset, u8 *data, ssize_t size)
{
	if (data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_read(dso, machine, offset, data, size);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso file offset. Opens
 * the dso data file and uses cached_read() to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map object
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso virtual address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);
	return dso__data_read_offset(dso, machine, offset, data, size);
}
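
/*
 * Illustrative sketch (the buffer size and the sample->ip field are
 * assumptions about the caller): an annotation or instruction-decoding
 * path could pull raw bytes for a sampled address like this.
 *
 *	u8 buf[64];
 *	ssize_t len = dso__data_read_addr(dso, map, machine, sample->ip,
 *					  buf, sizeof(buf));
 *
 *	if (len > 0)
 *		... decode up to len bytes from buf ...
 */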

struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso)
		map = map__new2(0, dso, MAP__FUNCTION);

	return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}

/*
 * Find a matching entry and/or link the current entry to the RB tree.
 * Either the dso or the name parameter must be non-NULL or the
 * function will not work.
 */
static struct dso *__dso__findlink_by_longname(struct rb_root *root,
					       struct dso *dso, const char *name)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	if (!name)
		name = dso->long_name;
	/*
	 * Find the node with the matching name
	 */
	while (*p) {
		struct dso *this = rb_entry(*p, struct dso, rb_node);
		int rc = strcmp(name, this->long_name);

		parent = *p;
		if (rc == 0) {
			/*
			 * In case the new DSO is a duplicate of an existing
			 * one, print a one-time warning & put the new entry
			 * at the end of the list of duplicates.
			 */
			if (!dso || (dso == this))
				return this;	/* Find matching dso */
			/*
			 * The core kernel DSOs may have duplicated long names.
			 * In this case, the short names should be different.
			 * Compare the short names to differentiate the DSOs.
			 */
			rc = strcmp(dso->short_name, this->short_name);
			if (rc == 0) {
				pr_err("Duplicated dso name: %s\n", name);
				return NULL;
			}
		}
		if (rc < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	if (dso) {
		/* Add new node and rebalance tree */
		rb_link_node(&dso->rb_node, parent, p);
		rb_insert_color(&dso->rb_node, root);
	}
	return NULL;
}

static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
{
	return __dso__findlink_by_longname(root, NULL, name);
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	dso->long_name = name;
	dso->long_name_len = strlen(name);
	dso->long_name_allocated = name_allocated;
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name = name;
	dso->short_name_len = strlen(name);
	dso->short_name_allocated = name_allocated;
}

static void dso__set_basename(struct dso *dso)
{
	/*
	 * basename() may modify the path buffer, so we must pass
	 * a copy.
	 */
	char *base, *lname = strdup(dso->long_name);

	if (!lname)
		return;

	/*
	 * basename() may return a pointer to internal
	 * storage which is reused in subsequent calls
	 * so copy the result.
	 */
	base = strdup(basename(lname));

	free(lname);

	if (!base)
		return;

	dso__set_short_name(dso, base, true);
}

int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose)
		return dso->long_name_len;

	return dso->short_name_len;
}

bool dso__loaded(const struct dso *dso, enum map_type type)
{
	return dso->loaded & (1 << type);
}

bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
{
	return dso->sorted_by_name & (1 << type);
}

void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
{
	dso->sorted_by_name |= (1 << type);
}

struct dso *dso__new(const char *name)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		int i;
		strcpy(dso->name, name);
		dso__set_long_name(dso, dso->name, false);
		dso__set_short_name(dso, dso->name, false);
		for (i = 0; i < MAP__NR_TYPES; ++i)
			dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
		dso->data.cache = RB_ROOT;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_TYPE_USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		RB_CLEAR_NODE(&dso->rb_node);
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		atomic_set(&dso->refcnt, 1);
	}

	return dso;
}

void dso__delete(struct dso *dso)
{
	int i;

	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);
	for (i = 0; i < MAP__NR_TYPES; ++i)
		symbols__delete(&dso->symbols[i]);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}

struct dso *dso__get(struct dso *dso)
{
	if (dso)
		atomic_inc(&dso->refcnt);
	return dso;
}

void dso__put(struct dso *dso)
{
	if (dso && atomic_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}
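
/*
 * Illustrative sketch of the reference counting contract above; the
 * library path is just an example.  Whoever stores a dso pointer takes a
 * reference and drops it when done:
 *
 *	struct dso *dso = dso__new("/usr/lib64/libc-2.22.so");    (refcnt 1)
 *
 *	if (dso) {
 *		struct dso *ref = dso__get(dso);                   (refcnt 2)
 *
 *		dso__put(ref);                                     (refcnt 1)
 *		dso__put(dso);              (refcnt 0, dso__delete() runs)
 *	}
 */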

void dso__set_build_id(struct dso *dso, void *build_id)
{
	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
	dso->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
{
	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;
}

int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;

	return 0;
}

bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
	bool have_build_id = false;
	struct dso *pos;

	list_for_each_entry(pos, head, node) {
		if (with_hits && !pos->hit)
			continue;
		if (pos->has_build_id) {
			have_build_id = true;
			continue;
		}
		if (filename__read_build_id(pos->long_name, pos->build_id,
					    sizeof(pos->build_id)) > 0) {
			have_build_id = true;
			pos->has_build_id = true;
		}
	}

	return have_build_id;
}

void __dsos__add(struct dsos *dsos, struct dso *dso)
{
	list_add_tail(&dso->node, &dsos->head);
	__dso__findlink_by_longname(&dsos->root, dso, NULL);
	/*
	 * It is now in the linked list, so grab a reference. When memory is
	 * needed we can garbage collect by looking for LRU dso instances in
	 * the list with atomic_read(&dso->refcnt) == 1, i.e. no references
	 * anywhere besides the one for the list. For those, under a lock for
	 * the list: remove the dso from the list, then dso__put() it, which
	 * probably drops the last reference and calls dso__delete(), end of
	 * life.
	 *
	 * That, or at the end of the 'struct machine' lifetime, when all
	 * 'struct dso' instances will be removed from the list, in
	 * dsos__exit(), if they have no other reference from some other data
	 * structure.
	 *
	 * E.g.: after processing a 'perf.data' file and storing references
	 * to objects instantiated while processing events, we will have
	 * references to the 'thread', 'map', 'dso' structs all from 'struct
	 * hist_entry' instances, but we may not need anything not referenced,
	 * so we might as well call machines__exit()/machines__delete() and
	 * garbage collect it.
	 */
	dso__get(dso);
}

void dsos__add(struct dsos *dsos, struct dso *dso)
{
	pthread_rwlock_wrlock(&dsos->lock);
	__dsos__add(dsos, dso);
	pthread_rwlock_unlock(&dsos->lock);
}

struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *pos;

	if (cmp_short) {
		list_for_each_entry(pos, &dsos->head, node)
			if (strcmp(pos->short_name, name) == 0)
				return pos;
		return NULL;
	}
	return __dso__find_by_longname(&dsos->root, name);
}

struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *dso;
	pthread_rwlock_rdlock(&dsos->lock);
	dso = __dsos__find(dsos, name, cmp_short);
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}

struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = dso__new(name);

	if (dso != NULL) {
		__dsos__add(dsos, dso);
		dso__set_basename(dso);
	}
	return dso;
}

struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = __dsos__find(dsos, name, false);

	return dso ? dso : __dsos__addnew(dsos, name);
}

struct dso *dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso;
	pthread_rwlock_wrlock(&dsos->lock);
	dso = dso__get(__dsos__findnew(dsos, name));
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}
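
/*
 * Illustrative sketch: dsos__findnew(), unlike __dsos__findnew(), grabs an
 * extra reference for the caller, so its result must be balanced with
 * dso__put().  Using machine->dsos here is an assumption about the typical
 * caller.
 *
 *	struct dso *dso = dsos__findnew(&machine->dsos, "[vdso]");
 *
 *	if (dso) {
 *		... use dso ...
 *		dso__put(dso);
 *	}
 */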

size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
			       bool (skip)(struct dso *dso, int parm), int parm)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		if (skip && skip(pos, parm))
			continue;
		ret += dso__fprintf_buildid(pos, fp);
		ret += fprintf(fp, " %s\n", pos->long_name);
	}
	return ret;
}

size_t __dsos__fprintf(struct list_head *head, FILE *fp)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		int i;
		for (i = 0; i < MAP__NR_TYPES; ++i)
			ret += dso__fprintf(pos, i, fp);
	}

	return ret;
}

size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}

size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);

	if (dso->short_name != dso->long_name)
		ret += fprintf(fp, "%s, ", dso->long_name);
	ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
		       dso__loaded(dso, type) ? "" : "NOT ");
	ret += dso__fprintf_buildid(dso, fp);
	ret += fprintf(fp, ")\n");
	for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		ret += symbol__fprintf(pos, fp);
	}

	return ret;
}

enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
	int fd;
	enum dso_type type = DSO__TYPE_UNKNOWN;

	fd = dso__data_get_fd(dso, machine);
	if (fd >= 0) {
		type = dso__type_fd(fd);
		dso__data_put_fd(dso);
	}

	return type;
}

int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = dso->load_errno;
	/*
	 * This must have the same ordering as the enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
		"Internal tools/perf/ library error",
		"Invalid ELF file",
		"Can not read build id",
		"Mismatching build id",
		"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		const char *err = strerror_r(errnum, buf, buflen);

		if (err != buf)
			scnprintf(buf, buflen, "%s", err);

		return 0;
	}

	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;
	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}
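
/*
 * Illustrative sketch of reporting a failed symbol load with the helper
 * above.  The dso__load() call site and the NULL filter are assumptions;
 * any path that sets dso->load_errno can do the same.
 *
 *	if (dso__load(dso, map, NULL) < 0) {
 *		char buf[BUFSIZ];
 *
 *		dso__strerror_load(dso, buf, sizeof(buf));
 *		pr_err("Failed to load %s: %s\n", dso->long_name, buf);
 *	}
 */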