#include <string.h>
#include <stdlib.h>
#include <stdio.h>
+#include <stdbool.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
struct perf_event_mmap_page *pc;
struct cds_list_head thread_field_node; /* Per-field list of thread fields (node) */
struct cds_list_head rcu_field_node; /* RCU per-thread list of fields (node) */
+ int fd; /* Perf FD */
};
struct lttng_perf_counter_thread {
static pthread_key_t perf_counter_key;
static
-size_t perf_counter_get_size(size_t offset)
+size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
{
size_t size = 0;
return low | ((uint64_t) high) << 32;
}
-#else /* defined(__x86_64__) || defined(__i386__) */
-
-#error "Perf event counters are only supported on x86 so far."
-
-#endif /* #else defined(__x86_64__) || defined(__i386__) */
+/*
+ * On x86, the counter is sampled from the mmap'd perf_event_mmap_page
+ * (see read_perf_counter() below), so the perf FD does not need to be
+ * kept open: setup_perf() closes it when this returns false.
+ */
+static bool arch_perf_use_read(void)
+{
+	return false;
+}
static
-uint64_t read_perf_counter(struct perf_event_mmap_page *pc)
+uint64_t read_perf_counter(
+ struct lttng_perf_counter_thread_field *thread_field)
{
uint32_t seq, idx;
uint64_t count;
+ struct perf_event_mmap_page *pc = thread_field->pc;
if (caa_unlikely(!pc))
return 0;
return count;
}
+#elif defined (__ARM_ARCH_7A__)
+
+/*
+ * On ARMv7 the counter is retrieved with read(2) on the perf event FD
+ * (see read_perf_counter() below), so the FD must stay open for the
+ * lifetime of the thread field.
+ */
+static bool arch_perf_use_read(void)
+{
+	return true;
+}
+
+/*
+ * ARMv7: read the 64-bit counter value through the perf event FD.
+ * Returns 0 when the FD is invalid or the read fails/short-reads.
+ */
+static
+uint64_t read_perf_counter(
+		struct lttng_perf_counter_thread_field *thread_field)
+{
+	uint64_t count;
+	ssize_t len;
+
+	/* fd is -1 when open_perf_fd() failed for this thread. */
+	if (caa_unlikely(thread_field->fd < 0))
+		return 0;
+
+	/*
+	 * read(2) returns ssize_t. Comparing its result directly against
+	 * the unsigned sizeof(count) would convert -1 to SIZE_MAX and
+	 * mask I/O errors, returning uninitialized data. Compare as
+	 * signed, treating errors and short reads alike as 0.
+	 */
+	len = read(thread_field->fd, &count, sizeof(count));
+	if (caa_unlikely(len < (ssize_t) sizeof(count)))
+		return 0;
+
+	return count;
+}
+
+#else /* defined(__x86_64__) || defined(__i386__) || defined(__ARM_ARCH_7A__) */
+
+#error "Perf event counters are only supported on x86 and ARMv7 so far."
+
+#endif /* #else defined(__x86_64__) || defined(__i386__) || defined(__ARM_ARCH_7A__) */
+
static
int sys_perf_event_open(struct perf_event_attr *attr,
pid_t pid, int cpu, int group_fd,
}
static
-struct perf_event_mmap_page *setup_perf(struct perf_event_attr *attr)
+/*
+ * Open a perf event FD for the calling thread (pid 0, any CPU, no
+ * group, no flags). Returns the FD, or -1 on error.
+ */
+int open_perf_fd(struct perf_event_attr *attr)
{
-	void *perf_addr;
-	int fd, ret;
+	int fd;
	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
-		return NULL;
+		return -1;
+
+	return fd;
+}
+
+/*
+ * Close a perf event FD obtained from open_perf_fd(). A negative FD
+ * (open failure, or FD already released) is silently ignored.
+ */
+static
+void close_perf_fd(int fd)
+{
+	int ret;
+
+	if (fd < 0)
+		return;
-	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
-			PROT_READ, MAP_SHARED, fd, 0);
-	if (perf_addr == MAP_FAILED)
-		return NULL;
	ret = close(fd);
	if (ret) {
-		perror("Error closing LTTng-UST perf memory mapping FD");
+		/* This closes the perf event FD, not the mmap'd page. */
+		perror("Error closing LTTng-UST perf event FD");
	}
+}
+
+/*
+ * mmap the perf event sampling page for this thread field's FD.
+ * Returns the mapped page, or NULL on mmap failure — the caller
+ * tolerates a NULL pc (see the note at the call site).
+ *
+ * On architectures that sample via read(2) (arch_perf_use_read()
+ * true), the FD must stay open and is kept in thread_field->fd.
+ * Otherwise the FD is only needed to establish the mapping, so it is
+ * closed here and thread_field->fd is reset to -1.
+ */
+static
+struct perf_event_mmap_page *setup_perf(
+		struct lttng_perf_counter_thread_field *thread_field)
+{
+	void *perf_addr;
+
+	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
+		PROT_READ, MAP_SHARED, thread_field->fd, 0);
+	if (perf_addr == MAP_FAILED)
+		perf_addr = NULL;
+
+	if (!arch_perf_use_read()) {
+		/* mmap page is sufficient on this arch; release the FD. */
+		close_perf_fd(thread_field->fd);
+		thread_field->fd = -1;
+	}
+
+	return perf_addr;
+}
if (!thread_field)
abort();
thread_field->field = perf_field;
- thread_field->pc = setup_perf(&perf_field->attr);
- /* Note: thread_field->pc can be NULL if setup_perf() fails. */
+ thread_field->fd = open_perf_fd(&perf_field->attr);
+ if (thread_field->fd >= 0)
+ thread_field->pc = setup_perf(thread_field);
+ /*
+ * Note: thread_field->pc can be NULL if setup_perf() fails.
+ * Also, thread_field->fd can be -1 if open_perf_fd() fails.
+ */
ust_lock_nocheck();
cds_list_add_rcu(&thread_field->rcu_field_node,
&perf_thread->rcu_field_list);
perf_field = field->u.perf_counter;
perf_thread_field = get_thread_field(perf_field);
- return read_perf_counter(perf_thread_field->pc);
+ return read_perf_counter(perf_thread_field);
}
static
+/*
+ * Context callback: read the per-thread perf counter for this field
+ * and store it in the value's s64 slot (the unsigned 64-bit counter is
+ * reinterpreted as signed; the signature change below tracks the
+ * lttng_ctx_value union-to-struct API migration).
+ */
static
void perf_counter_get_value(struct lttng_ctx_field *field,
-		union lttng_ctx_value *value)
+		struct lttng_ctx_value *value)
{
	uint64_t v;
	v = wrapper_perf_counter_read(field);
-	value->s64 = v;
+	value->u.s64 = v;
}
/* Called with UST lock held */
void lttng_destroy_perf_thread_field(
struct lttng_perf_counter_thread_field *thread_field)
{
+ close_perf_fd(thread_field->fd);
unmap_perf_page(thread_field->pc);
cds_list_del_rcu(&thread_field->rcu_field_node);
cds_list_del(&thread_field->thread_field_node);
free(perf_field);
}
+#ifdef __ARM_ARCH_7A__
+
+/*
+ * NOTE(review): on ARMv7 the event is created with exclude_kernel = 0 —
+ * presumably because the perf/PMU support there rejects or does not
+ * need kernel-mode exclusion; confirm against the ARMv7 perf event
+ * support. Other supported architectures exclude kernel-mode counts.
+ */
+static
+int perf_get_exclude_kernel(void)
+{
+	return 0;
+}
+
+#else /* __ARM_ARCH_7A__ */
+
+/* Default: count user-space events only (attr.exclude_kernel = 1). */
+static
+int perf_get_exclude_kernel(void)
+{
+	return 1;
+}
+
+#endif /* __ARM_ARCH_7A__ */
+
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
uint64_t config,
{
struct lttng_ctx_field *field;
struct lttng_perf_counter_field *perf_field;
- struct perf_event_mmap_page *tmp_pc;
char *name_alloc;
int ret;
perf_field->attr.type = type;
perf_field->attr.config = config;
- perf_field->attr.exclude_kernel = 1;
+ perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
field->u.perf_counter = perf_field;
/* Ensure that this perf counter can be used in this process. */
- tmp_pc = setup_perf(&perf_field->attr);
- if (!tmp_pc) {
+ ret = open_perf_fd(&perf_field->attr);
+ if (ret < 0) {
ret = -ENODEV;
goto setup_error;
}
- unmap_perf_page(tmp_pc);
+ close_perf_fd(ret);
/*
* Contexts can only be added before tracing is started, so we
* the field here.
*/
+ lttng_context_update(*ctx);
return 0;
setup_error: