char padding[LTTNG_UST_RING_BUFFER_CONFIG_PADDING];
};
+/* State returned by rseq_start, passed as argument to rseq_finish. */
+struct lttng_rseq_state {
+ volatile struct rseq *rseqp; /* Per-thread rseq area sampled at start. */
+ int32_t cpu_id; /* cpu_id at start. Negative: rseq not usable
+ * (-1: thread not registered, -2: unavailable). */
+ uint32_t event_counter; /* event_counter at start. */
+};
+
/*
* ring buffer context
*
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_rseq_state rseq_state;
int ret, cpu;
- //TODO register (lazy)
- cpu = lib_ring_buffer_get_cpu(&client_config);
- if (cpu < 0)
+ if (lib_ring_buffer_begin(&client_config))
return -EPERM;
+retry:
+ rseq_state = rseq_start();
+ if (caa_unlikely(rseq_cpu_at_start(rseq_state) < 0)) {
+ if (caa_unlikely(rseq_cpu_at_start(rseq_state) == -1)) {
+ if (!rseq_register_current_thread())
+ goto retry;
+ }
+ /* rseq is unavailable. */
+ cpu = lib_ring_buffer_get_cpu(&client_config);
+ if (caa_unlikely(cpu < 0)) {
+ ret = -EPERM;
+ goto end;
+ }
+ } else {
+ cpu = rseq_cpu_at_start(rseq_state);
+ }
ctx->cpu = cpu;
switch (lttng_chan->header_type) {
if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
&ctx->backend_pages)) {
ret = -EPERM;
- goto put;
+ goto end;
}
}
lttng_write_event_header(&client_config, ctx, event_id);
return 0;
-put:
- lib_ring_buffer_put_cpu(&client_config);
+end:
+ lib_ring_buffer_end(&client_config);
return ret;
}
void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
 lib_ring_buffer_commit(&client_config, ctx);
- lib_ring_buffer_put_cpu(&client_config);
+ /* Leave the nesting-counted section entered by lib_ring_buffer_begin(). */
+ lib_ring_buffer_end(&client_config);
}
static
#include "lttng-ust-statedump.h"
#include "clock.h"
#include "../libringbuffer/getcpu.h"
+#include "../libringbuffer/rseq.h"
#include "getenv.h"
/*
lttng_ust_clock_init();
lttng_ust_getcpu_init();
lttng_ust_statedump_init();
+ rseq_init();
lttng_ring_buffer_metadata_client_init();
lttng_ring_buffer_client_overwrite_init();
lttng_ring_buffer_client_overwrite_rt_init();
lttng_ring_buffer_client_overwrite_rt_exit();
lttng_ring_buffer_client_overwrite_exit();
lttng_ring_buffer_metadata_client_exit();
+ rseq_destroy();
lttng_ust_statedump_destroy();
exit_tracepoint();
if (!exiting) {
#include <urcu-bp.h>
#include <urcu/compiler.h>
+/*
+ * Fallback CPU number lookup, used when rseq reports no usable cpu_id.
+ * Unlike the old get_cpu, this no longer maintains the nesting count;
+ * callers are expected to bracket it with lib_ring_buffer_begin/end.
+ */
+static inline
+int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+{
+ return lttng_ust_get_cpu();
+}
+
/**
- * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
+ * lib_ring_buffer_begin - Precedes ring buffer reserve/commit.
*
* Keeps a ring buffer nesting count as supplementary safety net to
* ensure tracer client code will never trigger an endless recursion.
* section.
*/
static inline
-int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+int lib_ring_buffer_begin(const struct lttng_ust_lib_ring_buffer_config *config)
{
- int cpu, nesting;
+ int nesting;
- cpu = lttng_ust_get_cpu();
nesting = ++URCU_TLS(lib_ring_buffer_nesting);
cmm_barrier();
WARN_ON_ONCE(1);
URCU_TLS(lib_ring_buffer_nesting)--;
return -EPERM;
- } else
- return cpu;
+ }
+ return 0;
}
/**
- * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
+ * lib_ring_buffer_end - Follows ring buffer reserve/commit.
*/
static inline
-void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+void lib_ring_buffer_end(const struct lttng_ust_lib_ring_buffer_config *config)
{
cmm_barrier();
URCU_TLS(lib_ring_buffer_nesting)--; /* TLS */
_teardown \
"b %l[failure]\n\t" \
"4:\n\t" \
- : /* no outputs */ \
+ : /* gcc asm goto does not allow outputs */ \
: [start_event_counter]"r"((_start_value).event_counter), \
[current_event_counter]"m"((_start_value).rseqp->u.e.event_counter), \
[rseq_cs]"r"(&(_start_value).rseqp->rseq_cs) \
_teardown \
"b %l[failure]\n\t" \
"5:\n\t" \
- : /* no outputs */ \
+ : /* gcc asm goto does not allow outputs */ \
: [start_event_counter]"r"((_start_value).event_counter), \
[current_event_counter]"m"((_start_value).rseqp->u.e.event_counter), \
[rseq_cs]"b"(&(_start_value).rseqp->rseq_cs) \
_teardown \
"b %l[failure]\n\t" \
"5:\n\t" \
- : /* no outputs */ \
+ : /* gcc asm goto does not allow outputs */ \
: [start_event_counter]"r"((_start_value).event_counter), \
[current_event_counter]"m"((_start_value).rseqp->u.e.event_counter), \
[rseq_cs]"b"(&(_start_value).rseqp->rseq_cs) \
"1:\n\t" \
_setup \
RSEQ_INJECT_ASM(1) \
- "movq $3b, %[rseq_cs]\n\t" \
+ "leaq 3b(%%rip), %%rax\n\t" \
+ "movq %%rax, %[rseq_cs]\n\t" \
RSEQ_INJECT_ASM(2) \
"cmpl %[start_event_counter], %[current_event_counter]\n\t" \
"jnz 4f\n\t" \
_teardown \
"jmp %l[failure]\n\t" \
".popsection\n\t" \
- : /* no outputs */ \
+ : /* gcc asm goto does not allow outputs */ \
: [start_event_counter]"r"((_start_value).event_counter), \
[current_event_counter]"m"((_start_value).rseqp->u.e.event_counter), \
[rseq_cs]"m"((_start_value).rseqp->rseq_cs) \
_spec_input \
_final_input \
RSEQ_INJECT_INPUT \
- : "memory", "cc" \
+ : "memory", "cc", "rax" \
_extra_clobber \
RSEQ_INJECT_CLOBBER \
: _failure \
_teardown \
"jmp %l[failure]\n\t" \
".popsection\n\t" \
- : /* no outputs */ \
+ : /* gcc asm goto does not allow outputs */ \
: [start_event_counter]"m"((_start_value).event_counter), \
[current_event_counter]"m"((_start_value).rseqp->u.e.event_counter), \
[rseq_cs]"m"((_start_value).rseqp->rseq_cs) \
.u.e.cpu_id = -1,
};
+/* Own state, not shared with other libs. */
+static __thread int rseq_registered;
+
+/*
+ * pthread key whose destructor unregisters rseq at thread exit
+ * (see destroy_rseq_key / rseq_register_current_thread).
+ */
+static pthread_key_t rseq_key;
+
+
+#ifdef __NR_rseq
static int sys_rseq(volatile struct rseq *rseq_abi, int flags)
{
 return syscall(__NR_rseq, rseq_abi, flags);
}
+#else
+/* Kernel headers lack __NR_rseq: report the syscall as unimplemented. */
+static int sys_rseq(volatile struct rseq *rseq_abi, int flags)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
static void signal_off_save(sigset_t *oldset)
{
abort();
}
+/*
+ * Unregister the current thread's rseq area with the kernel.
+ * Returns 0 on success (or if the thread was never registered),
+ * -1 on syscall error. Signals are blocked across the syscall —
+ * presumably so signal handlers cannot observe a half-torn-down
+ * registration state (NOTE(review): confirm against signal_off_save).
+ */
+int rseq_unregister_current_thread(void)
+{
+ sigset_t oldset;
+ int rc, ret = 0;
+
+ signal_off_save(&oldset);
+ if (rseq_registered) {
+ rc = sys_rseq(NULL, 0);
+ if (rc) {
+ fprintf(stderr, "Error: sys_rseq(...) failed(%d): %s\n",
+ errno, strerror(errno));
+ ret = -1;
+ goto end;
+ }
+ rseq_registered = 0;
+ }
+end:
+ signal_restore(oldset);
+ return ret;
+}
+
+
+/*
+ * pthread key destructor: runs at thread exit to unregister rseq.
+ * Failure to unregister is fatal, since the kernel would keep a
+ * pointer to freed TLS otherwise.
+ */
+static void destroy_rseq_key(void *key)
+{
+ if (rseq_unregister_current_thread())
+ abort();
+}
+
+
int rseq_register_current_thread(void)
{
- int rc;
+ sigset_t oldset;
+ int rc, ret = 0;
- rc = sys_rseq(&__rseq_abi, 0);
- if (rc) {
- fprintf(stderr, "Error: sys_rseq(...) failed(%d): %s\n",
- errno, strerror(errno));
- return -1;
+ signal_off_save(&oldset);
+ if (caa_likely(!rseq_registered)) {
+ rc = sys_rseq(&__rseq_abi, 0);
+ if (rc) {
+ fprintf(stderr, "Error: sys_rseq(...) failed(%d): %s\n",
+ errno, strerror(errno));
+ /*
+ * Mark rseq permanently unavailable (-2) so callers
+ * stop retrying registration (-1 means merely
+ * unregistered — see the retry loop in event reserve).
+ */
+ __rseq_abi.u.e.cpu_id = -2;
+ ret = -1;
+ goto end;
+ }
+ rseq_registered = 1;
+ assert(rseq_current_cpu_raw() >= 0);
+ /*
+ * Register destroy notifier. Pointer needs to
+ * be non-NULL.
+ */
+ if (pthread_setspecific(rseq_key, (void *)0x1))
+ abort();
 }
- assert(rseq_current_cpu() >= 0);
- return 0;
+end:
+ signal_restore(oldset);
+ return ret;
}
-int rseq_unregister_current_thread(void)
+/*
+ * Process-wide rseq setup: create the pthread key whose destructor
+ * unregisters each thread at exit. Aborts on failure, since tracing
+ * cannot operate safely without the teardown hook.
+ */
+void rseq_init(void)
{
- int rc;
+ int ret;
- rc = sys_rseq(NULL, 0);
- if (rc) {
- fprintf(stderr, "Error: sys_rseq(...) failed(%d): %s\n",
- errno, strerror(errno));
- return -1;
+ ret = pthread_key_create(&rseq_key, destroy_rseq_key);
+ if (ret) {
+ /* pthread_* return positive error numbers; do not negate. */
+ errno = ret;
+ perror("pthread_key_create");
+ abort();
+ }
+}
+
+/* Process-wide rseq teardown: delete the thread-exit notifier key. */
+void rseq_destroy(void)
+{
+ int ret;
+
+ ret = pthread_key_delete(rseq_key);
+ if (ret) {
+ /* pthread_* return positive error numbers; do not negate. */
+ errno = ret;
+ perror("pthread_key_delete");
+ abort();
 }
- return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <sched.h>
+#include <unistd.h>
#include <urcu/compiler.h>
#include <urcu/system.h>
#include <urcu/arch.h>
+#include <lttng/ringbuffer-config.h> /* for struct lttng_rseq_state */
#include "linux-rseq-abi.h"
/*
extern __thread volatile struct rseq __rseq_abi;
#if defined(__x86_64__) || defined(__i386__)
-#include <rseq-x86.h>
+#include "rseq-x86.h"
+#ifdef __NR_rseq
+#define ARCH_HAS_RSEQ 1
+#endif
#elif defined(__ARMEL__)
-#include <rseq-arm.h>
+#include "rseq-arm.h"
+#ifdef __NR_rseq
+#define ARCH_HAS_RSEQ 1
+#endif
#elif defined(__PPC__)
-#include <rseq-ppc.h>
+#include "rseq-ppc.h"
+#ifdef __NR_rseq
+#define ARCH_HAS_RSEQ 1
+#endif
#else
#error unsupported target
#endif
-/* State returned by rseq_start, passed as argument to rseq_finish. */
-struct rseq_state {
- volatile struct rseq *rseqp;
- int32_t cpu_id; /* cpu_id at start. */
- uint32_t event_counter; /* event_counter at start. */
-};
-
/*
* Register rseq for the current thread. This needs to be called once
* by any thread which uses restartable sequences, before they start
*/
int rseq_unregister_current_thread(void);
-/*
- * Restartable sequence fallback for reading the current CPU number.
- */
-int rseq_fallback_current_cpu(void);
+void rseq_init(void);
+void rseq_destroy(void);
-static inline int32_t rseq_cpu_at_start(struct rseq_state start_value)
+/* CPU id sampled by rseq_start(); negative means rseq was not usable. */
+static inline int32_t rseq_cpu_at_start(struct lttng_rseq_state start_value)
{
 return start_value.cpu_id;
}
return CMM_LOAD_SHARED(__rseq_abi.u.e.cpu_id);
}
-static inline int32_t rseq_current_cpu(void)
-{
- int32_t cpu;
-
- cpu = rseq_current_cpu_raw();
- if (caa_unlikely(cpu < 0))
- cpu = rseq_fallback_current_cpu();
- return cpu;
-}
-
+#ifdef ARCH_HAS_RSEQ
static inline __attribute__((always_inline))
-struct rseq_state rseq_start(void)
+struct lttng_rseq_state rseq_start(void)
{
- struct rseq_state result;
+ struct lttng_rseq_state result;
result.rseqp = &__rseq_abi;
if (has_single_copy_load_64()) {
cmm_barrier();
return result;
}
+#else
+/*
+ * No rseq support on this arch/kernel: report cpu_id == -2 so callers
+ * take the lib_ring_buffer_get_cpu() fallback path instead of retrying
+ * registration.
+ */
+static inline __attribute__((always_inline))
+struct lttng_rseq_state rseq_start(void)
+{
+ struct lttng_rseq_state result = {
+ .cpu_id = -2,
+ };
+ return result;
+}
+#endif
enum rseq_finish_type {
RSEQ_FINISH_SINGLE,
* p_final and to_write_final are used for the final write. If this
* write takes place, the rseq_finish2 is guaranteed to succeed.
*/
+#ifdef ARCH_HAS_RSEQ
static inline __attribute__((always_inline))
bool __rseq_finish(intptr_t *p_spec, intptr_t to_write_spec,
void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
intptr_t *p_final, intptr_t to_write_final,
- struct rseq_state start_value,
+ struct lttng_rseq_state start_value,
enum rseq_finish_type type, bool release)
{
RSEQ_INJECT_C(9)
RSEQ_INJECT_FAILED
return false;
}
+#else
+/*
+ * No rseq support: always report failure, matching the failure return
+ * of the real implementation, so callers take their non-rseq path.
+ */
+static inline __attribute__((always_inline))
+bool __rseq_finish(intptr_t *p_spec, intptr_t to_write_spec,
+ void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
+ intptr_t *p_final, intptr_t to_write_final,
+ struct lttng_rseq_state start_value,
+ enum rseq_finish_type type, bool release)
+{
+ return false;
+}
+#endif
static inline __attribute__((always_inline))
bool rseq_finish(intptr_t *p, intptr_t to_write,
- struct rseq_state start_value)
+ struct lttng_rseq_state start_value)
{
return __rseq_finish(NULL, 0,
NULL, NULL, 0,
static inline __attribute__((always_inline))
bool rseq_finish2(intptr_t *p_spec, intptr_t to_write_spec,
intptr_t *p_final, intptr_t to_write_final,
- struct rseq_state start_value)
+ struct lttng_rseq_state start_value)
{
return __rseq_finish(p_spec, to_write_spec,
NULL, NULL, 0,
static inline __attribute__((always_inline))
bool rseq_finish2_release(intptr_t *p_spec, intptr_t to_write_spec,
intptr_t *p_final, intptr_t to_write_final,
- struct rseq_state start_value)
+ struct lttng_rseq_state start_value)
{
return __rseq_finish(p_spec, to_write_spec,
NULL, NULL, 0,
static inline __attribute__((always_inline))
bool rseq_finish_memcpy(void *p_memcpy, void *to_write_memcpy,
size_t len_memcpy, intptr_t *p_final, intptr_t to_write_final,
- struct rseq_state start_value)
+ struct lttng_rseq_state start_value)
{
return __rseq_finish(NULL, 0,
p_memcpy, to_write_memcpy, len_memcpy,
static inline __attribute__((always_inline))
bool rseq_finish_memcpy_release(void *p_memcpy, void *to_write_memcpy,
size_t len_memcpy, intptr_t *p_final, intptr_t to_write_final,
- struct rseq_state start_value)
+ struct lttng_rseq_state start_value)
{
return __rseq_finish(NULL, 0,
p_memcpy, to_write_memcpy, len_memcpy,