void *priv; /* Private tracer context. */
} SIDE_PACKED;
+/*
+ * This structure is _not_ packed to allow atomic operations on its
+ * fields.
+ */
+struct side_event_state {
+ uintptr_t enabled; /* Enable state; top bits are reserved for the kernel (SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK). */
+ const struct side_callback *callbacks; /* RCU-protected array, terminated by an entry with a NULL function pointer. */
+ uint32_t nr_callbacks; /* Number of registered callbacks, excluding the NULL terminator. */
+};
+
struct side_event_description {
- uintptr_t *enabled;
+ struct side_event_state *state; /* Mutable per-event state (enabled, callbacks) kept out-of-line so it can stay unpacked for atomics. */
const char *provider_name;
const char *event_name;
const struct side_event_field *fields;
const struct side_attr *attr;
- const struct side_callback *callbacks;
uint64_t flags;
uint32_t version;
uint32_t loglevel; /* enum side_loglevel */
uint32_t nr_fields;
uint32_t nr_attr;
- uint32_t nr_callbacks;
} SIDE_PACKED;
/* Event and type attributes */
#define side_arg_list(...) __VA_ARGS__
+/* Execute the following statement only when the event is enabled (relaxed atomic load of the state's enabled word). */
#define side_event_cond(_identifier) \
- if (side_unlikely(__atomic_load_n(&side_event_enable__##_identifier, \
+ if (side_unlikely(__atomic_load_n(&side_event_state__##_identifier.enabled, \
__ATOMIC_RELAXED)))
#define side_event_call(_identifier, _sav) \
side_event_call_variadic(_identifier, SIDE_PARAM(_sav), SIDE_PARAM(_var), SIDE_PARAM_SELECT_ARG1(_, ##_attr, side_attr_list()))
+/*
+ * Define both the per-event mutable state (section "side_event_state")
+ * and the immutable event description (section "side_event_description"),
+ * plus a pointer to the description (section "side_event_description_ptr")
+ * for section-based event enumeration.
+ */
#define _side_define_event(_linkage, _identifier, _provider, _event, _loglevel, _fields, _flags, _attr...) \
- _linkage uintptr_t side_event_enable__##_identifier __attribute__((section("side_event_enable"))); \
+ _linkage struct side_event_state __attribute__((section("side_event_state"))) \
+ side_event_state__##_identifier = { \
+ .enabled = 0, /* Event starts disabled; incremented when the first callback registers. */ \
+ .callbacks = &side_empty_callback, /* Empty list: points directly at the terminating entry. */ \
+ .nr_callbacks = 0, \
+ }; \
_linkage struct side_event_description __attribute__((section("side_event_description"))) \
_identifier = { \
- .enabled = &(side_event_enable__##_identifier), \
+ .state = &(side_event_state__##_identifier), \
.provider_name = _provider, \
.event_name = _event, \
.fields = _fields, \
.attr = SIDE_PARAM_SELECT_ARG1(_, ##_attr, side_attr_list()), \
- .callbacks = &side_empty_callback, \
.flags = (_flags), \
.version = 0, \
.loglevel = _loglevel, \
.nr_fields = SIDE_ARRAY_SIZE(SIDE_PARAM(_fields)), \
.nr_attr = SIDE_ARRAY_SIZE(SIDE_PARAM_SELECT_ARG1(_, ##_attr, side_attr_list())), \
- .nr_callbacks = 0, \
}; \
static const struct side_event_description *side_event_ptr__##_identifier \
__attribute__((section("side_event_description_ptr"), used)) = &(_identifier);
_loglevel, SIDE_PARAM(_fields), SIDE_EVENT_FLAG_VARIADIC, SIDE_PARAM_SELECT_ARG1(_, ##_attr, side_attr_list()))
+/* Declare the event state and description objects defined elsewhere by _side_define_event(). */
#define side_declare_event(_identifier) \
- extern uintptr_t side_event_enable_##_identifier; \
+ extern struct side_event_state side_event_state__##_identifier; \
extern struct side_event_description _identifier
#ifdef __cplusplus
printf("ERROR: unexpected variadic event description\n");
abort();
}
- enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
+ enabled = __atomic_load_n(&desc->state->enabled, __ATOMIC_RELAXED);
if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
// TODO: call kernel write.
}
side_rcu_read_begin(&rcu_gp, &rcu_read_state);
- for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
+ for (side_cb = side_rcu_dereference(desc->state->callbacks); side_cb->u.call != NULL; side_cb++)
side_cb->u.call(desc, side_arg_vec, side_cb->priv);
side_rcu_read_end(&rcu_gp, &rcu_read_state);
}
printf("ERROR: unexpected non-variadic event description\n");
abort();
}
- enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
+ enabled = __atomic_load_n(&desc->state->enabled, __ATOMIC_RELAXED);
if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
// TODO: call kernel write.
}
side_rcu_read_begin(&rcu_gp, &rcu_read_state);
- for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
+ for (side_cb = side_rcu_dereference(desc->state->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
side_cb->u.call_variadic(desc, side_arg_vec, var_struct, side_cb->priv);
side_rcu_read_end(&rcu_gp, &rcu_read_state);
}
{
const struct side_callback *cb;
- for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
+ for (cb = desc->state->callbacks; cb->u.call != NULL; cb++) {
if ((void *) cb->u.call == call && cb->priv == priv)
return cb;
}
if (!initialized)
side_init();
pthread_mutex_lock(&side_lock);
- old_nr_cb = desc->nr_callbacks;
+ old_nr_cb = desc->state->nr_callbacks;
if (old_nr_cb == UINT32_MAX) {
ret = SIDE_ERROR_INVAL;
goto unlock;
ret = SIDE_ERROR_EXIST;
goto unlock;
}
- old_cb = (struct side_callback *) desc->callbacks;
+ old_cb = (struct side_callback *) desc->state->callbacks;
/* old_nr_cb + 1 (new cb) + 1 (NULL) */
new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
if (!new_cb) {
new_cb[old_nr_cb].u.call =
(side_tracer_callback_func) call;
new_cb[old_nr_cb].priv = priv;
- side_rcu_assign_pointer(desc->callbacks, new_cb);
+ side_rcu_assign_pointer(desc->state->callbacks, new_cb);
side_rcu_wait_grace_period(&rcu_gp);
if (old_nr_cb)
free(old_cb);
- desc->nr_callbacks++;
+ desc->state->nr_callbacks++;
/* Increment concurrently with kernel setting the top bits. */
if (!old_nr_cb)
- (void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
+ (void) __atomic_add_fetch(&desc->state->enabled, 1, __ATOMIC_RELAXED);
unlock:
pthread_mutex_unlock(&side_lock);
return ret;
ret = SIDE_ERROR_NOENT;
goto unlock;
}
- old_nr_cb = desc->nr_callbacks;
- old_cb = (struct side_callback *) desc->callbacks;
+ old_nr_cb = desc->state->nr_callbacks;
+ old_cb = (struct side_callback *) desc->state->callbacks;
if (old_nr_cb == 1) {
new_cb = (struct side_callback *) &side_empty_callback;
} else {
- pos_idx = cb_pos - desc->callbacks;
+ pos_idx = cb_pos - desc->state->callbacks;
/* Remove entry at pos_idx. */
/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
memcpy(new_cb, old_cb, pos_idx);
memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1], old_nr_cb - pos_idx - 1);
}
- side_rcu_assign_pointer(desc->callbacks, new_cb);
+ side_rcu_assign_pointer(desc->state->callbacks, new_cb);
side_rcu_wait_grace_period(&rcu_gp);
free(old_cb);
- desc->nr_callbacks--;
+ desc->state->nr_callbacks--;
/* Decrement concurrently with kernel setting the top bits. */
if (old_nr_cb == 1)
- (void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
+ (void) __atomic_add_fetch(&desc->state->enabled, -1, __ATOMIC_RELAXED);
unlock:
pthread_mutex_unlock(&side_lock);
return ret;
static
void side_event_remove_callbacks(struct side_event_description *desc)
{
- uint32_t nr_cb = desc->nr_callbacks;
+ uint32_t nr_cb = desc->state->nr_callbacks;
struct side_callback *old_cb;
if (!nr_cb)
return;
- old_cb = (struct side_callback *) desc->callbacks;
- (void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
+ old_cb = (struct side_callback *) desc->state->callbacks;
+ (void) __atomic_add_fetch(&desc->state->enabled, -1, __ATOMIC_RELAXED);
/*
* Setting the state back to 0 cb and empty callbacks out of
* caution. This should not matter because instrumentation is
* unreachable.
*/
- desc->nr_callbacks = 0;
- side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
+ desc->state->nr_callbacks = 0;
+ side_rcu_assign_pointer(desc->state->callbacks, &side_empty_callback);
/*
* No need to wait for grace period because instrumentation is
* unreachable.