void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
struct side_rcu_read_state rcu_read_state;
+ const struct side_event_state_0 *es0;
const struct side_callback *side_cb;
uintptr_t enabled;
if (side_unlikely(finalized))
return;
if (side_unlikely(!initialized))
side_init();
- assert(!(event_state->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
- enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
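+ /*
+  * The event state is now versioned: the version field selects the
+  * concrete layout, and this tracer only understands version 0, so
+  * any other value is treated as an ABI mismatch and aborts.
+  * side_container_of() maps the embedded member "p" back to its
+  * enclosing struct side_event_state_0.
+  */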
+ if (side_unlikely(event_state->version != 0))
+ abort();
+ es0 = side_container_of(event_state, const struct side_event_state_0, p);
+ assert(!(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
+ enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
// TODO: call kernel write.
}
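+ /*
+  * Iterate on the callback array under the RCU read-side critical
+  * section: a concurrent register/unregister may replace the array
+  * and only reclaims the old copy after a grace period.
+  */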
side_rcu_read_begin(&rcu_gp, &rcu_read_state);
- for (side_cb = side_rcu_dereference(event_state->callbacks); side_cb->u.call != NULL; side_cb++)
- side_cb->u.call(event_state->desc, side_arg_vec, side_cb->priv);
+ for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call != NULL; side_cb++)
+ side_cb->u.call(es0->desc, side_arg_vec, side_cb->priv);
side_rcu_read_end(&rcu_gp, &rcu_read_state);
}
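+ /*
+  * For reference, a sketch of the layouts assumed by the code above
+  * (illustrative only; the authoritative definitions live in the
+  * side headers):
+  *
+  *	struct side_event_state {
+  *		uint32_t version;
+  *	};
+  *
+  *	struct side_event_state_0 {
+  *		struct side_event_state p;
+  *		uintptr_t enabled;
+  *		const struct side_callback *callbacks;
+  *		struct side_event_description *desc;
+  *	};
+  */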
void side_call_variadic(const struct side_event_state *event_state,
const struct side_arg_vec *side_arg_vec,
const struct side_arg_dynamic_struct *var_struct)
{
struct side_rcu_read_state rcu_read_state;
+ const struct side_event_state_0 *es0;
const struct side_callback *side_cb;
uintptr_t enabled;
if (side_unlikely(finalized))
return;
if (side_unlikely(!initialized))
side_init();
- assert(event_state->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
- enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
+ if (side_unlikely(event_state->version != 0))
+ abort();
+ es0 = side_container_of(event_state, const struct side_event_state_0, p);
+ assert(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
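+ /*
+  * The "enabled" word combines two sources: user-space callback
+  * registration counts in the low bits, while the kernel may set
+  * the SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK top bits for
+  * user events.
+  */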
+ enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
// TODO: call kernel write.
}
side_rcu_read_begin(&rcu_gp, &rcu_read_state);
- for (side_cb = side_rcu_dereference(event_state->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
- side_cb->u.call_variadic(event_state->desc, side_arg_vec, var_struct, side_cb->priv);
+ for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
+ side_cb->u.call_variadic(es0->desc, side_arg_vec, var_struct, side_cb->priv);
side_rcu_read_end(&rcu_gp, &rcu_read_state);
}
const struct side_callback *side_tracer_callback_lookup(
const struct side_event_description *desc,
void *call, void *priv)
{
struct side_event_state *event_state = side_ptr_get(desc->state);
+ const struct side_event_state_0 *es0;
const struct side_callback *cb;
- for (cb = event_state->callbacks; cb->u.call != NULL; cb++) {
+ if (side_unlikely(event_state->version != 0))
+ abort();
+ es0 = side_container_of(event_state, const struct side_event_state_0, p);
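+ /*
+  * The callback array is NULL-terminated: scan until the sentinel
+  * entry whose call pointer is NULL.
+  */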
+ for (cb = es0->callbacks; cb->u.call != NULL; cb++) {
if ((void *) cb->u.call == call && cb->priv == priv)
return cb;
}
return NULL;
}

int _side_tracer_callback_register(struct side_event_description *desc,
void *call, void *priv)
{
struct side_event_state *event_state;
struct side_callback *old_cb, *new_cb;
+ struct side_event_state_0 *es0;
int ret = SIDE_ERROR_OK;
uint32_t old_nr_cb;
if (!initialized)
side_init();
pthread_mutex_lock(&side_lock);
event_state = side_ptr_get(desc->state);
+ if (side_unlikely(event_state->version != 0))
+ abort();
+ es0 = side_container_of(event_state, struct side_event_state_0, p);
old_nr_cb = desc->nr_callbacks;
if (old_nr_cb == UINT32_MAX) {
ret = SIDE_ERROR_INVAL;
goto unlock;
}
if (side_tracer_callback_lookup(desc, call, priv)) {
ret = SIDE_ERROR_EXIST;
goto unlock;
}
- old_cb = (struct side_callback *) event_state->callbacks;
+ old_cb = (struct side_callback *) es0->callbacks;
/* old_nr_cb + 1 (new cb) + 1 (NULL) */
new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
if (!new_cb) {
ret = SIDE_ERROR_NOMEM;
goto unlock;
}
memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
new_cb[old_nr_cb].u.call =
(side_tracer_callback_func) call;
new_cb[old_nr_cb].priv = priv;
- side_rcu_assign_pointer(event_state->callbacks, new_cb);
+ /* High order bits are already zeroed. */
+ side_rcu_assign_pointer(es0->callbacks, new_cb);
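+ /*
+  * Wait for pre-existing RCU readers to finish iterating on the
+  * old array before freeing it.
+  */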
side_rcu_wait_grace_period(&rcu_gp);
if (old_nr_cb)
free(old_cb);
desc->nr_callbacks++;
/* Increment concurrently with kernel setting the top bits. */
if (!old_nr_cb)
- (void) __atomic_add_fetch(&event_state->enabled, 1, __ATOMIC_RELAXED);
+ (void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
unlock:
pthread_mutex_unlock(&side_lock);
return ret;
}

int _side_tracer_callback_unregister(struct side_event_description *desc,
void *call, void *priv)
{
struct side_event_state *event_state;
struct side_callback *old_cb, *new_cb;
const struct side_callback *cb_pos;
+ struct side_event_state_0 *es0;
uint32_t pos_idx;
int ret = SIDE_ERROR_OK;
uint32_t old_nr_cb;
if (!initialized)
side_init();
pthread_mutex_lock(&side_lock);
event_state = side_ptr_get(desc->state);
+ if (side_unlikely(event_state->version != 0))
+ abort();
+ es0 = side_container_of(event_state, struct side_event_state_0, p);
cb_pos = side_tracer_callback_lookup(desc, call, priv);
if (!cb_pos) {
ret = SIDE_ERROR_NOENT;
goto unlock;
}
old_nr_cb = desc->nr_callbacks;
- old_cb = (struct side_callback *) event_state->callbacks;
+ old_cb = (struct side_callback *) es0->callbacks;
if (old_nr_cb == 1) {
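+ /*
+  * Removing the last callback: point back at the shared empty
+  * sentinel rather than allocating a one-entry array.
+  */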
new_cb = (struct side_callback *) &side_empty_callback;
} else {
- pos_idx = cb_pos - event_state->callbacks;
+ pos_idx = cb_pos - es0->callbacks;
/* Remove entry at pos_idx. */
/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
if (!new_cb) {
ret = SIDE_ERROR_NOMEM;
goto unlock;
}
memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
}
- side_rcu_assign_pointer(event_state->callbacks, new_cb);
+ /* High order bits are already zeroed. */
+ side_rcu_assign_pointer(es0->callbacks, new_cb);
side_rcu_wait_grace_period(&rcu_gp);
free(old_cb);
desc->nr_callbacks--;
/* Decrement concurrently with kernel setting the top bits. */
if (old_nr_cb == 1)
- (void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
+ (void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
unlock:
pthread_mutex_unlock(&side_lock);
return ret;
}

void side_event_remove_callbacks(struct side_event_description *desc)
{
struct side_event_state *event_state = side_ptr_get(desc->state);
uint32_t nr_cb = desc->nr_callbacks;
+ struct side_event_state_0 *es0;
struct side_callback *old_cb;
if (!nr_cb)
return;
- old_cb = (struct side_callback *) event_state->callbacks;
- (void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
+ if (side_unlikely(event_state->version != 0))
+ abort();
+ es0 = side_container_of(event_state, struct side_event_state_0, p);
+ old_cb = (struct side_callback *) es0->callbacks;
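+ /* Decrement concurrently with kernel setting the top bits. */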
+ (void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
/*
* Setting the state back to 0 cb and empty callbacks out of
* caution. This should not matter because instrumentation is
* unreachable.
*/
desc->nr_callbacks = 0;
- side_rcu_assign_pointer(event_state->callbacks, &side_empty_callback);
+ side_rcu_assign_pointer(es0->callbacks, &side_empty_callback);
/*
* No need to wait for grace period because instrumentation is
* unreachable.
*/
free(old_cb);
}