Introduce event state ABI version
[libside.git] / src / side.c
index d02a34f135b6ada078ed5821cdd5245c8cdad7a4..b180e416e64b558f70117fdb6ed00e25ca80b3c4 100644
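
This patch threads an ABI version through every access to the event state: the version-0 payload moves into struct side_event_state_0, which embeds a versioned header as its first member "p", and each call site validates the version before downcasting. A hedged sketch of the layout this implies; the field order, the side_ptr_t() pointer wrapper, and the side_container_of() definition are inferred from the accesses in the hunks below, not copied from the libside headers:

#include <stddef.h>
#include <stdint.h>

struct side_callback;
struct side_event_description;

/* Stand-in for libside's fixed-size pointer wrapper (assumption). */
#define side_ptr_t(type) type *

/* Versioned header: the only member shared across ABI versions. */
struct side_event_state {
        uint32_t version;
};

/* Version-0 payload; the header must remain the first member ("p")
 * so side_container_of() can recover the enclosing struct. */
struct side_event_state_0 {
        struct side_event_state p;
        uintptr_t enabled;      /* Low bit: user callbacks registered. */
        side_ptr_t(const struct side_callback) callbacks;
        side_ptr_t(struct side_event_description) desc;
};

/* container_of-style downcast, as used throughout the hunks below. */
#define side_container_of(ptr, type, member) \
        ((type *) ((uintptr_t) (ptr) - offsetof(type, member)))
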
@@ -66,6 +66,7 @@ const struct side_callback side_empty_callback = { };
 void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
 {
        struct side_rcu_read_state rcu_read_state;
+       const struct side_event_state_0 *es0;
        const struct side_callback *side_cb;
        uintptr_t enabled;
 
@@ -73,14 +74,17 @@ void side_call(const struct side_event_state *event_state, const struct side_arg
                return;
        if (side_unlikely(!initialized))
                side_init();
-       assert(!(side_ptr_get(event_state->desc)->flags & SIDE_EVENT_FLAG_VARIADIC));
-       enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
+       if (side_unlikely(event_state->version != 0))
+               abort();
+       es0 = side_container_of(event_state, const struct side_event_state_0, p);
+       assert(!(side_ptr_get(es0->desc)->flags & SIDE_EVENT_FLAG_VARIADIC));
+       enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
        if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
                // TODO: call kernel write.
        }
        side_rcu_read_begin(&rcu_gp, &rcu_read_state);
-       for (side_cb = side_rcu_dereference(side_ptr_get(event_state->callbacks)); side_cb->u.call != NULL; side_cb++)
-               side_cb->u.call(side_ptr_get(event_state->desc), side_arg_vec, side_cb->priv);
+       for (side_cb = side_rcu_dereference(side_ptr_get(es0->callbacks)); side_cb->u.call != NULL; side_cb++)
+               side_cb->u.call(side_ptr_get(es0->desc), side_arg_vec, side_cb->priv);
        side_rcu_read_end(&rcu_gp, &rcu_read_state);
 }
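
Every consumer now repeats the same two steps before touching the payload: check the version, then downcast. A hypothetical helper (not part of the patch) capturing that sequence; aborting on an unknown version is the only safe option, since a mismatched layout cannot be dereferenced:

static inline const struct side_event_state_0 *
side_event_state_get_v0(const struct side_event_state *event_state)
{
        /* Unknown ABI version: the payload layout is unknowable. */
        if (side_unlikely(event_state->version != 0))
                abort();
        return side_container_of(event_state, const struct side_event_state_0, p);
}
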
 
@@ -89,6 +93,7 @@ void side_call_variadic(const struct side_event_state *event_state,
        const struct side_arg_dynamic_struct *var_struct)
 {
        struct side_rcu_read_state rcu_read_state;
+       const struct side_event_state_0 *es0;
        const struct side_callback *side_cb;
        uintptr_t enabled;
 
@@ -96,14 +101,17 @@ void side_call_variadic(const struct side_event_state *event_state,
                return;
        if (side_unlikely(!initialized))
                side_init();
-       assert(side_ptr_get(event_state->desc)->flags & SIDE_EVENT_FLAG_VARIADIC);
-       enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
+       if (side_unlikely(event_state->version != 0))
+               abort();
+       es0 = side_container_of(event_state, const struct side_event_state_0, p);
+       assert(side_ptr_get(es0->desc)->flags & SIDE_EVENT_FLAG_VARIADIC);
+       enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
        if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
                // TODO: call kernel write.
        }
        side_rcu_read_begin(&rcu_gp, &rcu_read_state);
-       for (side_cb = side_rcu_dereference(side_ptr_get(event_state->callbacks)); side_cb->u.call_variadic != NULL; side_cb++)
-               side_cb->u.call_variadic(side_ptr_get(event_state->desc), side_arg_vec, var_struct, side_cb->priv);
+       for (side_cb = side_rcu_dereference(side_ptr_get(es0->callbacks)); side_cb->u.call_variadic != NULL; side_cb++)
+               side_cb->u.call_variadic(side_ptr_get(es0->desc), side_arg_vec, var_struct, side_cb->priv);
        side_rcu_read_end(&rcu_gp, &rcu_read_state);
 }
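
Both call paths load the enabled word with a relaxed atomic because it is shared with the kernel: the register/unregister hunks below add or subtract 1 for the user-space side, while SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK covers bits the kernel may set concurrently. A minimal sketch of the resulting fast-path test, assuming that bit split; the helper name is hypothetical:

static inline int side_event_is_enabled(const struct side_event_state_0 *es0)
{
        /* Non-zero means a user callback, a kernel consumer, or both. */
        return __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED) != 0;
}
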
 
@@ -113,9 +121,13 @@ const struct side_callback *side_tracer_callback_lookup(
                void *call, void *priv)
 {
        struct side_event_state *event_state = side_ptr_get(desc->state);
+       const struct side_event_state_0 *es0;
        const struct side_callback *cb;
 
-       for (cb = side_ptr_get(event_state->callbacks); cb->u.call != NULL; cb++) {
+       if (side_unlikely(event_state->version != 0))
+               abort();
+       es0 = side_container_of(event_state, const struct side_event_state_0, p);
+       for (cb = side_ptr_get(es0->callbacks); cb->u.call != NULL; cb++) {
                if ((void *) cb->u.call == call && cb->priv == priv)
                        return cb;
        }
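
The lookup matches on the (call, priv) pair, so one function can be registered several times with distinct priv cookies. Hypothetical usage, with my_tracer_call and my_priv standing in for a tracer's own symbols:

static int my_tracer_is_attached(struct side_event_description *desc,
                void *my_priv)
{
        const struct side_callback *cb;

        cb = side_tracer_callback_lookup(desc, (void *) my_tracer_call, my_priv);
        return cb != NULL;      /* Same (function, cookie) pair found. */
}
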
@@ -128,6 +140,7 @@ int _side_tracer_callback_register(struct side_event_description *desc,
 {
        struct side_event_state *event_state;
        struct side_callback *old_cb, *new_cb;
+       struct side_event_state_0 *es0;
        int ret = SIDE_ERROR_OK;
        uint32_t old_nr_cb;
 
@@ -139,6 +152,9 @@ int _side_tracer_callback_register(struct side_event_description *desc,
                side_init();
        pthread_mutex_lock(&side_lock);
        event_state = side_ptr_get(desc->state);
+       if (side_unlikely(event_state->version != 0))
+               abort();
+       es0 = side_container_of(event_state, struct side_event_state_0, p);
        old_nr_cb = desc->nr_callbacks;
        if (old_nr_cb == UINT32_MAX) {
                ret = SIDE_ERROR_INVAL;
@@ -149,7 +165,7 @@ int _side_tracer_callback_register(struct side_event_description *desc,
                ret = SIDE_ERROR_EXIST;
                goto unlock;
        }
-       old_cb = (struct side_callback *) side_ptr_get(event_state->callbacks);
+       old_cb = (struct side_callback *) side_ptr_get(es0->callbacks);
        /* old_nr_cb + 1 (new cb) + 1 (NULL) */
        new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
        if (!new_cb) {
@@ -165,14 +181,14 @@ int _side_tracer_callback_register(struct side_event_description *desc,
                        (side_tracer_callback_func) call;
        new_cb[old_nr_cb].priv = priv;
        /* High order bits are already zeroed. */
-       side_rcu_assign_pointer(side_ptr_get(event_state->callbacks), new_cb);
+       side_rcu_assign_pointer(side_ptr_get(es0->callbacks), new_cb);
        side_rcu_wait_grace_period(&rcu_gp);
        if (old_nr_cb)
                free(old_cb);
        desc->nr_callbacks++;
        /* Increment concurrently with kernel setting the top bits. */
        if (!old_nr_cb)
-               (void) __atomic_add_fetch(&event_state->enabled, 1, __ATOMIC_RELAXED);
+               (void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
 unlock:
        pthread_mutex_unlock(&side_lock);
        return ret;
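
Registration keeps the copy/publish/wait/free RCU discipline and only gains the version check. A hedged sketch of a tracer attaching to an event; the callback prototype is inferred from the dispatch loop in side_call() above, and side_tracer_callback_register() is assumed to be the public wrapper around the helper patched here:

static void my_tracer_call(const struct side_event_description *desc,
                const struct side_arg_vec *side_arg_vec, void *priv)
{
        /* Decode side_arg_vec and emit a trace record. */
}

static int attach_my_tracer(struct side_event_description *desc)
{
        /* SIDE_ERROR_OK on success; SIDE_ERROR_EXIST if this
         * (call, priv) pair is already attached. */
        return side_tracer_callback_register(desc, my_tracer_call, NULL);
}
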
@@ -202,6 +218,7 @@ static int _side_tracer_callback_unregister(struct side_event_description *desc,
        struct side_event_state *event_state;
        struct side_callback *old_cb, *new_cb;
        const struct side_callback *cb_pos;
+       struct side_event_state_0 *es0;
        uint32_t pos_idx;
        int ret = SIDE_ERROR_OK;
        uint32_t old_nr_cb;
@@ -214,17 +231,20 @@ static int _side_tracer_callback_unregister(struct side_event_description *desc,
                side_init();
        pthread_mutex_lock(&side_lock);
        event_state = side_ptr_get(desc->state);
+       if (side_unlikely(event_state->version != 0))
+               abort();
+       es0 = side_container_of(event_state, struct side_event_state_0, p);
        cb_pos = side_tracer_callback_lookup(desc, call, priv);
        if (!cb_pos) {
                ret = SIDE_ERROR_NOENT;
                goto unlock;
        }
        old_nr_cb = desc->nr_callbacks;
-       old_cb = (struct side_callback *) side_ptr_get(event_state->callbacks);
+       old_cb = (struct side_callback *) side_ptr_get(es0->callbacks);
        if (old_nr_cb == 1) {
                new_cb = (struct side_callback *) &side_empty_callback;
        } else {
-               pos_idx = cb_pos - side_ptr_get(event_state->callbacks);
+               pos_idx = cb_pos - side_ptr_get(es0->callbacks);
                /* Remove entry at pos_idx. */
                /* old_nr_cb - 1 (removed cb) + 1 (NULL) */
                new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
@@ -236,13 +256,13 @@ static int _side_tracer_callback_unregister(struct side_event_description *desc,
                memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1], (old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
        }
        /* High order bits are already zeroed. */
-       side_rcu_assign_pointer(side_ptr_get(event_state->callbacks), new_cb);
+       side_rcu_assign_pointer(side_ptr_get(es0->callbacks), new_cb);
        side_rcu_wait_grace_period(&rcu_gp);
        free(old_cb);
        desc->nr_callbacks--;
        /* Decrement concurrently with kernel setting the top bits. */
        if (old_nr_cb == 1)
-               (void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
+               (void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
 unlock:
        pthread_mutex_unlock(&side_lock);
        return ret;
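
Unregistration shrinks the callback array around the removed slot before the RCU publish. A stand-alone sketch of the copy arithmetic; the first memcpy sits outside the hunk above, and memcpy sizes are in bytes, hence the sizeof scaling:

memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
                (old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
/* new_cb[old_nr_cb - 1] stays zeroed from calloc(): the NULL terminator. */
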
@@ -298,19 +318,23 @@ void side_event_remove_callbacks(struct side_event_description *desc)
 {
        struct side_event_state *event_state = side_ptr_get(desc->state);
        uint32_t nr_cb = desc->nr_callbacks;
+       struct side_event_state_0 *es0;
        struct side_callback *old_cb;
 
        if (!nr_cb)
                return;
-       old_cb = (struct side_callback *) side_ptr_get(event_state->callbacks);
-       (void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
+       if (side_unlikely(event_state->version != 0))
+               abort();
+       es0 = side_container_of(event_state, struct side_event_state_0, p);
+       old_cb = (struct side_callback *) side_ptr_get(es0->callbacks);
+       (void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
        /*
         * Setting the state back to 0 cb and empty callbacks out of
         * caution. This should not matter because instrumentation is
         * unreachable.
         */
        desc->nr_callbacks = 0;
-       side_rcu_assign_pointer(side_ptr_get(event_state->callbacks), &side_empty_callback);
+       side_rcu_assign_pointer(side_ptr_get(es0->callbacks), &side_empty_callback);
        /*
         * No need to wait for grace period because instrumentation is
         * unreachable.
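
Hypothetical illustration of where the version field leads: a future ABI revision would add a new payload struct embedding the same header, so tracers built against version 0 hit the abort() paths above instead of silently misreading the layout:

struct side_event_state_1 {
        struct side_event_state p;      /* p.version == 1 */
        /* New or rearranged fields go here. */
};
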