* * Existing attribute types are never changed or extended. Attribute
* types can be added to the ABI by reserving a label within
* enum side_attr_type.
- * * Each union part of the ABI has an explicit side defined by a
+ * * Each union part of the ABI has an explicit size defined by a
* side_padding() member. Each structure and union has a static
* assert validating its size.
+ * * If the semantics of the existing event description or type fields
+ * change, the SIDE_EVENT_DESCRIPTION_ABI_VERSION should be increased.
+ * * If the semantics of the "struct side_event_state_N" fields change,
+ * the SIDE_EVENT_STATE_ABI_VERSION should be increased. The
+ * "struct side_event_state_N" is not extensible and must have its
+ * ABI version increased whenever it is changed. Note that increasing
+ * SIDE_EVENT_DESCRIPTION_ABI_VERSION is not necessary when changing
+ * the layout of "struct side_event_state_N". A tracer-side
+ * compatibility check is sketched after this comment.
*
* Handling of unknown types by the tracers:
*
* receiving the side_call arguments.
*
* * Event descriptions can be extended by adding fields at the end of
- * the structure. The "struct side_event_description" and "struct
- * side_event_state" are therefore structures with flexible size and
- * must not be used within arrays.
+ * the structure. The "struct side_event_description" is therefore a
+ * structure with a flexible size and must not be used within arrays.
*/
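
For illustration (not part of the patch), a minimal sketch of the checks a
tracer could perform before consuming an event under these rules.
tracer_supports_event() is a hypothetical helper name; the fields, constants
and side_ptr_get() come from the declarations in this patch:

    #include <stdbool.h>
    #include <stddef.h>

    static bool tracer_supports_event(const struct side_event_description *desc)
    {
        /* Only description ABI version 0 is known to this tracer. */
        if (desc->version != SIDE_EVENT_DESCRIPTION_ABI_VERSION)
            return false;
        /* The description may have grown; require at least the original fields. */
        if (desc->struct_size < offsetof(struct side_event_description, end))
            return false;
        /* The event state is not extensible: only a known version is usable. */
        if (side_ptr_get(desc->state)->version != SIDE_EVENT_STATE_ABI_VERSION)
            return false;
        return true;
    }
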
-#define SIDE_ABI_VERSION 0
+#define SIDE_EVENT_DESCRIPTION_ABI_VERSION 0
+#define SIDE_EVENT_STATE_ABI_VERSION 0
struct side_arg;
struct side_arg_vec;
struct side_arg_dynamic_struct;
struct side_events_register_handle;
struct side_arg_variant;
-struct side_event_state;
+struct side_event_state_0;
struct side_callback;
enum side_type_label {
struct side_event_description {
uint32_t struct_size; /* Size of this structure. */
- uint32_t version; /* ABI version. */
+ uint32_t version; /* Event description ABI version. */
side_ptr_t(struct side_event_state) state;
side_ptr_t(const char) provider_name;
uint32_t nr_fields;
uint32_t nr_attr;
uint32_t nr_callbacks;
-#define side_event_description_orig_abi_last nr_callbacks
+#define side_event_description_orig_abi_last nr_callbacks
/* End of fields supported in the original ABI. */
char end[]; /* End with a flexible array to account for extensibility. */
/*
* This structure is _not_ packed to allow atomic operations on its
- * fields.
+ * fields. Any change to this structure must bump the event state ABI
+ * version, and tracers must be taught the new layout; otherwise they
+ * should reject the event.
*/
+
struct side_event_state {
- uint32_t struct_size; /* Size of this structure. */
+ uint32_t version; /* Event state ABI version. */
+};
+
+struct side_event_state_0 {
+ struct side_event_state p; /* Required first field. */
uint32_t enabled;
side_ptr_t(const struct side_callback) callbacks;
side_ptr_t(struct side_event_description) desc;
-
- char end[]; /* End with a flexible array to account for extensibility. */
};
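
The call sites below recover the enclosing structure from the embedded
header with side_container_of(). Assuming it follows the classic
container_of construct, it is equivalent to:

    #include <stddef.h>

    /* Recover the enclosing structure from a pointer to one of its members. */
    #define side_container_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

Because "struct side_event_state p" is required to be the first field, the
computed offset is zero today, but going through offsetof() keeps the
conversion correct even if a future "struct side_event_state_N" places the
header elsewhere.
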
/* Event and type attributes */
.sav = SIDE_PTR_INIT(side_sav), \
.len = SIDE_ARRAY_SIZE(side_sav), \
}; \
- side_call(&(side_event_state__##_identifier), &side_arg_vec); \
+ side_call(&(side_event_state__##_identifier.p), &side_arg_vec); \
}
#define side_event(_identifier, _sav) \
.len = SIDE_ARRAY_SIZE(side_fields), \
.nr_attr = SIDE_ARRAY_SIZE(SIDE_PARAM_SELECT_ARG1(_, ##_attr, side_attr_list())), \
}; \
- side_call_variadic(&(side_event_state__##_identifier), &side_arg_vec, &var_struct); \
+ side_call_variadic(&(side_event_state__##_identifier.p), &side_arg_vec, &var_struct); \
}
#define side_event_variadic(_identifier, _sav, _var, _attr...) \
#define _side_define_event(_linkage, _identifier, _provider, _event, _loglevel, _fields, _flags, _attr...) \
_linkage struct side_event_description __attribute__((section("side_event_description"))) \
_identifier; \
- _linkage struct side_event_state __attribute__((section("side_event_state"))) \
+ _linkage struct side_event_state_0 __attribute__((section("side_event_state"))) \
side_event_state__##_identifier = { \
- .struct_size = offsetof(struct side_event_state, end), \
+ .p = { \
+ .version = SIDE_EVENT_STATE_ABI_VERSION, \
+ }, \
.enabled = 0, \
.callbacks = SIDE_PTR_INIT(&side_empty_callback), \
.desc = SIDE_PTR_INIT(&(_identifier)), \
_linkage struct side_event_description __attribute__((section("side_event_description"))) \
_identifier = { \
.struct_size = offsetof(struct side_event_description, end), \
- .version = SIDE_ABI_VERSION, \
- .state = SIDE_PTR_INIT(&(side_event_state__##_identifier)), \
+ .version = SIDE_EVENT_DESCRIPTION_ABI_VERSION, \
+ .state = SIDE_PTR_INIT(&(side_event_state__##_identifier.p)), \
.provider_name = SIDE_PTR_INIT(_provider), \
.event_name = SIDE_PTR_INIT(_event), \
.fields = SIDE_PTR_INIT(_fields), \
_loglevel, SIDE_PARAM(_fields), SIDE_EVENT_FLAG_VARIADIC, SIDE_PARAM_SELECT_ARG1(_, ##_attr, side_attr_list()))
#define side_declare_event(_identifier) \
- extern struct side_event_state side_event_state_##_identifier; \
+ extern struct side_event_state_0 side_event_state__##_identifier; \
extern struct side_event_description _identifier
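
None of this versioning plumbing leaks into instrumentation sites. A hedged
usage sketch, assuming the library's documented side_static_event() and
side_event() macros with u32 field/arg helpers (exact helper signatures may
differ):

    side_static_event(my_event, "myprovider", "myevent", SIDE_LOGLEVEL_DEBUG,
        side_field_list(
            side_field_u32("count", side_attr_list())
        ),
        side_attr_list()
    );

    void do_work(void)
    {
        /* Eventually invokes side_call(&side_event_state__my_event.p, ...). */
        side_event(my_event, side_arg_list(side_arg_u32(42)));
    }
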
#ifdef __cplusplus
void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
struct side_rcu_read_state rcu_read_state;
+ const struct side_event_state_0 *es0;
const struct side_callback *side_cb;
uintptr_t enabled;
return;
if (side_unlikely(!initialized))
side_init();
- assert(!(side_ptr_get(event_state->desc)->flags & SIDE_EVENT_FLAG_VARIADIC));
- enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
+ if (side_unlikely(event_state->version != 0))
+ abort();
+ es0 = side_container_of(event_state, const struct side_event_state_0, p);
+ assert(!(side_ptr_get(es0->desc)->flags & SIDE_EVENT_FLAG_VARIADIC));
+ enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
// TODO: call kernel write.
}
side_rcu_read_begin(&rcu_gp, &rcu_read_state);
- for (side_cb = side_rcu_dereference(side_ptr_get(event_state->callbacks)); side_cb->u.call != NULL; side_cb++)
- side_cb->u.call(side_ptr_get(event_state->desc), side_arg_vec, side_cb->priv);
+ for (side_cb = side_rcu_dereference(side_ptr_get(es0->callbacks)); side_cb->u.call != NULL; side_cb++)
+ side_cb->u.call(side_ptr_get(es0->desc), side_arg_vec, side_cb->priv);
side_rcu_read_end(&rcu_gp, &rcu_read_state);
}
const struct side_arg_dynamic_struct *var_struct)
{
struct side_rcu_read_state rcu_read_state;
+ const struct side_event_state_0 *es0;
const struct side_callback *side_cb;
uintptr_t enabled;
return;
if (side_unlikely(!initialized))
side_init();
- assert(side_ptr_get(event_state->desc)->flags & SIDE_EVENT_FLAG_VARIADIC);
- enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
+ if (side_unlikely(event_state->version != 0))
+ abort();
+ es0 = side_container_of(event_state, const struct side_event_state_0, p);
+ assert(side_ptr_get(es0->desc)->flags & SIDE_EVENT_FLAG_VARIADIC);
+ enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
// TODO: call kernel write.
}
side_rcu_read_begin(&rcu_gp, &rcu_read_state);
- for (side_cb = side_rcu_dereference(side_ptr_get(event_state->callbacks)); side_cb->u.call_variadic != NULL; side_cb++)
- side_cb->u.call_variadic(side_ptr_get(event_state->desc), side_arg_vec, var_struct, side_cb->priv);
+ for (side_cb = side_rcu_dereference(side_ptr_get(es0->callbacks)); side_cb->u.call_variadic != NULL; side_cb++)
+ side_cb->u.call_variadic(side_ptr_get(es0->desc), side_arg_vec, var_struct, side_cb->priv);
side_rcu_read_end(&rcu_gp, &rcu_read_state);
}
void *call, void *priv)
{
struct side_event_state *event_state = side_ptr_get(desc->state);
+ const struct side_event_state_0 *es0;
const struct side_callback *cb;
- for (cb = side_ptr_get(event_state->callbacks); cb->u.call != NULL; cb++) {
+ if (side_unlikely(event_state->version != 0))
+ abort();
+ es0 = side_container_of(event_state, const struct side_event_state_0, p);
+ for (cb = side_ptr_get(es0->callbacks); cb->u.call != NULL; cb++) {
if ((void *) cb->u.call == call && cb->priv == priv)
return cb;
}
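
For context, this lookup is what makes registration idempotent per
(call, priv) pair. A minimal registration sketch, assuming the public entry
point is side_tracer_callback_register(desc, call, priv) returning a
SIDE_ERROR_* code; attach_to_event() is a hypothetical wrapper:

    static void my_tracer_call(const struct side_event_description *desc,
            const struct side_arg_vec *side_arg_vec, void *priv)
    {
        /* Decode side_arg_vec according to desc->fields here. */
    }

    static int attach_to_event(struct side_event_description *desc)
    {
        /* A duplicate (call, priv) pair yields SIDE_ERROR_EXIST. */
        return side_tracer_callback_register(desc, my_tracer_call, NULL);
    }
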
{
struct side_event_state *event_state;
struct side_callback *old_cb, *new_cb;
+ struct side_event_state_0 *es0;
int ret = SIDE_ERROR_OK;
uint32_t old_nr_cb;
side_init();
pthread_mutex_lock(&side_lock);
event_state = side_ptr_get(desc->state);
+ if (side_unlikely(event_state->version != 0))
+ abort();
+ es0 = side_container_of(event_state, struct side_event_state_0, p);
old_nr_cb = desc->nr_callbacks;
if (old_nr_cb == UINT32_MAX) {
ret = SIDE_ERROR_INVAL;
ret = SIDE_ERROR_EXIST;
goto unlock;
}
- old_cb = (struct side_callback *) side_ptr_get(event_state->callbacks);
+ old_cb = (struct side_callback *) side_ptr_get(es0->callbacks);
/* old_nr_cb + 1 (new cb) + 1 (NULL) */
new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
if (!new_cb) {
(side_tracer_callback_func) call;
new_cb[old_nr_cb].priv = priv;
/* High order bits are already zeroed. */
- side_rcu_assign_pointer(side_ptr_get(event_state->callbacks), new_cb);
+ side_rcu_assign_pointer(side_ptr_get(es0->callbacks), new_cb);
side_rcu_wait_grace_period(&rcu_gp);
if (old_nr_cb)
free(old_cb);
desc->nr_callbacks++;
/* Increment concurrently with kernel setting the top bits. */
if (!old_nr_cb)
- (void) __atomic_add_fetch(&event_state->enabled, 1, __ATOMIC_RELAXED);
+ (void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
unlock:
pthread_mutex_unlock(&side_lock);
return ret;
struct side_event_state *event_state;
struct side_callback *old_cb, *new_cb;
const struct side_callback *cb_pos;
+ struct side_event_state_0 *es0;
uint32_t pos_idx;
int ret = SIDE_ERROR_OK;
uint32_t old_nr_cb;
side_init();
pthread_mutex_lock(&side_lock);
event_state = side_ptr_get(desc->state);
+ if (side_unlikely(event_state->version != 0))
+ abort();
+ es0 = side_container_of(event_state, struct side_event_state_0, p);
cb_pos = side_tracer_callback_lookup(desc, call, priv);
if (!cb_pos) {
ret = SIDE_ERROR_NOENT;
goto unlock;
}
old_nr_cb = desc->nr_callbacks;
- old_cb = (struct side_callback *) side_ptr_get(event_state->callbacks);
+ old_cb = (struct side_callback *) side_ptr_get(es0->callbacks);
if (old_nr_cb == 1) {
new_cb = (struct side_callback *) &side_empty_callback;
} else {
- pos_idx = cb_pos - side_ptr_get(event_state->callbacks);
+ pos_idx = cb_pos - side_ptr_get(es0->callbacks);
/* Remove entry at pos_idx. */
/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1], (old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
}
/* High order bits are already zeroed. */
- side_rcu_assign_pointer(side_ptr_get(event_state->callbacks), new_cb);
+ side_rcu_assign_pointer(side_ptr_get(es0->callbacks), new_cb);
side_rcu_wait_grace_period(&rcu_gp);
free(old_cb);
desc->nr_callbacks--;
/* Decrement concurrently with kernel setting the top bits. */
if (old_nr_cb == 1)
- (void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
+ (void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
unlock:
pthread_mutex_unlock(&side_lock);
return ret;
{
struct side_event_state *event_state = side_ptr_get(desc->state);
uint32_t nr_cb = desc->nr_callbacks;
+ struct side_event_state_0 *es0;
struct side_callback *old_cb;
if (!nr_cb)
return;
- old_cb = (struct side_callback *) side_ptr_get(event_state->callbacks);
- (void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
+ if (side_unlikely(event_state->version != 0))
+ abort();
+ es0 = side_container_of(event_state, struct side_event_state_0, p);
+ old_cb = (struct side_callback *) side_ptr_get(es0->callbacks);
+ (void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
/*
* Setting the state back to 0 cb and empty callbacks out of
* caution. This should not matter because instrumentation is
* unreachable.
*/
desc->nr_callbacks = 0;
- side_rcu_assign_pointer(side_ptr_get(event_state->callbacks), &side_empty_callback);
+ side_rcu_assign_pointer(side_ptr_get(es0->callbacks), &side_empty_callback);
/*
* No need to wait for grace period because instrumentation is
* unreachable.