Event registration/unregistration
author: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Fri, 28 Oct 2022 20:01:29 +0000 (16:01 -0400)
committer: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Fri, 28 Oct 2022 20:01:29 +0000 (16:01 -0400)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
include/side/macros.h
include/side/trace.h
src/list.h
src/rcu.c
src/rcu.h
src/side.c

index af6ba1152252be219666160c18e010ebbb2d17fe..58df0413f2862156fa72f38b43169e9fa05b980d 100644 (file)
@@ -6,6 +6,8 @@
 #ifndef _SIDE_MACROS_H
 #define _SIDE_MACROS_H
 
+#include <stddef.h>
+
 /* Helper macros */
 
 #define SIDE_ARRAY_SIZE(arr)   (sizeof(arr) / sizeof((arr)[0]))
index 9db59695d050453cf5d66678e17cdc400f97e2c3..a2cc0830369c3175a93489bc05be1474f22823fa 100644 (file)
@@ -30,6 +30,7 @@ struct side_tracer_dynamic_struct_visitor_ctx;
 struct side_tracer_dynamic_vla_visitor_ctx;
 struct side_event_description;
 struct side_arg_dynamic_event_struct;
+struct side_events_register_handle;
 
 enum side_type {
        /* Basic types */
@@ -149,6 +150,7 @@ enum side_error {
        SIDE_ERROR_EXIST = 2,
        SIDE_ERROR_NOMEM = 3,
        SIDE_ERROR_NOENT = 4,
+       SIDE_ERROR_EXITING = 5,
 };
 
 typedef enum side_visitor_status (*side_visitor)(
@@ -1142,4 +1144,10 @@ int side_tracer_callback_variadic_unregister(struct side_event_description *desc
                        void *priv),
                void *priv);
 
+struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events);
+void side_events_unregister(struct side_events_register_handle *handle);
+
+void side_init(void);
+void side_exit(void);
+
 #endif /* _SIDE_TRACE_H */
index 8717bef4de7adb6f4b6ec1e41b1ac752816fff28..93e64f87516089112b4a9cdd7399ccf1c191142b 100644 (file)
@@ -55,8 +55,10 @@ void side_list_remove_node(struct side_list_node *node)
 
 /* List iteration, safe against node reclaim while iterating. */
 #define side_list_for_each_entry_safe(_entry, _next_entry, _head, _member) \
-       for ((_entry) = side_container_of((_head)->node.next, __typeof__(*(_entry)), _member), (_next_entry) = (_entry)->next; \
-               &(_entry)->member != &head->node; \
-               (_entry) = (_next_entry)->next, (_next_entry) = (_entry)->next)
+       for ((_entry) = side_container_of((_head)->node.next, __typeof__(*(_entry)), _member), \
+                       (_next_entry) = side_container_of((_entry)->_member.next, __typeof__(*(_entry)), _member); \
+               &(_entry)->_member != &(_head)->node; \
+               (_entry) = (_next_entry), \
+               (_next_entry) = side_container_of((_entry)->_member.next, __typeof__(*(_entry)), _member))
 
 #endif /* _SIDE_LIST_H */
index bd6059cc3cacfa6b43cede802c6bd6154d0b1624..bb3bfed60b37efb1d8458732c836a23ba985dad9 100644 (file)
--- a/src/rcu.c
+++ b/src/rcu.c
@@ -164,3 +164,9 @@ void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp)
        if (!rcu_gp->percpu_state)
                abort();
 }
+
+void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp)
+{
+       pthread_mutex_destroy(&rcu_gp->gp_lock);
+       free(rcu_gp->percpu_state);
+}
index dfd2227b86e16e26f3144e5e1e7c5306a4675753..f25aa876a1edcee052117a2f4905e10bc8e28a54 100644 (file)
--- a/src/rcu.h
+++ b/src/rcu.h
@@ -86,5 +86,6 @@ void side_rcu_read_end(struct side_rcu_gp_state *gp_state, unsigned int period)
 
 void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
 void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
+void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
 
 #endif /* _SIDE_RCU_H */
index 0460860bd32666bfd5e6b9fe33d693f8fd1b68b9..d6542126aad74dfd40009513981de862d84f7c99 100644 (file)
 /* Allow 2^24 tracer callbacks to be registered on an event. */
 #define SIDE_EVENT_ENABLED_USER_MASK                   0x00FFFFFF
 
+struct side_events_register_handle {
+       struct side_list_node node;
+       struct side_event_description **events;
+       uint32_t nr_events;
+};
+
 static struct side_rcu_gp_state rcu_gp;
 
 /*
  * Lazy initialization for early use within library constructors.
  */
 static bool initialized;
+/*
+ * Do not register/unregister any more events after destructor.
+ */
+static bool finalized;
 
 static pthread_mutex_t side_lock = PTHREAD_MUTEX_INITIALIZER;
 
 static DEFINE_SIDE_LIST_HEAD(side_list);
 
-static
-void side_init(void)
-       __attribute__((constructor));
-
 /*
  * The empty callback has a NULL function callback pointer, which stops
  * iteration on the array of callbacks immediately.
  */
 const struct side_callback side_empty_callback;
 
+void side_init(void) __attribute__((constructor));
+void side_exit(void) __attribute__((destructor));
+
 void side_call(const struct side_event_description *desc, const struct side_arg_vec_description *sav_desc)
 {
        const struct side_callback *side_cb;
        unsigned int rcu_period;
        uint32_t enabled;
 
+       if (side_unlikely(finalized))
+               return;
        if (side_unlikely(!initialized))
                side_init();
        if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
@@ -74,6 +85,8 @@ void side_call_variadic(const struct side_event_description *desc,
        unsigned int rcu_period;
        uint32_t enabled;
 
+       if (side_unlikely(finalized))
+               return;
        if (side_unlikely(!initialized))
                side_init();
        if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
@@ -120,6 +133,8 @@ int _side_tracer_callback_register(struct side_event_description *desc,
 
        if (!call)
                return SIDE_ERROR_INVAL;
+       if (finalized)
+               return SIDE_ERROR_EXITING;
        pthread_mutex_lock(&side_lock);
        old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
        if (old_nr_cb == SIDE_EVENT_ENABLED_USER_MASK) {
@@ -189,6 +204,8 @@ int _side_tracer_callback_unregister(struct side_event_description *desc,
 
        if (!call)
                return SIDE_ERROR_INVAL;
+       if (finalized)
+               return SIDE_ERROR_EXITING;
        pthread_mutex_lock(&side_lock);
        cb_pos = side_tracer_callback_lookup(desc, call, priv);
        if (!cb_pos) {
@@ -244,7 +261,72 @@ int side_tracer_callback_variadic_unregister(struct side_event_description *desc
        return _side_tracer_callback_unregister(desc, call_variadic, priv);
 }
 
+struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
+{
+       struct side_events_register_handle *handle = NULL;
+
+       if (finalized)
+               return NULL;
+       handle = calloc(1, sizeof(struct side_events_register_handle));
+       if (!handle)
+               return NULL;
+       pthread_mutex_lock(&side_lock);
+       handle->events = events;
+       handle->nr_events = nr_events;
+       side_list_insert_node_tail(&side_list, &handle->node);
+       pthread_mutex_unlock(&side_lock);
+       //TODO: call event batch register ioctl
+       return handle;
+}
+
 static
+void side_event_remove_callbacks(struct side_event_description *desc)
+{
+       uint32_t nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
+       struct side_callback *old_cb;
+
+       if (!nr_cb)
+               return;
+       old_cb = (struct side_callback *) desc->callbacks;
+       /*
+        * Setting the state back to 0 cb and empty callbacks out of
+        * caution. This should not matter because instrumentation is
+        * unreachable.
+        */
+       (void) __atomic_add_fetch(desc->enabled, -nr_cb, __ATOMIC_RELAXED);
+       side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
+       /*
+        * No need to wait for grace period because instrumentation is
+        * unreachable.
+        */
+       free(old_cb);
+}
+
+/*
+ * Unregister event handle. At this point, all side events in that
+ * handle should be unreachable.
+ */
+void side_events_unregister(struct side_events_register_handle *handle)
+{
+       uint32_t i;
+
+       if (finalized)
+               return;
+       pthread_mutex_lock(&side_lock);
+       side_list_remove_node(&handle->node);
+       for (i = 0; i < handle->nr_events; i++) {
+               struct side_event_description *event = handle->events[i];
+
+               /* Skip NULL pointers */
+               if (!event)
+                       continue;
+               side_event_remove_callbacks(event);
+       }
+       pthread_mutex_unlock(&side_lock);
+       //TODO: call event batch unregister ioctl
+       free(handle);
+}
+
 void side_init(void)
 {
        if (initialized)
@@ -252,3 +334,15 @@ void side_init(void)
        side_rcu_gp_init(&rcu_gp);
        initialized = true;
 }
+
+void side_exit(void)
+{
+       struct side_events_register_handle *handle, *tmp;
+
+       if (finalized)
+               return;
+       side_list_for_each_entry_safe(handle, tmp, &side_list, node)
+               side_events_unregister(handle);
+       side_rcu_gp_exit(&rcu_gp);
+       finalized = true;
+}
This page took 0.028043 seconds and 4 git commands to generate.