callback register/unregister
[libside.git] / src / side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>

#include "tracer.h"
#include "rcu.h"

/* Top 8 bits reserved for kernel tracer use. */
#define SIDE_EVENT_ENABLED_KERNEL_MASK 0xFF000000
#define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK 0x80000000

/* Allow up to 2^24 - 1 tracer callbacks to be registered on an event. */
#define SIDE_EVENT_ENABLED_USER_MASK 0x00FFFFFF
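
/*
 * Illustrative sketch (hypothetical helper, not part of the libside
 * API): the 32-bit enabled word combines a kernel-owned top byte with
 * a user-space callback count in the low 24 bits, so readers must mask
 * before interpreting it as a count.
 */
static inline uint32_t side_event_enabled_nr_callbacks(const struct side_event_description *desc)
{
	/* Keep only the low 24 bits: the registered-callback count. */
	return *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
}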

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;

static pthread_mutex_t side_lock = PTHREAD_MUTEX_INITIALIZER;

static
void side_init(void)
	__attribute__((constructor));

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback;
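
/*
 * Hypothetical helper (illustration only, not used below): because a
 * callback array is terminated by the empty callback's NULL function
 * pointer, its length can be recovered by walking to the sentinel,
 * without any separate length field.
 */
static inline uint32_t side_callback_array_len(const struct side_callback *cb)
{
	uint32_t len = 0;

	while (cb[len].u.call != NULL)
		len++;
	return len;
}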

void side_call(const struct side_event_description *desc, const struct side_arg_vec_description *sav_desc)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;

	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
		fprintf(stderr, "ERROR: unexpected variadic event description\n");
		abort();
	}
	if (side_unlikely(*desc->enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(*desc->enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	//TODO: replace tracer_call by rcu iteration on list of registered callbacks
	tracer_call(desc, sav_desc, NULL);

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(desc, sav_desc, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}
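
/*
 * Note on the loop above: it runs between side_rcu_read_begin() and
 * side_rcu_read_end(), so the callbacks array obtained through
 * side_rcu_dereference() remains valid for the whole iteration. The
 * register/unregister paths below publish a new array with
 * side_rcu_assign_pointer() and only free the old one after
 * side_rcu_wait_grace_period() returns.
 */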

void side_call_variadic(const struct side_event_description *desc,
	const struct side_arg_vec_description *sav_desc,
	const struct side_arg_dynamic_event_struct *var_struct)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;

	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
		fprintf(stderr, "ERROR: unexpected non-variadic event description\n");
		abort();
	}
	if (side_unlikely(*desc->enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(*desc->enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	//TODO: replace tracer_call by rcu iteration on list of registered callbacks
	tracer_call_variadic(desc, sav_desc, var_struct, NULL);

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(desc, sav_desc, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void (*call)(), void *priv)
{
	const struct side_callback *cb;

	for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
		if (cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	pthread_mutex_lock(&side_lock);
	old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	if (old_nr_cb == SIDE_EVENT_ENABLED_USER_MASK) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) desc->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic = call;
	else
		new_cb[old_nr_cb].u.call = call;
	new_cb[old_nr_cb].priv = priv;
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	/* Increment concurrently with kernel setting the top bits. */
	(void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call, priv);
}
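
/*
 * Usage sketch, with hypothetical names (my_event_desc and my_tracer_cb
 * are illustrations, not part of libside):
 *
 *	static void my_tracer_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec_description *sav_desc,
 *			void *priv)
 *	{
 *		...consume the static argument vector...
 *	}
 *
 *	int ret = side_tracer_callback_register(&my_event_desc,
 *			my_tracer_cb, NULL);
 *	if (ret != SIDE_ERROR_OK)
 *		...handle error (e.g. SIDE_ERROR_EXIST on a duplicate
 *		(call, priv) tuple)...
 */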

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call_variadic, priv);
}

static
int _side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	pthread_mutex_lock(&side_lock);
	old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	if (old_nr_cb == 0) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	old_cb = (struct side_callback *) desc->callbacks;
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - desc->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	/* Decrement concurrently with kernel setting the top bits. */
	(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call, priv);
}
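
/*
 * Usage sketch (same hypothetical names as the register example above):
 * unregistration must pass the exact (call, priv) tuple that was
 * registered, otherwise the lookup fails with SIDE_ERROR_NOENT.
 *
 *	int ret = side_tracer_callback_unregister(&my_event_desc,
 *			my_tracer_cb, NULL);
 *	if (ret == SIDE_ERROR_NOENT)
 *		...no such (call, priv) tuple was registered...
 */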

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call_variadic, priv);
}

static
void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}