Tracer event notification register/unregister
src/side.c (libside.git)
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

#include "tracer.h"
#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000
#define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000

/* Allow 2^24 tracer callbacks to be registered on an event. */
#define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFF
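
/*
 * Example: an event with three user-space callbacks registered and the
 * kernel user_event bit set has *desc->enabled == 0x80000003: the top
 * bit comes from SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK, and the low
 * 24 bits count the registered user-space callbacks.
 */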

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

static pthread_mutex_t side_lock = PTHREAD_MUTEX_INITIALIZER;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback;
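
/*
 * Note: event descriptions are expected to start out with their
 * callbacks pointer set to &side_empty_callback, so the callback
 * iteration in side_call() and side_call_variadic() stops immediately
 * while no tracer callback is registered.
 */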

void side_init(void) __attribute__((constructor));
void side_exit(void) __attribute__((destructor));

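/*
 * side_call() is the fast path invoked from instrumented code for
 * non-variadic events: it returns early once the library destructor has
 * run, lazily initializes the library if needed, checks the event's
 * enabled word, and then walks the RCU-protected, NULL-terminated array
 * of registered tracer callbacks.
 */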
void side_call(const struct side_event_description *desc, const struct side_arg_vec_description *sav_desc)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uint32_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
		fprintf(stderr, "ERROR: unexpected variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	//TODO: replace tracer_call by rcu iteration on list of registered callbacks
	tracer_call(desc, sav_desc, NULL);

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(desc, sav_desc, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

void side_call_variadic(const struct side_event_description *desc,
		const struct side_arg_vec_description *sav_desc,
		const struct side_arg_dynamic_event_struct *var_struct)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uint32_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
		fprintf(stderr, "ERROR: unexpected non-variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	//TODO: replace tracer_call by rcu iteration on list of registered callbacks
	tracer_call_variadic(desc, sav_desc, var_struct, NULL);

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(desc, sav_desc, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void (*call)(), void *priv)
{
	const struct side_callback *cb;

	for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
		if (cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

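/*
 * Callback registration and unregistration publish a newly allocated
 * copy of the event's callback array (terminated by a zeroed entry),
 * wait for an RCU grace period, and only then free the previous array,
 * so concurrent side_call() iterations never dereference freed memory.
 */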
static
int _side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	if (old_nr_cb == SIDE_EVENT_ENABLED_USER_MASK) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) desc->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	/* Copy the existing callbacks (size in bytes, not in elements). */
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic = call;
	else
		new_cb[old_nr_cb].u.call = call;
	new_cb[old_nr_cb].priv = priv;
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	/* Increment concurrently with kernel setting the top bits. */
	(void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call_variadic, priv);
}

int _side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	old_cb = (struct side_callback *) desc->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - desc->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		/* Copy sizes are in bytes. */
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	/* Decrement concurrently with kernel setting the top bits. */
	(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call_variadic, priv);
}

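/*
 * Register a batch of events on behalf of an instrumented executable or
 * shared object: the handle is linked into the global events list and
 * every registered tracer is notified with
 * SIDE_TRACER_NOTIFICATION_INSERT_EVENTS.
 */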
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	uint32_t nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) desc->callbacks;
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	(void) __atomic_add_fetch(desc->enabled, -nr_cb, __ATOMIC_RELAXED);
	side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

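/*
 * Register a tracer notification callback. The callback is immediately
 * replayed (with side_lock held) with
 * SIDE_TRACER_NOTIFICATION_INSERT_EVENTS for every event batch already
 * registered, so a late-attaching tracer still learns about
 * pre-existing instrumentation.
 */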
struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	/* The handle was allocated by side_tracer_event_notification_register(). */
	free(tracer_handle);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_rcu_gp_exit(&rcu_gp);
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	finalized = true;
}
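
/*
 * Illustrative sketch (kept out of the build): how a tracer could hook
 * into the API above. The tracer_event_cb(), tracer_notif_cb() and
 * tracer_attach() names are hypothetical; only the side_* functions
 * defined in this file are part of the library.
 */
#if 0
static
void tracer_event_cb(const struct side_event_description *desc,
		const struct side_arg_vec_description *sav_desc, void *priv)
{
	/* Called from side_call() each time an enabled event is hit. */
	(void) desc;
	(void) sav_desc;
	(void) priv;
}

static
void tracer_notif_cb(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv)
{
	uint32_t i;

	for (i = 0; i < nr_events; i++) {
		/* Variadic events would use the _variadic_ register/unregister variants. */
		if (events[i]->flags & SIDE_EVENT_FLAG_VARIADIC)
			continue;
		if (notif == SIDE_TRACER_NOTIFICATION_INSERT_EVENTS)
			(void) side_tracer_callback_register(events[i], tracer_event_cb, priv);
		else
			(void) side_tracer_callback_unregister(events[i], tracer_event_cb, priv);
	}
}

static
struct side_tracer_handle *tracer_attach(void)
{
	/* Already-registered event batches are replayed as INSERT_EVENTS notifications. */
	return side_tracer_event_notification_register(tracer_notif_cb, NULL);
}
#endif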