Move side_callback to libside internals
[libside.git] / src / side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <assert.h>

#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif

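/*
 * Layout of the per-event "enabled" word: the top 8 bits are reserved
 * for the kernel tracer, with the topmost bit flagging that a kernel
 * user event is attached to the event; the remaining low bits count
 * user-space tracer references on the event.
 */
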
struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

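/*
 * A registered tracer callback. The union holds either the regular or
 * the variadic callback function pointer, depending on whether the
 * event description has the SIDE_EVENT_FLAG_VARIADIC flag set. Each
 * event state points to a NULL-terminated array of such entries, which
 * is replaced as a whole (RCU) on callback register/unregister.
 */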
struct side_callback {
	union {
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			void *priv);
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			const struct side_arg_dynamic_struct *var_struct,
			void *priv);
	} u;
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const char side_empty_callback[sizeof(struct side_callback)];

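/*
 * side_call: entry point invoked by instrumented call sites for
 * non-variadic events. It iterates over the RCU-protected callback
 * array of the event state and calls each registered tracer with the
 * argument vector.
 */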
void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(!(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(es0->desc, side_arg_vec, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

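/*
 * side_call_variadic: same as side_call, for events declared with
 * SIDE_EVENT_FLAG_VARIADIC. The dynamic fields are passed to each
 * tracer callback as an additional var_struct argument.
 */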
void side_call_variadic(const struct side_event_state *event_state,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(es0->desc, side_arg_vec, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

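/*
 * Lookup a (call, priv) tuple within the callback array of an event.
 * Returns the matching entry, or NULL if not found. Callers hold
 * side_lock.
 */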
static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_event_state_0 *es0;
	const struct side_callback *cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	for (cb = es0->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

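/*
 * Add a callback to an event: reject duplicate (call, priv) tuples,
 * allocate a new array with room for the additional entry and the NULL
 * terminator, publish it with RCU, wait for a grace period before
 * freeing the old array, and increment the enabled count when the
 * first callback is installed.
 */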
static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	struct side_event_state_0 *es0;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	old_nr_cb = es0->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) es0->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	es0->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

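/*
 * Usage sketch (illustrative, hypothetical names): a tracer which
 * obtained an event description through the tracer notification can
 * attach a handler matching the side_tracer_callback_func signature:
 *
 *	static void my_call(const struct side_event_description *desc,
 *			const struct side_arg_vec *side_arg_vec, void *priv)
 *	{
 *		...consume side_arg_vec...
 *	}
 *
 *	ret = side_tracer_callback_register(desc, my_call, my_priv);
 */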
int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv);
}

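/*
 * Remove a callback from an event: locate the (call, priv) entry,
 * build a smaller array without it (or fall back to the shared empty
 * callback array when removing the last entry), publish it with RCU,
 * wait for a grace period before freeing the old array, and decrement
 * the enabled count when the last callback is removed.
 */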
static int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	struct side_event_state_0 *es0;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = es0->nr_callbacks;
	old_cb = (struct side_callback *) es0->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - es0->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	es0->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv);
}

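/*
 * Register an array of event descriptions with libside. Existing
 * tracers are notified of the inserted events while holding side_lock.
 */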
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
			calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

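/*
 * Remove all callbacks from an event whose handle is being
 * unregistered. The instrumentation is unreachable at this point, so
 * the old callback array can be freed without waiting for a grace
 * period.
 */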
static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	struct side_event_state_0 *es0;
	struct side_callback *old_cb;
	uint32_t nr_cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	nr_cb = es0->nr_callbacks;
	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) es0->callbacks;
	(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	es0->nr_callbacks = 0;
	side_rcu_assign_pointer(es0->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

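/*
 * Register a tracer notification callback. Event arrays which are
 * already registered are replayed to the new tracer as INSERT_EVENTS
 * notifications while holding side_lock.
 */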
struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
			calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

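/*
 * Unregister a tracer notification callback. The tracer receives a
 * REMOVE_EVENTS notification for each registered event array before
 * its handle is removed from the tracer list and freed.
 */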
void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	free(tracer_handle);
}

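/*
 * Lazily initialize the RCU grace period state. Safe to call multiple
 * times; API entry points call it when the library has not been
 * initialized yet.
 */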
void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&rcu_gp);
	finalized = true;
}