Add "key" to callback registration
[libside.git] / src / side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <assert.h>

#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK 0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK 0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK 0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK 0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK 0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK 0x00FFFFFFUL
#endif
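
/*
 * Worked example (illustrative values, not from the original source):
 * with the 64-bit layout above, an event holding 3 tracer references
 * while also enabled as a kernel user event would have
 *
 *	enabled = SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK | 3
 *	        = 0x8000000000000003
 *
 * User-space only increments/decrements the bits covered by
 * SIDE_EVENT_ENABLED_USER_MASK; the top 8 bits belong to the kernel
 * tracer.
 */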

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

struct side_callback {
	union {
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			void *priv);
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			const struct side_arg_dynamic_struct *var_struct,
			void *priv);
	} u;
	void *priv;
	void *key;
};
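
/*
 * The key distinguishes multiple registrations of the same
 * (call, priv) pair: lookups match on the full (call, priv, key)
 * tuple, and side_call_key()/side_call_variadic_key() can target a
 * single registration. A NULL key matches any registration at call
 * time.
 */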

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const char side_empty_callback[sizeof(struct side_callback)];
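
/*
 * For example, an event with two registered callbacks points to an
 * array of three entries: { cb[0], cb[1], zero-filled sentinel }. The
 * call loops below stop at the sentinel's NULL function pointer.
 */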

void side_call_key(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec, void *key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(!(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call != NULL; side_cb++) {
		/* A NULL key, on either side, is always a match. */
		if (key && side_cb->key && side_cb->key != key)
			continue;
		side_cb->u.call(es0->desc, side_arg_vec, side_cb->priv);
	}
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	side_call_key(event_state, side_arg_vec, NULL);
}
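
/*
 * Usage sketch (hypothetical tracer code; the names are invented for
 * illustration):
 *
 *	static void my_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec *sav, void *priv);
 *	static int my_key;	// any unique address can serve as key
 *
 *	side_tracer_callback_register(desc, my_cb, NULL, &my_key);
 *	...
 *	// Invoke only the callbacks registered with &my_key:
 *	side_call_key(side_ptr_get(desc->state), side_arg_vec, &my_key);
 */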

void side_call_variadic_key(const struct side_event_state *event_state,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct,
	void *key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call_variadic != NULL; side_cb++) {
		/* A NULL key, on either side, is always a match. */
		if (key && side_cb->key && side_cb->key != key)
			continue;
		side_cb->u.call_variadic(es0->desc, side_arg_vec, var_struct, side_cb->priv);
	}
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

void side_call_variadic(const struct side_event_state *event_state,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct)
{
	side_call_variadic_key(event_state, side_arg_vec, var_struct, NULL);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv, void *key)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_event_state_0 *es0;
	const struct side_callback *cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	for (cb = es0->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv && cb->key == key)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv, void *key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	struct side_event_state_0 *es0;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	old_nr_cb = es0->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv, key) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv, key)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) es0->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	new_cb[old_nr_cb].key = key;
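	/*
	 * Publish the new array with an RCU store, then wait for a
	 * grace period so concurrent side_call_key() readers are done
	 * with the old array before it is freed below.
	 */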
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	es0->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, void *key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, void *key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv, key);
}

static int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv, void *key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	struct side_event_state_0 *es0;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	cb_pos = side_tracer_callback_lookup(desc, call, priv, key);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = es0->nr_callbacks;
	old_cb = (struct side_callback *) es0->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - es0->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	es0->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, void *key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, void *key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv, key);
}

struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
		calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;
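	/*
	 * The caller's events array is only referenced (not copied);
	 * it must remain valid until side_events_unregister().
	 */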

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	// TODO: call event batch register ioctl
	return events_handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	struct side_event_state_0 *es0;
	struct side_callback *old_cb;
	uint32_t nr_cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	nr_cb = es0->nr_callbacks;
	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) es0->callbacks;
	(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	es0->nr_callbacks = 0;
	side_rcu_assign_pointer(es0->callbacks, (const struct side_callback *) &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers. */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	// TODO: call event batch unregister ioctl
	free(events_handle);
}

struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
		calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
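	/*
	 * Replay insert notifications for all events registered before
	 * this tracer appeared.
	 */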
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	free(tracer_handle);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&rcu_gp);
	finalized = true;
}