Use TLS variable for key state
[libside.git] / src / side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <assert.h>

#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

struct side_callback {
	union {
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			void *priv);
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			const struct side_arg_dynamic_struct *var_struct,
			void *priv);
	} u;
	void *priv;
	void *key;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * Callback filter key for state dump.
 */
static __thread void *filter_key;

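/*
 * Each tracer callback may be registered with a key (see the key field
 * of struct side_callback). side_call_key() and side_call_variadic_key()
 * pass the current thread's filter_key, so only callbacks registered
 * with a matching key, or with no key at all, are invoked. This allows
 * a state dump to target a single tracer's callbacks.
 */
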
/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const char side_empty_callback[sizeof(struct side_callback)];

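/*
 * Dispatch a non-variadic event to the registered callbacks. The
 * callback array is read under RCU protection so it can be replaced
 * concurrently by the registration/unregistration paths. When a
 * non-NULL key is passed, callbacks registered with a different key are
 * skipped; callbacks registered without a key always match.
 */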
static
void _side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec, void *key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(!(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call != NULL; side_cb++) {
		/* A NULL key is always a match. */
		if (key && side_cb->key && side_cb->key != key)
			continue;
		side_cb->u.call(es0->desc, side_arg_vec, side_cb->priv);
	}
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	_side_call(event_state, side_arg_vec, NULL);
}

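/*
 * Like side_call(), but filter callbacks against the per-thread
 * filter_key (used for state dump).
 */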
void side_call_key(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	_side_call(event_state, side_arg_vec, filter_key);
}

static
void _side_call_variadic(const struct side_event_state *event_state,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct,
	void *key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call_variadic != NULL; side_cb++) {
		/* A NULL key is always a match. */
		if (key && side_cb->key && side_cb->key != key)
			continue;
		side_cb->u.call_variadic(es0->desc, side_arg_vec, var_struct, side_cb->priv);
	}
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

void side_call_variadic(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		const struct side_arg_dynamic_struct *var_struct)
{
	_side_call_variadic(event_state, side_arg_vec, var_struct, NULL);
}

void side_call_variadic_key(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		const struct side_arg_dynamic_struct *var_struct)
{
	_side_call_variadic(event_state, side_arg_vec, var_struct, filter_key);
}

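/*
 * Look up a registered callback by its exact (call, priv, key) triplet.
 * Returns NULL when no matching callback is found.
 */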
static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv, void *key)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_event_state_0 *es0;
	const struct side_callback *cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	for (cb = es0->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv && cb->key == key)
			return cb;
	}
	return NULL;
}

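/*
 * Register a callback on an event. The callback array is never modified
 * in place: a new array with room for the extra callback and the NULL
 * terminator is allocated, populated, then published with
 * side_rcu_assign_pointer(). After a grace period, the old array can be
 * freed safely. The enabled count (low bits, see
 * SIDE_EVENT_ENABLED_USER_MASK) is incremented when the first callback
 * is added.
 */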
static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv, void *key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	struct side_event_state_0 *es0;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	old_nr_cb = es0->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv, key) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv, key)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) es0->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	new_cb[old_nr_cb].key = key;
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	es0->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

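/*
 * Typical usage from a tracer (sketch only; my_read_event, my_priv and
 * my_key are placeholder names, not part of this library):
 *
 *	static void my_read_event(const struct side_event_description *desc,
 *			const struct side_arg_vec *side_arg_vec, void *priv)
 *	{
 *		// consume the event payload
 *	}
 *
 *	int ret = side_tracer_callback_register(desc, my_read_event,
 *			my_priv, my_key);
 *	if (ret != SIDE_ERROR_OK)
 *		// handle error
 */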
int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, void *key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, void *key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv, key);
}

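/*
 * Unregister a callback. The matching entry is dropped by allocating a
 * smaller array (or falling back to side_empty_callback when the last
 * callback is removed), publishing it with RCU, waiting for a grace
 * period and then freeing the old array. The enabled count is
 * decremented when the callback count drops back to zero.
 */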
static int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv, void *key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	struct side_event_state_0 *es0;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	cb_pos = side_tracer_callback_lookup(desc, call, priv, key);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = es0->nr_callbacks;
	old_cb = (struct side_callback *) es0->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - es0->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	es0->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, void *key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, void *key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv, key);
}

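/*
 * Register an array of event descriptions produced by instrumented
 * code. Existing tracers are notified with
 * SIDE_TRACER_NOTIFICATION_INSERT_EVENTS so they can attach callbacks
 * to the new events.
 */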
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
		calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

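/*
 * Drop every callback attached to an event and clear its enabled count.
 * Only used when the owning events array is unregistered, at which
 * point the instrumentation is already unreachable.
 */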
static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	struct side_event_state_0 *es0;
	struct side_callback *old_cb;
	uint32_t nr_cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	nr_cb = es0->nr_callbacks;
	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) es0->callbacks;
	(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	es0->nr_callbacks = 0;
	side_rcu_assign_pointer(es0->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

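/*
 * Register a tracer notification callback. The callback is immediately
 * replayed with SIDE_TRACER_NOTIFICATION_INSERT_EVENTS for every events
 * array registered before the tracer, so the tracer does not miss
 * pre-existing instrumentation.
 */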
struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
		calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	free(tracer_handle);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&rcu_gp);
	finalized = true;
}