Extensible struct side_event_state
[libside.git] / src / side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <assert.h>

#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback = { };

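/*
 * Invoke every tracer callback registered for a non-variadic event.
 * The callback array is traversed under RCU read-side protection, so
 * callbacks can be registered and unregistered concurrently with
 * side_call() without blocking instrumented code.
 */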
void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	assert(!(side_ptr_get(event_state->desc)->flags & SIDE_EVENT_FLAG_VARIADIC));
	enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(side_ptr_get(event_state->callbacks)); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(side_ptr_get(event_state->desc), side_arg_vec, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

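/*
 * Same as side_call(), for variadic events: the dynamic fields are
 * passed through an additional side_arg_dynamic_struct argument.
 */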
void side_call_variadic(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		const struct side_arg_dynamic_struct *var_struct)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	assert(side_ptr_get(event_state->desc)->flags & SIDE_EVENT_FLAG_VARIADIC);
	enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(side_ptr_get(event_state->callbacks)); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(side_ptr_get(event_state->desc), side_arg_vec, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

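/*
 * Look up a (call, priv) pair in the callback array of an event.
 * Returns the matching callback entry, or NULL if not found.
 * Called with side_lock held.
 */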
static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_callback *cb;

	for (cb = side_ptr_get(event_state->callbacks); cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

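/*
 * Register a tracer callback for an event by publishing a new copy of
 * the callback array (RCU update side). Registering the first callback
 * also increments the event "enabled" counter, which makes the
 * instrumentation reachable.
 */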
static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	old_nr_cb = desc->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) side_ptr_get(event_state->callbacks);
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	/* Copy the pre-existing callbacks over to the new array. */
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(side_ptr_get(event_state->callbacks), new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	desc->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&event_state->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

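/*
 * Public registration entry points: the variadic and non-variadic
 * variants only differ by the callback signature, and both reject an
 * event description whose SIDE_EVENT_FLAG_VARIADIC flag does not match.
 */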
int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv);
}

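/*
 * Unregister a tracer callback by publishing a new callback array with
 * the matching entry removed, then reclaiming the old array after a
 * grace period. Removing the last callback decrements the event
 * "enabled" counter.
 */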
static int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = desc->nr_callbacks;
	old_cb = (struct side_callback *) side_ptr_get(event_state->callbacks);
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - side_ptr_get(event_state->callbacks);
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		/* Copy the entries before and after the removed callback. */
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(side_ptr_get(event_state->callbacks), new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	desc->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

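/*
 * Public unregistration entry points, symmetric with the registration
 * wrappers above.
 */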
int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv);
}

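/*
 * Register an array of event descriptions with the side runtime and
 * notify all currently registered tracers of the new events.
 */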
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
			calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

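/*
 * Remove all callbacks from an event whose description is being
 * unregistered. Called with side_lock held.
 */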
static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	uint32_t nr_cb = desc->nr_callbacks;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) side_ptr_get(event_state->callbacks);
	(void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	desc->nr_callbacks = 0;
	side_rcu_assign_pointer(side_ptr_get(event_state->callbacks), &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

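/*
 * Register a tracer notification callback. The callback is immediately
 * replayed with an INSERT notification for every event handle already
 * registered, so the tracer sees a consistent view of existing events.
 */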
struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
			calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

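/*
 * Unregister a tracer notification callback. A REMOVE notification is
 * emitted for every event handle still registered before the tracer
 * handle is removed from the list and freed.
 */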
void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	free(tracer_handle);
}

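/*
 * Lazily initialize the RCU grace-period state. Safe to call multiple
 * times; only the first call has an effect.
 */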
void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&rcu_gp);
	finalized = true;
}