// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif

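/*
 * The low bits of an event's enabled word count user-space tracer
 * references: the count is incremented when the first callback is
 * registered on the event and decremented when the last one is
 * unregistered (see below). The kernel tracer owns the top 8 bits.
 */
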
struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after the library
 * destructor has run.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function pointer, which stops
 * iteration over the callback array immediately.
 */
const struct side_callback side_empty_callback = { };

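/*
 * Instrumentation fast path: return early when the library is
 * finalized, lazily initialize on first use, then invoke every
 * registered callback under an RCU read-side critical section so that
 * concurrent callback (un)registration can safely replace the
 * callback array.
 */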
void side_call(const struct side_event_description *desc, const struct side_arg_vec *side_arg_vec)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
		printf("ERROR: unexpected variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(desc, side_arg_vec, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

void side_call_variadic(const struct side_event_description *desc,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
		printf("ERROR: unexpected non-variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(desc, side_arg_vec, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

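/*
 * Find the entry matching the (call, priv) tuple in the
 * NULL-terminated callback array of an event, or return NULL if it is
 * not present.
 */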
static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv)
{
	const struct side_callback *cb;

	for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

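/*
 * Callback arrays are updated with a copy-on-write scheme: a new array
 * (with room for the added callback and the NULL terminator) is
 * published with side_rcu_assign_pointer(), and the old array is freed
 * only after a grace period, so concurrent side_call() iterations
 * never observe a partially updated array.
 */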
static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	old_nr_cb = desc->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) desc->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	desc->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv);
}

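/*
 * Hypothetical usage sketch (illustrative only, not part of this
 * file): a tracer registers a callback matching the invocation made
 * by side_call() above; my_print_cb and my_priv are made-up names,
 * and the void return type of side_tracer_callback_func is assumed.
 *
 *	static void my_print_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec *side_arg_vec, void *priv)
 *	{
 *		// Consume side_arg_vec here.
 *	}
 *
 *	// ...
 *	if (side_tracer_callback_register(desc, my_print_cb, &my_priv) != SIDE_ERROR_OK)
 *		abort();
 *
 * Unregistration must pass the same (call, priv) tuple:
 *
 *	side_tracer_callback_unregister(desc, my_print_cb, &my_priv);
 */
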
static
int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = desc->nr_callbacks;
	old_cb = (struct side_callback *) desc->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - desc->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	desc->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv);
}

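/*
 * Register a batch of event descriptions. The handle is linked into
 * the global events list, and each registered tracer is notified of
 * the insertion while side_lock is held.
 */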
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
		calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	uint32_t nr_cb = desc->nr_callbacks;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) desc->callbacks;
	(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Reset the state to 0 callbacks and the empty callback array
	 * out of caution. This should not matter because the
	 * instrumentation is unreachable.
	 */
	desc->nr_callbacks = 0;
	side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
	/*
	 * No need to wait for a grace period because the
	 * instrumentation is unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister an event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers. */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

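/*
 * Register a tracer notification callback. While side_lock is held,
 * the callback is immediately replayed with
 * SIDE_TRACER_NOTIFICATION_INSERT_EVENTS for each event batch already
 * present in the global events list, so a late-attaching tracer
 * observes pre-existing events as well as future registrations.
 */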
struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
		calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
}

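/*
 * side_init() is idempotent. It may be triggered lazily from the API
 * entry points above, including from library constructors that run
 * before main().
 */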
void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can also be
 * called explicitly at application exit. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&rcu_gp);
	finalized = true;
}