tracer_print_type_integer: move get attr base
[libside.git] / src / side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>

#include "tracer.h"
#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif

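/*
 * Layout of the per-event "enabled" word: the kernel tracer owns the
 * top byte, while the low bits hold a user-space reference count
 * (incremented when an event goes from zero to at least one registered
 * callback). For example, on a 64-bit target, an event with registered
 * user-space callbacks and the kernel user-event bit set reads as
 * 0x8000000000000001.
 */
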
struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

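/*
 * An events register handle tracks one instrumented array of event
 * descriptions; a tracer handle tracks one tracer notification
 * callback. Both are linked on global lists protected by side_lock.
 */
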
static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback = { };

void side_init(void) __attribute__((constructor));
void side_exit(void) __attribute__((destructor));

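/*
 * side_call() is the non-variadic event entry point: it checks the
 * shared "enabled" word, then walks the RCU-protected callback array
 * and invokes each registered tracer callback in turn.
 */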
void side_call(const struct side_event_description *desc, const struct side_arg_vec *side_arg_vec)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
		printf("ERROR: unexpected variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(desc, side_arg_vec, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

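/*
 * side_call_variadic() is the variadic counterpart: it additionally
 * passes the dynamic structure holding the variadic fields to each
 * callback.
 */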
void side_call_variadic(const struct side_event_description *desc,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
		printf("ERROR: unexpected non-variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(desc, side_arg_vec, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv)
{
	const struct side_callback *cb;

	for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

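/*
 * Callback arrays are updated with a copy-update scheme: a new array
 * is allocated, published with side_rcu_assign_pointer(), and the old
 * array is freed only after a grace period, so concurrent side_call()
 * walks always see a consistent, NULL-terminated array.
 */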
static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	old_nr_cb = desc->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) desc->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	desc->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv);
}
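
/*
 * Example usage (a sketch; my_print_cb and my_event_desc are
 * hypothetical names, assuming the non-variadic callback signature
 * matching the invocation in side_call()):
 *
 *	static void my_print_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec *side_arg_vec, void *priv)
 *	{
 *		// Consume the event payload here.
 *	}
 *
 *	// my_event_desc points to the instrumented event's description.
 *	side_tracer_callback_register(my_event_desc, my_print_cb, NULL);
 */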

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv);
}

static
int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = desc->nr_callbacks;
	old_cb = (struct side_callback *) desc->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - desc->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	desc->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv);
}

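/*
 * Register an array of event descriptions on behalf of an
 * instrumented library, and notify every registered tracer so it can
 * attach callbacks to the new events.
 */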
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
		calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	uint32_t nr_cb = desc->nr_callbacks;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) desc->callbacks;
	(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	desc->nr_callbacks = 0;
	side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

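/*
 * Register a tracer notification callback. Events already registered
 * are immediately replayed to the new tracer as INSERT_EVENTS
 * notifications, so it does not miss instrumentation loaded before it
 * attached.
 */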
struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
		calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

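/*
 * Unregister a tracer notification callback. Remaining events are
 * replayed to the tracer as REMOVE_EVENTS notifications before the
 * handle is removed from the tracer list.
 */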
void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	/* Release the handle allocated by the register function. */
	free(tracer_handle);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&rcu_gp);
	finalized = true;
}