Remove hardcoded tracer calls
[libside.git] / src / side.c

// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

#include "tracer.h"
#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#define SIDE_EVENT_ENABLED_KERNEL_MASK 0xFF000000
#define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK 0x80000000

/* Allow 2^24 tracer callbacks to be registered on an event. */
#define SIDE_EVENT_ENABLED_USER_MASK 0x00FFFFFF

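/*
 * Illustrative example (hypothetical value, for clarity only): a
 * per-event "enabled" word of 0x80000002 means the kernel user event
 * bit is set and two user-space callbacks are registered:
 *
 *	(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK) != 0
 *	(enabled & SIDE_EVENT_ENABLED_USER_MASK) == 2
 */
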
struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

static pthread_mutex_t side_lock = PTHREAD_MUTEX_INITIALIZER;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback;

void side_init(void) __attribute__((constructor));
void side_exit(void) __attribute__((destructor));

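/*
 * Invoke all user-space callbacks registered on a non-variadic event.
 * The callback array is traversed under an RCU read-side critical
 * section, so it can run concurrently with callback registration and
 * unregistration.
 */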
void side_call(const struct side_event_description *desc, const struct side_arg_vec_description *sav_desc)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uint32_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
		fprintf(stderr, "ERROR: unexpected variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(desc, sav_desc, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

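/*
 * Variadic flavor of side_call(): additionally passes the dynamic
 * argument structure to each registered variadic callback.
 */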
void side_call_variadic(const struct side_event_description *desc,
	const struct side_arg_vec_description *sav_desc,
	const struct side_arg_dynamic_event_struct *var_struct)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uint32_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
		fprintf(stderr, "ERROR: unexpected non-variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(desc, sav_desc, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void (*call)(), void *priv)
{
	const struct side_callback *cb;

	for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
		if (cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

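/*
 * Add a callback to an event with a copy-update scheme: allocate a new
 * array holding the existing callbacks plus the new one, publish it
 * with side_rcu_assign_pointer(), then wait for a grace period before
 * freeing the old array, so concurrent side_call() readers are safe.
 */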
static
int _side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	if (old_nr_cb == SIDE_EVENT_ENABLED_USER_MASK) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) desc->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic = call;
	else
		new_cb[old_nr_cb].u.call = call;
	new_cb[old_nr_cb].priv = priv;
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	/* Increment concurrently with kernel setting the top bits. */
	(void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call, priv);
}

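/*
 * Example usage: hypothetical event descriptor and callback, for
 * illustration only.
 *
 *	static void my_tracer_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec_description *sav_desc,
 *			void *priv)
 *	{
 *		// Consume the argument vector here.
 *	}
 *
 *	// my_event_desc is a hypothetical non-variadic event descriptor.
 *	int ret = side_tracer_callback_register(&my_event_desc, my_tracer_cb, NULL);
 */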
int side_tracer_callback_variadic_register(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call_variadic, priv);
}

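/*
 * Remove a callback with the same copy-update scheme as registration:
 * publish a new array without the removed entry (or the shared empty
 * callback when the last entry goes away), wait for a grace period,
 * then free the old array.
 */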
static
int _side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	old_cb = (struct side_callback *) desc->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - desc->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	/* Decrement concurrently with kernel setting the top bits. */
	(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call_variadic, priv);
}

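/*
 * Register a batch of events and notify all currently registered
 * tracers so they can instrument the new events.
 */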
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

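/*
 * Reset an event to its disabled state: clear the user callback count
 * and point the callback array back at the shared empty callback.
 */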
static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	uint32_t nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) desc->callbacks;
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	(void) __atomic_add_fetch(desc->enabled, -nr_cb, __ATOMIC_RELAXED);
	side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers. */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

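/*
 * Register a tracer notification callback. Event batches already
 * registered are immediately replayed to the new tracer as INSERT
 * notifications, under the side lock, so the tracer sees a consistent
 * event set.
 */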
struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

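/*
 * Unregister a tracer notification callback. REMOVE notifications are
 * replayed to the departing tracer for every registered event batch
 * before the handle is taken off the tracer list.
 */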
void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	/* Free the handle allocated by side_tracer_event_notification_register(). */
	free(tracer_handle);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_rcu_gp_exit(&rcu_gp);
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	finalized = true;
}