Move nr_callbacks to event state
libside.git: src/side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <assert.h>

#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif
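
/*
 * Each event carries an "enabled" word: the low-order bits are
 * incremented when the first user-space callback is registered on the
 * event and decremented when the last one is removed, while the top 8
 * bits are owned by the kernel tracer. The instrumentation can thus
 * test a single word to know whether any tracer (user-space or kernel)
 * is consuming the event.
 */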

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback = { };

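/*
 * Call every tracer callback registered on a non-variadic event. The
 * callback array is dereferenced and iterated under an RCU read-side
 * critical section; iteration stops at the empty (NULL) callback
 * terminator.
 */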
void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, p);
	assert(!(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(es0->desc, side_arg_vec, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

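/*
 * Variadic flavor of side_call(): also passes the dynamic structure
 * holding the variadic arguments to each registered callback.
 */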
void side_call_variadic(const struct side_event_state *event_state,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, p);
	assert(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(es0->desc, side_arg_vec, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

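/*
 * Look up a (call, priv) tuple in the callback array of an event.
 * Called with side_lock held from the register/unregister paths, so
 * the callback array is stable while it is scanned.
 */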
static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_event_state_0 *es0;
	const struct side_callback *cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, p);
	for (cb = es0->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

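/*
 * Register a tracer callback on an event. The callback array is
 * updated with a copy-and-replace scheme: a new array with room for
 * the extra callback plus the NULL terminator is allocated, the old
 * entries are copied over, the new array is published with
 * side_rcu_assign_pointer(), and the old array is freed after a grace
 * period. Adding the first callback increments the event's "enabled"
 * word so the instrumentation becomes reachable.
 */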
static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	struct side_event_state_0 *es0;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, p);
	old_nr_cb = es0->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) es0->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	es0->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv);
}

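/*
 * Unregister a tracer callback from an event. Mirrors the register
 * path: a smaller copy of the callback array (or the shared empty
 * callback when removing the last entry) is published with
 * side_rcu_assign_pointer(), the old array is freed after a grace
 * period, and removing the last callback decrements the event's
 * "enabled" word.
 */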
static int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	struct side_event_state_0 *es0;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, p);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = es0->nr_callbacks;
	old_cb = (struct side_callback *) es0->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - es0->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	es0->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv);
}

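/*
 * Register a batch of event descriptions with the library. The handle
 * is added to the global events list and every registered tracer is
 * notified of the newly inserted events.
 */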
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
			calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

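/*
 * Drop all remaining callbacks from an event whose handle is being
 * unregistered. Called with side_lock held, once the event's
 * instrumentation is no longer reachable.
 */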
static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	struct side_event_state_0 *es0;
	struct side_callback *old_cb;
	uint32_t nr_cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, p);
	nr_cb = es0->nr_callbacks;
	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) es0->callbacks;
	(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	es0->nr_callbacks = 0;
	side_rcu_assign_pointer(es0->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

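/*
 * Register a tracer notification callback. The callback is immediately
 * invoked, under side_lock, for every batch of events already
 * registered, then again whenever event batches are later inserted or
 * removed.
 */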
struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
			calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	free(tracer_handle);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&rcu_gp);
	finalized = true;
}
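
/*
 * Minimal usage sketch, for illustration only; my_print_cb,
 * my_event_desc, my_events and my_nr_events are hypothetical names,
 * and the callback signature below simply mirrors how callbacks are
 * invoked from side_call() above.
 *
 *	static void my_print_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec *side_arg_vec, void *priv)
 *	{
 *		// Decode side_arg_vec according to desc here.
 *	}
 *
 *	// Application side: publish a batch of event descriptions.
 *	struct side_events_register_handle *handle =
 *		side_events_register(my_events, my_nr_events);
 *
 *	// Tracer side: attach a callback to one non-variadic event.
 *	if (side_tracer_callback_register(my_event_desc, my_print_cb, NULL)
 *			!= SIDE_ERROR_OK)
 *		abort();
 *
 *	// ... instrumentation hitting the event now reaches my_print_cb ...
 *
 *	side_tracer_callback_unregister(my_event_desc, my_print_cb, NULL);
 *	side_events_unregister(handle);
 */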