Use event state as argument to call fast path
[libside.git] / src / side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
/*
 * Directly include headers for facilities used in this file
 * (printf/abort, calloc/free, pthread mutexes), in case they are not
 * pulled in indirectly.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback = { };

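/*
 * Fast path for non-variadic events. Instrumented call sites invoke
 * this with the event state (which carries the enabled word, the
 * callback array and the event description) and a pre-built argument
 * vector. Callbacks are iterated under the RCU read-side lock, so
 * tracers can register and unregister callbacks concurrently.
 */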
void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
		printf("ERROR: unexpected variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(event_state->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(event_state->desc, side_arg_vec, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

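/*
 * Variadic counterpart of side_call(): the caller additionally passes
 * the dynamic fields gathered at the call site in var_struct.
 */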
void side_call_variadic(const struct side_event_state *event_state,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(!(event_state->desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
		printf("ERROR: unexpected non-variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(event_state->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(event_state->desc, side_arg_vec, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_callback *cb;

	for (cb = event_state->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

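/*
 * Callback registration uses a copy-update scheme: a new callback
 * array (terminated by an empty callback) is allocated, published with
 * side_rcu_assign_pointer(), and the old array is freed only after a
 * grace period, so concurrent side_call() iterations remain valid.
 */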
static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	old_nr_cb = desc->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) event_state->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	side_rcu_assign_pointer(event_state->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	desc->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&event_state->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

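/*
 * Example usage (sketch, not part of this file): a tracer holding an
 * event description could attach and later detach a callback matching
 * side_tracer_callback_func, e.g.:
 *
 *	ret = side_tracer_callback_register(my_event_desc, my_print_cb, NULL);
 *	...
 *	ret = side_tracer_callback_unregister(my_event_desc, my_print_cb, NULL);
 *
 * "my_event_desc" and "my_print_cb" are illustrative names only.
 */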
int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv);
}

static int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = desc->nr_callbacks;
	old_cb = (struct side_callback *) event_state->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - event_state->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(event_state->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	desc->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv);
}

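/*
 * Register a batch of events provided by an instrumented application
 * or library. Each registered tracer is notified of the newly inserted
 * events through its notification callback.
 */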
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
			calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	uint32_t nr_cb = desc->nr_callbacks;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) event_state->callbacks;
	(void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	desc->nr_callbacks = 0;
	side_rcu_assign_pointer(event_state->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}
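
/*
 * Example flow (sketch): a tracer typically registers a notification
 * callback first, then attaches per-event callbacks from within the
 * SIDE_TRACER_NOTIFICATION_INSERT_EVENTS notification, e.g.:
 *
 *	handle = side_tracer_event_notification_register(my_notif_cb, NULL);
 *	...
 *	side_tracer_event_notification_unregister(handle);
 *
 * "my_notif_cb" is an illustrative name; the recursive side_lock
 * allows calling the callback (un)registration API from within the
 * notification callback.
 */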

struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
			calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	free(tracer_handle);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&rcu_gp);
	finalized = true;
}