Refactoring: combine static and dynamic types
libside.git: src/side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "tracer.h"
#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback;

void side_init(void) __attribute__((constructor));
void side_exit(void) __attribute__((destructor));

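/*
 * Invoke every tracer callback currently registered on a static
 * (non-variadic) event. The callback array is traversed under RCU
 * read-side protection so callbacks can be registered or unregistered
 * concurrently.
 */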
void side_call(const struct side_event_description *desc, const struct side_arg_vec *sav_desc)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
		printf("ERROR: unexpected variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(desc, sav_desc, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

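/*
 * Invoke every tracer callback currently registered on a variadic
 * event, passing both the static argument vector and the dynamic
 * (variadic) argument structure.
 */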
void side_call_variadic(const struct side_event_description *desc,
	const struct side_arg_vec *sav_desc,
	const struct side_arg_dynamic_event_struct *var_struct)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
		printf("ERROR: unexpected non-variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(desc, sav_desc, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

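/*
 * Find the callback entry matching a (call, priv) tuple in the
 * callback array of an event description. Return NULL if not found.
 */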
static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void (*call)(), void *priv)
{
	const struct side_callback *cb;

	for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
		if (cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

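/*
 * Register a tracer callback on an event. The callback array is never
 * modified in place: a larger copy is allocated, the new entry is
 * appended, the copy is published with side_rcu_assign_pointer, and
 * the old array is freed only after a grace period, so concurrent
 * side_call()/side_call_variadic() iterations remain safe.
 */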
static
int _side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	old_nr_cb = desc->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) desc->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic = call;
	else
		new_cb[old_nr_cb].u.call = call;
	new_cb[old_nr_cb].priv = priv;
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	desc->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

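/*
 * Public registration entry points: check that the callback type
 * (static vs variadic) matches the event description flags before
 * delegating to the common helper.
 */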
int side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call_variadic, priv);
}

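/*
 * Unregister a tracer callback from an event. The matching entry is
 * removed by publishing a new, shorter callback array through RCU and
 * freeing the old array after a grace period.
 */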
int _side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = desc->nr_callbacks;
	old_cb = (struct side_callback *) desc->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - desc->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	desc->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

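/*
 * Public unregistration entry points: check that the callback type
 * matches the event description flags, as done for registration.
 */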
int side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call_variadic, priv);
}

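/*
 * Register an array of event descriptions with the side runtime and
 * notify every registered tracer that these events were inserted.
 */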
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

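/*
 * Remove all callbacks from an event when its registration handle is
 * torn down, and decrement the user-space enabled count that was
 * added when the first callback was registered.
 */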
static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	uint32_t nr_cb = desc->nr_callbacks;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) desc->callbacks;
	(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	desc->nr_callbacks = 0;
	side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

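/*
 * Register a tracer notification callback. The callback is invoked
 * immediately for every event handle already registered, so the
 * tracer starts with a complete view of the existing instrumentation.
 */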
struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

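/*
 * Unregister a tracer notification callback, first replaying a
 * REMOVE_EVENTS notification for every currently registered event
 * handle.
 */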
void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&rcu_gp);
	finalized = true;
}