// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>

#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif
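
/*
 * Note: the "enabled" word of a struct side_event_state combines the
 * kernel bits above (top 8 bits) with a user-space reference count
 * (low bits), which this file increments when the first callback is
 * registered on an event and decrements when the last one is removed.
 * A minimal sketch of a user-space enablement check:
 *
 *	uintptr_t enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
 *	int any_user_callback = (enabled & SIDE_EVENT_ENABLED_USER_MASK) != 0;
 */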

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback = { };
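
/*
 * Callback arrays attached to an event state are terminated by an
 * entry whose function pointer is NULL (side_empty_callback has that
 * layout).  For an event with N registered callbacks, the array holds
 * N + 1 entries: callbacks[0..N-1] followed by the zeroed sentinel.
 */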

void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	assert(!(event_state->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
	enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(event_state->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(event_state->desc, side_arg_vec, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

void side_call_variadic(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		const struct side_arg_dynamic_struct *var_struct)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	assert(event_state->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
	enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(event_state->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(event_state->desc, side_arg_vec, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_callback *cb;

	for (cb = event_state->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	old_nr_cb = desc->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) event_state->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	/* Publish the new callback array, then wait out pre-existing readers. */
	side_rcu_assign_pointer(event_state->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	desc->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&event_state->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv);
}
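
/*
 * Example: minimal sketch of a tracer attaching a callback to a
 * non-variadic event.  The descriptor pointer (my_event_desc) and the
 * counter are assumptions; the callback signature mirrors the
 * invocation in side_call() above.
 *
 *	static uint64_t my_hit_count;
 *
 *	static void my_count_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec *side_arg_vec, void *priv)
 *	{
 *		uint64_t *counter = (uint64_t *) priv;
 *
 *		(void) desc;
 *		(void) side_arg_vec;
 *		(*counter)++;
 *	}
 *
 *	// e.g. from a SIDE_TRACER_NOTIFICATION_INSERT_EVENTS handler:
 *	(void) side_tracer_callback_register(my_event_desc, my_count_cb, &my_hit_count);
 */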

static
int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = desc->nr_callbacks;
	old_cb = (struct side_callback *) event_state->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - event_state->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(event_state->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	desc->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv);
}
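
/*
 * Unregistration looks up the exact (call, priv) tuple passed at
 * registration time, so the matching teardown for the sketch above
 * would be:
 *
 *	(void) side_tracer_callback_unregister(my_event_desc, my_count_cb, &my_hit_count);
 */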

struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
			calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}
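
/*
 * Instrumented code hands its event descriptors to the library in
 * batches.  A minimal sketch (the descriptor array and its contents are
 * assumptions, typically produced by the side instrumentation macros):
 *
 *	static struct side_event_description *my_events[] = {
 *		&my_event_a_desc,
 *		&my_event_b_desc,
 *	};
 *	static struct side_events_register_handle *my_events_handle;
 *
 *	// e.g. from a library constructor:
 *	my_events_handle = side_events_register(my_events,
 *			sizeof(my_events) / sizeof(my_events[0]));
 *
 *	// and from the matching destructor:
 *	side_events_unregister(my_events_handle);
 */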

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	uint32_t nr_cb = desc->nr_callbacks;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) event_state->callbacks;
	(void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	desc->nr_callbacks = 0;
	side_rcu_assign_pointer(event_state->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers. */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
			calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	/* Replay already-registered event batches for the new tracer. */
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}
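
/*
 * Example: minimal sketch of a tracer discovering events through the
 * notification API and attaching a callback to each non-variadic one,
 * reusing my_count_cb and my_hit_count from the sketch above (the
 * recursive side_lock allows calling the callback (un)registration API
 * from within the notification handler):
 *
 *	static void my_notif_cb(enum side_tracer_notification notif,
 *			struct side_event_description **events, uint32_t nr_events, void *priv)
 *	{
 *		uint32_t i;
 *
 *		for (i = 0; i < nr_events; i++) {
 *			if (!events[i] || (events[i]->flags & SIDE_EVENT_FLAG_VARIADIC))
 *				continue;
 *			if (notif == SIDE_TRACER_NOTIFICATION_INSERT_EVENTS)
 *				(void) side_tracer_callback_register(events[i], my_count_cb, priv);
 *			else
 *				(void) side_tracer_callback_unregister(events[i], my_count_cb, priv);
 *		}
 *	}
 *
 *	struct side_tracer_handle *my_tracer_handle =
 *		side_tracer_event_notification_register(my_notif_cb, &my_hit_count);
 */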

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	free(tracer_handle);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&rcu_gp);
	finalized = true;
}
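
/*
 * Typical lifecycle sketch (the constructor/destructor hook names are
 * assumptions; side_init() is also invoked lazily by the entry points
 * above, so the explicit call is optional):
 *
 *	static void __attribute__((constructor)) my_side_ctor(void)
 *	{
 *		side_init();
 *	}
 *
 *	static void __attribute__((destructor)) my_side_dtor(void)
 *	{
 *		side_exit();
 *	}
 */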