trace.h: Use side_ptr_t for event description
src/side.c (libside.git)
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>

#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK 0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK 0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK 0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK 0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK 0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK 0x00FFFFFFUL
#endif
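
/*
 * The per-event "enabled" word is split between kernel and user-space
 * tracers: the top 8 bits belong to the kernel tracer, with the
 * top-most bit flagging a kernel "user event", while the low bits count
 * user-space tracer references. For example, on a 64-bit build with one
 * user-space callback registered and the kernel user event bit set,
 * enabled reads 0x8000000000000001.
 */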

struct side_events_register_handle {
        struct side_list_node node;
        struct side_event_description **events;
        uint32_t nr_events;
};

struct side_tracer_handle {
        struct side_list_node node;
        void (*cb)(enum side_tracer_notification notif,
                struct side_event_description **events, uint32_t nr_events, void *priv);
        void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback = { };

void side_call(const struct side_event_description *desc, const struct side_arg_vec *side_arg_vec)
{
        struct side_event_state *event_state;
        struct side_rcu_read_state rcu_read_state;
        const struct side_callback *side_cb;
        uintptr_t enabled;

        if (side_unlikely(finalized))
                return;
        if (side_unlikely(!initialized))
                side_init();
        if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
                printf("ERROR: unexpected variadic event description\n");
                abort();
        }
        event_state = side_ptr_get(desc->state);
        enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
        if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
                // TODO: call kernel write.
        }
        side_rcu_read_begin(&rcu_gp, &rcu_read_state);
        for (side_cb = side_rcu_dereference(event_state->callbacks); side_cb->u.call != NULL; side_cb++)
                side_cb->u.call(desc, side_arg_vec, side_cb->priv);
        side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

void side_call_variadic(const struct side_event_description *desc,
        const struct side_arg_vec *side_arg_vec,
        const struct side_arg_dynamic_struct *var_struct)
{
        struct side_event_state *event_state;
        struct side_rcu_read_state rcu_read_state;
        const struct side_callback *side_cb;
        uintptr_t enabled;

        if (side_unlikely(finalized))
                return;
        if (side_unlikely(!initialized))
                side_init();
        if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
                printf("ERROR: unexpected non-variadic event description\n");
                abort();
        }
        event_state = side_ptr_get(desc->state);
        enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
        if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
                // TODO: call kernel write.
        }
        side_rcu_read_begin(&rcu_gp, &rcu_read_state);
        for (side_cb = side_rcu_dereference(event_state->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
                side_cb->u.call_variadic(desc, side_arg_vec, var_struct, side_cb->priv);
        side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

static
const struct side_callback *side_tracer_callback_lookup(
                const struct side_event_description *desc,
                void *call, void *priv)
{
        struct side_event_state *event_state = side_ptr_get(desc->state);
        const struct side_callback *cb;

        for (cb = event_state->callbacks; cb->u.call != NULL; cb++) {
                if ((void *) cb->u.call == call && cb->priv == priv)
                        return cb;
        }
        return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
                void *call, void *priv)
{
        struct side_event_state *event_state;
        struct side_callback *old_cb, *new_cb;
        int ret = SIDE_ERROR_OK;
        uint32_t old_nr_cb;

        if (!call)
                return SIDE_ERROR_INVAL;
        if (finalized)
                return SIDE_ERROR_EXITING;
        if (!initialized)
                side_init();
        pthread_mutex_lock(&side_lock);
        event_state = side_ptr_get(desc->state);
        old_nr_cb = event_state->nr_callbacks;
        if (old_nr_cb == UINT32_MAX) {
                ret = SIDE_ERROR_INVAL;
                goto unlock;
        }
        /* Reject duplicate (call, priv) tuples. */
        if (side_tracer_callback_lookup(desc, call, priv)) {
                ret = SIDE_ERROR_EXIST;
                goto unlock;
        }
        old_cb = (struct side_callback *) event_state->callbacks;
        /* old_nr_cb + 1 (new cb) + 1 (NULL) */
        new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
        if (!new_cb) {
                ret = SIDE_ERROR_NOMEM;
                goto unlock;
        }
        memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
        if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
                new_cb[old_nr_cb].u.call_variadic =
                        (side_tracer_callback_variadic_func) call;
        else
                new_cb[old_nr_cb].u.call =
                        (side_tracer_callback_func) call;
        new_cb[old_nr_cb].priv = priv;
        /* Publish the new array, then wait out pre-existing RCU readers of the old one. */
        side_rcu_assign_pointer(event_state->callbacks, new_cb);
        side_rcu_wait_grace_period(&rcu_gp);
        if (old_nr_cb)
                free(old_cb);
        event_state->nr_callbacks++;
        /* Increment concurrently with kernel setting the top bits. */
        if (!old_nr_cb)
                (void) __atomic_add_fetch(&event_state->enabled, 1, __ATOMIC_RELAXED);
unlock:
        pthread_mutex_unlock(&side_lock);
        return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
                side_tracer_callback_func call,
                void *priv)
{
        if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
                return SIDE_ERROR_INVAL;
        return _side_tracer_callback_register(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
                side_tracer_callback_variadic_func call_variadic,
                void *priv)
{
        if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
                return SIDE_ERROR_INVAL;
        return _side_tracer_callback_register(desc, (void *) call_variadic, priv);
}

static int _side_tracer_callback_unregister(struct side_event_description *desc,
                void *call, void *priv)
{
        struct side_event_state *event_state;
        struct side_callback *old_cb, *new_cb;
        const struct side_callback *cb_pos;
        uint32_t pos_idx;
        int ret = SIDE_ERROR_OK;
        uint32_t old_nr_cb;

        if (!call)
                return SIDE_ERROR_INVAL;
        if (finalized)
                return SIDE_ERROR_EXITING;
        if (!initialized)
                side_init();
        pthread_mutex_lock(&side_lock);
        event_state = side_ptr_get(desc->state);
        cb_pos = side_tracer_callback_lookup(desc, call, priv);
        if (!cb_pos) {
                ret = SIDE_ERROR_NOENT;
                goto unlock;
        }
        old_nr_cb = event_state->nr_callbacks;
        old_cb = (struct side_callback *) event_state->callbacks;
        if (old_nr_cb == 1) {
                new_cb = (struct side_callback *) &side_empty_callback;
        } else {
                pos_idx = cb_pos - event_state->callbacks;
                /* Remove entry at pos_idx. */
                /* old_nr_cb - 1 (removed cb) + 1 (NULL) */
                new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
                if (!new_cb) {
                        ret = SIDE_ERROR_NOMEM;
                        goto unlock;
                }
                memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
                memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
                        (old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
        }
        side_rcu_assign_pointer(event_state->callbacks, new_cb);
        side_rcu_wait_grace_period(&rcu_gp);
        free(old_cb);
        event_state->nr_callbacks--;
        /* Decrement concurrently with kernel setting the top bits. */
        if (old_nr_cb == 1)
                (void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
unlock:
        pthread_mutex_unlock(&side_lock);
        return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
                side_tracer_callback_func call,
                void *priv)
{
        if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
                return SIDE_ERROR_INVAL;
        return _side_tracer_callback_unregister(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
                side_tracer_callback_variadic_func call_variadic,
                void *priv)
{
        if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
                return SIDE_ERROR_INVAL;
        return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv);
}

struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
        struct side_events_register_handle *events_handle = NULL;
        struct side_tracer_handle *tracer_handle;

        if (finalized)
                return NULL;
        if (!initialized)
                side_init();
        events_handle = (struct side_events_register_handle *)
                calloc(1, sizeof(struct side_events_register_handle));
        if (!events_handle)
                return NULL;
        events_handle->events = events;
        events_handle->nr_events = nr_events;

        pthread_mutex_lock(&side_lock);
        side_list_insert_node_tail(&side_events_list, &events_handle->node);
        side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
                tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
                        events, nr_events, tracer_handle->priv);
        }
        pthread_mutex_unlock(&side_lock);
        //TODO: call event batch register ioctl
        return events_handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
        struct side_event_state *event_state = side_ptr_get(desc->state);
        uint32_t nr_cb = event_state->nr_callbacks;
        struct side_callback *old_cb;

        if (!nr_cb)
                return;
        old_cb = (struct side_callback *) event_state->callbacks;
        (void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
        /*
         * Reset the state to zero callbacks and the empty callback array
         * out of caution. This should not matter because the
         * instrumentation is unreachable at this point.
         */
        event_state->nr_callbacks = 0;
        side_rcu_assign_pointer(event_state->callbacks, &side_empty_callback);
        /*
         * No need to wait for a grace period because the instrumentation
         * is unreachable.
         */
        free(old_cb);
}

/*
 * Unregister an event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
        struct side_tracer_handle *tracer_handle;
        uint32_t i;

        if (!events_handle)
                return;
        if (finalized)
                return;
        if (!initialized)
                side_init();
        pthread_mutex_lock(&side_lock);
        side_list_remove_node(&events_handle->node);
        side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
                tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
                        events_handle->events, events_handle->nr_events,
                        tracer_handle->priv);
        }
        for (i = 0; i < events_handle->nr_events; i++) {
                struct side_event_description *event = events_handle->events[i];

                /* Skip NULL pointers. */
                if (!event)
                        continue;
                side_event_remove_callbacks(event);
        }
        pthread_mutex_unlock(&side_lock);
        //TODO: call event batch unregister ioctl
        free(events_handle);
}

struct side_tracer_handle *side_tracer_event_notification_register(
                void (*cb)(enum side_tracer_notification notif,
                        struct side_event_description **events, uint32_t nr_events, void *priv),
                void *priv)
{
        struct side_tracer_handle *tracer_handle;
        struct side_events_register_handle *events_handle;

        if (finalized)
                return NULL;
        if (!initialized)
                side_init();
        tracer_handle = (struct side_tracer_handle *)
                calloc(1, sizeof(struct side_tracer_handle));
        if (!tracer_handle)
                return NULL;
        pthread_mutex_lock(&side_lock);
        tracer_handle->cb = cb;
        tracer_handle->priv = priv;
        side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
        side_list_for_each_entry(events_handle, &side_events_list, node) {
                cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
                        events_handle->events, events_handle->nr_events, priv);
        }
        pthread_mutex_unlock(&side_lock);
        return tracer_handle;
}

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
        struct side_events_register_handle *events_handle;

        if (finalized)
                return;
        if (!initialized)
                side_init();
        pthread_mutex_lock(&side_lock);
        side_list_for_each_entry(events_handle, &side_events_list, node) {
                tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
                        events_handle->events, events_handle->nr_events,
                        tracer_handle->priv);
        }
        side_list_remove_node(&tracer_handle->node);
        pthread_mutex_unlock(&side_lock);
        free(tracer_handle);
}

void side_init(void)
{
        if (initialized)
                return;
        side_rcu_gp_init(&rcu_gp);
        initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
        struct side_events_register_handle *handle, *tmp;

        if (finalized)
                return;
        side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
                side_events_unregister(handle);
        side_rcu_gp_exit(&rcu_gp);
        finalized = true;
}
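
Usage sketch (not part of side.c): the snippet below illustrates how a tracer might consume this API, registering an event-notification callback and then attaching a per-event callback to each non-variadic event it is told about. The tracer_print_event(), tracer_event_notification(), tracer_init() and tracer_exit() helpers are hypothetical, and the per-event callback signature is inferred from the call sites in side_call() above; consult side/trace.h for the authoritative typedefs.

#include <side/trace.h>
#include <stdio.h>

static struct side_tracer_handle *tracer_handle;

static void tracer_print_event(const struct side_event_description *desc,
                const struct side_arg_vec *side_arg_vec, void *priv)
{
        /* A real tracer would decode side_arg_vec using desc here. */
        (void) desc;
        (void) side_arg_vec;
        (void) priv;
        fputs("event hit\n", stderr);
}

static void tracer_event_notification(enum side_tracer_notification notif,
                struct side_event_description **events, uint32_t nr_events, void *priv)
{
        uint32_t i;

        for (i = 0; i < nr_events; i++) {
                /* Event arrays may contain NULL entries; skip them. */
                if (!events[i])
                        continue;
                /* Variadic events would need the *_variadic_* variants. */
                if (events[i]->flags & SIDE_EVENT_FLAG_VARIADIC)
                        continue;
                if (notif == SIDE_TRACER_NOTIFICATION_INSERT_EVENTS)
                        (void) side_tracer_callback_register(events[i],
                                        tracer_print_event, priv);
                else
                        (void) side_tracer_callback_unregister(events[i],
                                        tracer_print_event, priv);
        }
}

static void tracer_init(void)
{
        /*
         * Registration also replays INSERT notifications for event
         * handles that were registered before the tracer attached.
         */
        tracer_handle = side_tracer_event_notification_register(
                        tracer_event_notification, NULL);
}

static void tracer_exit(void)
{
        side_tracer_event_notification_unregister(tracer_handle);
}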