Remove typically false redundant check on fast-path
[libside.git] / src / side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>

#include "tracer.h"
#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

static pthread_mutex_t side_lock = PTHREAD_MUTEX_INITIALIZER;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback;
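
/*
 * Layout note (illustrative, not generated by this file): desc->callbacks
 * points to a NULL-terminated array. With two registered callbacks it
 * would look like:
 *
 *	{ { .u.call = cb_a, .priv = priv_a },
 *	  { .u.call = cb_b, .priv = priv_b },
 *	  { .u.call = NULL } }	/- terminating entry stops iteration -/
 */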

void side_init(void) __attribute__((constructor));
void side_exit(void) __attribute__((destructor));

void side_call(const struct side_event_description *desc, const struct side_arg_vec_description *sav_desc)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
		printf("ERROR: unexpected variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
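	/*
	 * RCU read-side critical section: protects iteration over the
	 * callback array against concurrent registration/unregistration,
	 * which publish a new array with side_rcu_assign_pointer() and
	 * only free the old one after a grace period.
	 */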
	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(desc, sav_desc, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

void side_call_variadic(const struct side_event_description *desc,
	const struct side_arg_vec_description *sav_desc,
	const struct side_arg_dynamic_event_struct *var_struct)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
		printf("ERROR: unexpected non-variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(desc, sav_desc, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void (*call)(), void *priv)
{
	const struct side_callback *cb;

	for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
		if (cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	old_nr_cb = desc->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) desc->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic = call;
	else
		new_cb[old_nr_cb].u.call = call;
	new_cb[old_nr_cb].priv = priv;
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	desc->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call_variadic, priv);
}
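
/*
 * Illustrative sketch (not part of this file): a tracer typically
 * attaches to an event with side_tracer_callback_register(), passing a
 * callback matching the non-variadic signature above. The callback,
 * event descriptor and error handler names below are hypothetical.
 *
 *	static void my_tracer_print(const struct side_event_description *desc,
 *			const struct side_arg_vec_description *sav_desc, void *priv)
 *	{
 *		// Consume the event payload described by sav_desc.
 *	}
 *
 *	if (side_tracer_callback_register(my_event_desc, my_tracer_print, NULL)
 *			!= SIDE_ERROR_OK)
 *		handle_error();
 *	// ... tracing active ...
 *	side_tracer_callback_unregister(my_event_desc, my_tracer_print, NULL);
 */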

int _side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = desc->nr_callbacks;
	old_cb = (struct side_callback *) desc->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - desc->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	desc->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call_variadic, priv);
}

struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}
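
/*
 * Illustrative sketch (not part of this file): an instrumented
 * application or library registers its event descriptions in one batch
 * and keeps the returned handle for unregistration. The array below is
 * hypothetical; it would normally be produced by the side
 * instrumentation macros.
 *
 *	static struct side_event_description *my_events[] = {
 *		&my_event_a, &my_event_b,
 *	};
 *	struct side_events_register_handle *handle;
 *
 *	handle = side_events_register(my_events, 2);
 *	if (!handle)
 *		handle_error();
 *	// ... events are now visible to registered tracers ...
 *	side_events_unregister(handle);
 */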

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	uint32_t nr_cb = desc->nr_callbacks;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) desc->callbacks;
	(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	desc->nr_callbacks = 0;
	side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

struct side_tracer_handle *side_tracer_event_notification_register(
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv),
	void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}
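
/*
 * Illustrative sketch (not part of this file): a tracer that wants to
 * learn about event arrays as they are registered can hook the
 * notification mechanism. The callback name is hypothetical.
 *
 *	static void my_notif_cb(enum side_tracer_notification notif,
 *			struct side_event_description **events, uint32_t nr_events,
 *			void *priv)
 *	{
 *		// On SIDE_TRACER_NOTIFICATION_INSERT_EVENTS, attach callbacks;
 *		// on SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS, detach them.
 *	}
 *
 *	struct side_tracer_handle *th;
 *
 *	th = side_tracer_event_notification_register(my_notif_cb, NULL);
 *	// ... receive notifications for existing and future event arrays ...
 *	side_tracer_event_notification_unregister(th);
 */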

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_rcu_gp_exit(&rcu_gp);
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	finalized = true;
}