Document side_exit()
[libside.git] / src / side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "tracer.h"
#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000
#define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000

/* Allow 2^24 tracer callbacks to be registered on an event. */
#define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFF
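/*
 * The low 24 bits of *desc->enabled also count the callbacks
 * currently registered on the event: registration increments the
 * word, unregistration decrements it.
 */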

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

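/*
 * side_lock serializes registration and unregistration of events and
 * callbacks against each other.
 */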
static pthread_mutex_t side_lock = PTHREAD_MUTEX_INITIALIZER;

static DEFINE_SIDE_LIST_HEAD(side_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback;
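/*
 * Event descriptions are expected to start with their callbacks
 * pointer set to &side_empty_callback, so the callback array is only
 * heap-allocated (and thus freed) once a first callback has been
 * registered.
 */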

void side_init(void) __attribute__((constructor));
void side_exit(void) __attribute__((destructor));

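/*
 * side_call() is the instrumentation fast path for non-variadic
 * events: it returns early unless the kernel has enabled the event or
 * at least one user-space callback is registered.
 */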
void side_call(const struct side_event_description *desc, const struct side_arg_vec_description *sav_desc)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uint32_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
		fprintf(stderr, "ERROR: unexpected variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	//TODO: replace tracer_call by rcu iteration on list of registered callbacks
	tracer_call(desc, sav_desc, NULL);

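	/*
	 * Iterate on the RCU-protected callbacks array. The read-side
	 * critical section prevents a concurrent unregistration from
	 * freeing the array while it is being traversed.
	 */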
	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(desc, sav_desc, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

void side_call_variadic(const struct side_event_description *desc,
		const struct side_arg_vec_description *sav_desc,
		const struct side_arg_dynamic_event_struct *var_struct)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uint32_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
		fprintf(stderr, "ERROR: unexpected non-variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	//TODO: replace tracer_call by rcu iteration on list of registered callbacks
	tracer_call_variadic(desc, sav_desc, var_struct, NULL);

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(desc, sav_desc, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

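/*
 * Callback lookup is shared by the variadic and non-variadic code
 * paths: both function pointers live in the same union, so comparing
 * through u.call covers either case.
 */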
static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void (*call)(), void *priv)
{
	const struct side_callback *cb;

	for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
		if (cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	pthread_mutex_lock(&side_lock);
	old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	if (old_nr_cb == SIDE_EVENT_ENABLED_USER_MASK) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) desc->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic = call;
	else
		new_cb[old_nr_cb].u.call = call;
	new_cb[old_nr_cb].priv = priv;
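	/*
	 * Publish the new array, then wait for a grace period so no
	 * reader can still hold a reference to the old array before it
	 * is freed.
	 */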
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	/* Increment concurrently with kernel setting the top bits. */
	(void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call_variadic, priv);
}

static
int _side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	pthread_mutex_lock(&side_lock);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	old_cb = (struct side_callback *) desc->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - desc->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	/* Decrement concurrently with kernel setting the top bits. */
	(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call_variadic, priv);
}

struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *handle = NULL;

	if (finalized)
		return NULL;
	handle = calloc(1, sizeof(struct side_events_register_handle));
	if (!handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	handle->events = events;
	handle->nr_events = nr_events;
	side_list_insert_node_tail(&side_list, &handle->node);
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	uint32_t nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) desc->callbacks;
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	(void) __atomic_add_fetch(desc->enabled, -nr_cb, __ATOMIC_RELAXED);
	side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *handle)
{
	uint32_t i;

	if (finalized)
		return;
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&handle->node);
	for (i = 0; i < handle->nr_events; i++) {
		struct side_event_description *event = handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(handle);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_rcu_gp_exit(&rcu_gp);
	side_list_for_each_entry_safe(handle, tmp, &side_list, node)
		side_events_unregister(handle);
	finalized = true;
}
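
For reference, a minimal sketch of the tracer side of this API: it
registers a counting callback on a non-variadic event description
obtained elsewhere. Only functions and error codes defined above are
used; the helper names (count_cb, attach_counter, detach_counter) and
the hit_count counter are illustrative, not part of libside.

	#include <side/trace.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t hit_count;

	/* Invoked under the RCU read-side for each side_call() on the event. */
	static void count_cb(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv)
	{
		(void) desc;
		(void) sav_desc;
		(void) __atomic_add_fetch((uint64_t *) priv, 1, __ATOMIC_RELAXED);
	}

	static int attach_counter(struct side_event_description *desc)
	{
		/* Returns SIDE_ERROR_INVAL for variadic events. */
		return side_tracer_callback_register(desc, count_cb, &hit_count);
	}

	static void detach_counter(struct side_event_description *desc)
	{
		if (side_tracer_callback_unregister(desc, count_cb, &hit_count)
				!= SIDE_ERROR_OK)
			fprintf(stderr, "failed to unregister callback\n");
	}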