// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>	/* pthread_mutex_t, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP */
#include <stdlib.h>	/* calloc(), free(), abort() */

#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif
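
/*
 * Layout of the per-event `enabled` word (64-bit case):
 *
 *	bits 56-63: reserved for the kernel tracer
 *		    (SIDE_EVENT_ENABLED_KERNEL_MASK); bit 63 flags that
 *		    a kernel user event is attached
 *		    (SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK).
 *	bits  0-55: count of registered user-space callbacks
 *		    (SIDE_EVENT_ENABLED_USER_MASK).
 *
 * A single relaxed load of the word thus tells whether any tracer,
 * user-space or kernel, is attached: e.g. one registered user callback
 * and no kernel tracer yields enabled == 1.
 */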

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

struct side_statedump_request_handle {
	struct side_list_node node;
	void (*cb)(void);
};

struct side_callback {
	union {
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			void *priv);
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			const struct side_arg_dynamic_struct *var_struct,
			void *priv);
	} u;
	void *priv;
	void *key;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);
static DEFINE_SIDE_LIST_HEAD(side_statedump_list);

/*
 * Callback filter key for state dump.
 */
static __thread void *filter_key;

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const char side_empty_callback[sizeof(struct side_callback)];
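
/*
 * Callback arrays are NULL-terminated: an event with two callbacks
 * points to { cb0, cb1, zeroed sentinel }, and an event with none
 * points at side_empty_callback, whose zeroed function pointer makes
 * the dispatch loops below stop at the first entry.
 */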

static
void _side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec, void *key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(!(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call != NULL; side_cb++) {
		/* A NULL key is always a match. */
		if (key && side_cb->key && side_cb->key != key)
			continue;
		side_cb->u.call(es0->desc, side_arg_vec, side_cb->priv);
	}
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	_side_call(event_state, side_arg_vec, NULL);
}

void side_statedump_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	_side_call(event_state, side_arg_vec, filter_key);
}

static
void _side_call_variadic(const struct side_event_state *event_state,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct,
	void *key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call_variadic != NULL; side_cb++) {
		/* A NULL key is always a match. */
		if (key && side_cb->key && side_cb->key != key)
			continue;
		side_cb->u.call_variadic(es0->desc, side_arg_vec, var_struct, side_cb->priv);
	}
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

void side_call_variadic(const struct side_event_state *event_state,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct)
{
	_side_call_variadic(event_state, side_arg_vec, var_struct, NULL);
}

void side_statedump_call_variadic(const struct side_event_state *event_state,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct)
{
	_side_call_variadic(event_state, side_arg_vec, var_struct, filter_key);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv, void *key)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_event_state_0 *es0;
	const struct side_callback *cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	for (cb = es0->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv && cb->key == key)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv, void *key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	struct side_event_state_0 *es0;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	old_nr_cb = es0->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv, key) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv, key)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) es0->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	new_cb[old_nr_cb].key = key;
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	es0->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, void *key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, void *key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv, key);
}
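
/*
 * Usage sketch, assuming a hypothetical event description
 * `my_event_desc` and tracer callback `my_tracer_cb` (the signature
 * matches side_tracer_callback_func):
 *
 *	static void my_tracer_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec *side_arg_vec, void *priv)
 *	{
 *		// Consume the argument vector; priv is the pointer
 *		// passed at registration.
 *	}
 *
 *	if (side_tracer_callback_register(&my_event_desc, my_tracer_cb,
 *			NULL, NULL) != SIDE_ERROR_OK)
 *		abort();
 *	...
 *	side_tracer_callback_unregister(&my_event_desc, my_tracer_cb,
 *			NULL, NULL);
 *
 * Registration swaps in a freshly allocated callback array behind RCU,
 * so concurrent _side_call() iterations keep reading the old array
 * until a grace period has elapsed.
 */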

static int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv, void *key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	struct side_event_state_0 *es0;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	cb_pos = side_tracer_callback_lookup(desc, call, priv, key);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = es0->nr_callbacks;
	old_cb = (struct side_callback *) es0->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - es0->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	es0->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, void *key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, void *key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv, key);
}

struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
		calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}
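
/*
 * Usage sketch, assuming a hypothetical `my_event_array` of
 * `my_nr_events` event descriptions: an instrumented library would
 * typically register its events from a constructor and unregister them
 * from a destructor.
 *
 *	static struct side_events_register_handle *my_events_handle;
 *
 *	static __attribute__((constructor))
 *	void my_lib_init(void)
 *	{
 *		my_events_handle = side_events_register(my_event_array,
 *				my_nr_events);
 *	}
 *
 *	static __attribute__((destructor))
 *	void my_lib_exit(void)
 *	{
 *		side_events_unregister(my_events_handle);
 *	}
 */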

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	struct side_event_state_0 *es0;
	struct side_callback *old_cb;
	uint32_t nr_cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	nr_cb = es0->nr_callbacks;
	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) es0->callbacks;
	(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	es0->nr_callbacks = 0;
	side_rcu_assign_pointer(es0->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers. */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
		calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}
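
/*
 * Usage sketch for a tracer, assuming the hypothetical my_tracer_cb
 * from the sketch above: the notification callback is replayed for all
 * pre-existing event handles at registration time, then invoked on
 * every later side_events_register()/side_events_unregister(). Since
 * side_lock is recursive, the callback may itself call
 * side_tracer_callback_register()/unregister().
 *
 *	static void my_notif_cb(enum side_tracer_notification notif,
 *			struct side_event_description **events,
 *			uint32_t nr_events, void *priv)
 *	{
 *		uint32_t i;
 *
 *		for (i = 0; i < nr_events; i++) {
 *			if (notif == SIDE_TRACER_NOTIFICATION_INSERT_EVENTS)
 *				(void) side_tracer_callback_register(events[i],
 *					my_tracer_cb, NULL, NULL);
 *			else
 *				(void) side_tracer_callback_unregister(events[i],
 *					my_tracer_cb, NULL, NULL);
 *		}
 *	}
 *
 *	struct side_tracer_handle *th =
 *		side_tracer_event_notification_register(my_notif_cb, NULL);
 */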

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	free(tracer_handle);
}

struct side_statedump_request_handle *side_statedump_request_notification_register(void (*statedump_cb)(void))
{
	struct side_statedump_request_handle *handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	/*
	 * The statedump request notification should not be registered
	 * from a notification callback.
	 */
	assert(filter_key == NULL);
	handle = (struct side_statedump_request_handle *)
		calloc(1, sizeof(struct side_statedump_request_handle));
	if (!handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	handle->cb = statedump_cb;
	side_list_insert_node_tail(&side_statedump_list, &handle->node);
	/* Invoke callback for all tracers. */
	statedump_cb();
	pthread_mutex_unlock(&side_lock);
	return handle;
}

void side_statedump_request_notification_unregister(struct side_statedump_request_handle *handle)
{
	if (finalized)
		return;
	if (!initialized)
		side_init();
	assert(filter_key == NULL);
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&handle->node);
	pthread_mutex_unlock(&side_lock);
	free(handle);
}

void side_tracer_statedump_request(void *key)
{
	struct side_statedump_request_handle *handle;

	/* Invoke the state dump callback specifically for the tracer key. */
	filter_key = key;
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(handle, &side_statedump_list, node)
		handle->cb();
	pthread_mutex_unlock(&side_lock);
	filter_key = NULL;
}
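
/*
 * State dump flow: side_tracer_statedump_request() stashes the
 * requesting tracer's key in the thread-local `filter_key` and replays
 * every registered statedump callback on the calling thread. Those
 * callbacks re-emit application state through side_statedump_call(),
 * which hands `filter_key` to _side_call(), so only callbacks
 * registered with a matching (or NULL) key observe the replay. Sketch,
 * assuming a hypothetical `my_statedump_cb` and `my_tracer_key`:
 *
 *	static void my_statedump_cb(void)
 *	{
 *		// Re-emit current state as events via the
 *		// side_statedump_call() entry points.
 *	}
 *
 *	struct side_statedump_request_handle *sh =
 *		side_statedump_request_notification_register(my_statedump_cb);
 *	...
 *	side_tracer_statedump_request(my_tracer_key);
 */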

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&rcu_gp);
	finalized = true;
}
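
/*
 * Note on lifecycle: side_init() is idempotent and called lazily from
 * the paths above, so the API is safe to use from library constructors
 * that may run before this library's own initialization. Once
 * side_exit() has set `finalized`, instrumentation calls return
 * immediately and registration attempts fail (SIDE_ERROR_EXITING or
 * NULL handles).
 */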