Statedump improvements
[libside.git] / src / side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <assert.h>

#include "rcu.h"
#include "list.h"
#include "rculist.h"

/* Top 8 bits reserved for shared tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x8000000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x4000000000000000ULL

/* Allow 2^56 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x80000000UL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x40000000UL

/* Allow 2^24 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFUL
#endif

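/*
 * Example (illustrative sketch, not part of the library): the
 * per-event "enabled" word combines the shared tracer bits above with
 * the private tracer reference count, and decomposes as:
 *
 *	uintptr_t enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
 *	bool user_event_on = enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK;
 *	bool ptrace_on = enabled & SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK;
 *	uintptr_t nr_private_refs = enabled & SIDE_EVENT_ENABLED_PRIVATE_MASK;
 */
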
#define SIDE_KEY_RESERVED_RANGE_END	0x8

/* Key 0x0 is reserved to match all. */
#define SIDE_KEY_MATCH_ALL		0x0
/* Key 0x1 is reserved for user event. */
#define SIDE_KEY_USER_EVENT		0x1
/* Key 0x2 is reserved for ptrace. */
#define SIDE_KEY_PTRACE			0x2

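/*
 * Example (illustrative sketch, not part of the library): keys below
 * SIDE_KEY_RESERVED_RANGE_END are reserved for shared tracers; a
 * private tracer obtains its key dynamically through
 * side_tracer_request_key(), defined later in this file:
 *
 *	uint64_t my_tracer_key;
 *
 *	if (side_tracer_request_key(&my_tracer_key) != SIDE_ERROR_OK)
 *		abort();
 */
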
struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

struct side_statedump_notification {
	struct side_list_node node;
	uint64_t key;
};

struct side_statedump_request_handle {
	struct side_list_node node;			/* Statedump request RCU list node. */
	struct side_list_head notification_queue;	/* Queue of struct side_statedump_notification */
	void (*cb)(void);
	char *name;
	enum side_statedump_mode mode;
};

struct side_callback {
	union {
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			void *priv);
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			const struct side_arg_dynamic_struct *var_struct,
			void *priv);
	} u;
	void *priv;
	uint64_t key;
};

static struct side_rcu_gp_state event_rcu_gp, statedump_rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events once the destructor has
 * run.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_event_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static pthread_mutex_t side_statedump_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static pthread_mutex_t side_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Dynamic tracer key allocation. */
static uint64_t side_key_next = SIDE_KEY_RESERVED_RANGE_END;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The statedump request list is an RCU list to allow the agent thread
 * to iterate over this list with an RCU read-side lock.
 */
static DEFINE_SIDE_LIST_HEAD(side_statedump_list);

/*
 * Callback filter key for state dump.
 */
static __thread uint64_t filter_key = SIDE_KEY_MATCH_ALL;

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const char side_empty_callback[sizeof(struct side_callback)];

side_static_event(side_statedump_begin, "side", "statedump_begin",
	SIDE_LOGLEVEL_INFO, side_field_list(side_field_string("name")));
side_static_event(side_statedump_end, "side", "statedump_end",
	SIDE_LOGLEVEL_INFO, side_field_list(side_field_string("name")));

/*
 * side_ptrace_hook is a placeholder for a debugger breakpoint.
 * var_struct is NULL if not variadic.
 */
void side_ptrace_hook(const struct side_event_state *event_state __attribute__((unused)),
		const struct side_arg_vec *side_arg_vec __attribute__((unused)),
		const struct side_arg_dynamic_struct *var_struct __attribute__((unused)))
	__attribute__((noinline));
void side_ptrace_hook(const struct side_event_state *event_state __attribute__((unused)),
		const struct side_arg_vec *side_arg_vec __attribute__((unused)),
		const struct side_arg_dynamic_struct *var_struct __attribute__((unused)))
{
}

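/*
 * Example (illustrative sketch, not part of the library): since the
 * hook is noinline, a debugger can consume events enabled through the
 * ptrace mask by placing a symbol breakpoint on it, e.g. with gdb:
 *
 *	(gdb) break side_ptrace_hook
 *
 * and reading event_state, side_arg_vec and var_struct from the
 * stopped frame.
 */
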
static
void _side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec, uint64_t key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(!(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_SHARED_MASK)) {
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK) &&
				(key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_USER_EVENT)) {
			// TODO: call kernel write.
		}
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK) &&
				(key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_PTRACE))
			side_ptrace_hook(event_state, side_arg_vec, NULL);
	}
	side_rcu_read_begin(&event_rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call != NULL; side_cb++) {
		if (key != SIDE_KEY_MATCH_ALL && side_cb->key != SIDE_KEY_MATCH_ALL && side_cb->key != key)
			continue;
		side_cb->u.call(es0->desc, side_arg_vec, side_cb->priv);
	}
	side_rcu_read_end(&event_rcu_gp, &rcu_read_state);
}

void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	_side_call(event_state, side_arg_vec, SIDE_KEY_MATCH_ALL);
}

void side_statedump_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	_side_call(event_state, side_arg_vec, filter_key);
}

static
void _side_call_variadic(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		const struct side_arg_dynamic_struct *var_struct,
		uint64_t key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_SHARED_MASK)) {
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK) &&
				(key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_USER_EVENT)) {
			// TODO: call kernel write.
		}
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK) &&
				(key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_PTRACE))
			side_ptrace_hook(event_state, side_arg_vec, var_struct);
	}
	side_rcu_read_begin(&event_rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call_variadic != NULL; side_cb++) {
		if (key != SIDE_KEY_MATCH_ALL && side_cb->key != SIDE_KEY_MATCH_ALL && side_cb->key != key)
			continue;
		side_cb->u.call_variadic(es0->desc, side_arg_vec, var_struct, side_cb->priv);
	}
	side_rcu_read_end(&event_rcu_gp, &rcu_read_state);
}

void side_call_variadic(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		const struct side_arg_dynamic_struct *var_struct)
{
	_side_call_variadic(event_state, side_arg_vec, var_struct, SIDE_KEY_MATCH_ALL);
}

void side_statedump_call_variadic(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		const struct side_arg_dynamic_struct *var_struct)
{
	_side_call_variadic(event_state, side_arg_vec, var_struct, filter_key);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv, uint64_t key)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_event_state_0 *es0;
	const struct side_callback *cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	for (cb = es0->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv && cb->key == key)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv, uint64_t key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	struct side_event_state_0 *es0;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	old_nr_cb = es0->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv, key) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv, key)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) es0->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	new_cb[old_nr_cb].key = key;
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&event_rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	es0->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_event_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, uint64_t key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, uint64_t key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv, key);
}

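/*
 * Example (illustrative sketch, not part of the library): a tracer
 * registers a non-variadic callback with the same (call, priv, key)
 * tuple it will later use to unregister. my_event_cb is hypothetical;
 * "key" is assumed to come from side_tracer_request_key() below:
 *
 *	static void my_event_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec *side_arg_vec, void *priv)
 *	{
 *		... consume the event payload ...
 *	}
 *
 *	if (side_tracer_callback_register(desc, my_event_cb, NULL, key) != SIDE_ERROR_OK)
 *		abort();
 *	...
 *	(void) side_tracer_callback_unregister(desc, my_event_cb, NULL, key);
 */
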
static int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv, uint64_t key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	struct side_event_state_0 *es0;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	cb_pos = side_tracer_callback_lookup(desc, call, priv, key);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = es0->nr_callbacks;
	old_cb = (struct side_callback *) es0->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - es0->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&event_rcu_gp);
	free(old_cb);
	es0->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_event_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, uint64_t key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, uint64_t key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv, key);
}

struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
		calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_event_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_event_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	struct side_event_state_0 *es0;
	struct side_callback *old_cb;
	uint32_t nr_cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	nr_cb = es0->nr_callbacks;
	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) es0->callbacks;
	(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	es0->nr_callbacks = 0;
	side_rcu_assign_pointer(es0->callbacks, (struct side_callback *) &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_event_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
		calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_event_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_event_lock);
	return tracer_handle;
}

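/*
 * Example (illustrative sketch, not part of the library): a tracer
 * that wants to learn about instrumentation coming and going registers
 * a notification callback; it is immediately called back with
 * SIDE_TRACER_NOTIFICATION_INSERT_EVENTS for already-registered
 * events. my_notif_cb is hypothetical:
 *
 *	static void my_notif_cb(enum side_tracer_notification notif,
 *			struct side_event_description **events,
 *			uint32_t nr_events, void *priv)
 *	{
 *		switch (notif) {
 *		case SIDE_TRACER_NOTIFICATION_INSERT_EVENTS:
 *			... register callbacks on events of interest ...
 *			break;
 *		case SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS:
 *			... drop references to these events ...
 *			break;
 *		}
 *	}
 *
 *	struct side_tracer_handle *th =
 *		side_tracer_event_notification_register(my_notif_cb, NULL);
 */
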
void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_event_lock);
	free(tracer_handle);
}

/* Called with side_statedump_lock held. */
static
void queue_statedump_pending(struct side_statedump_request_handle *handle, uint64_t key)
{
	struct side_statedump_notification *notif;

	notif = (struct side_statedump_notification *) calloc(1, sizeof(struct side_statedump_notification));
	if (!notif)
		abort();
	notif->key = key;
	side_list_insert_node_tail(&handle->notification_queue, &notif->node);
}

/* Called with side_statedump_lock held. */
static
void unqueue_statedump_pending(struct side_statedump_request_handle *handle, uint64_t key)
{
	struct side_statedump_notification *notif, *tmp;

	side_list_for_each_entry_safe(notif, tmp, &handle->notification_queue, node) {
		if (key == SIDE_KEY_MATCH_ALL || key == notif->key) {
			side_list_remove_node(&notif->node);
			free(notif);
		}
	}
}

struct side_statedump_request_handle *
	side_statedump_request_notification_register(const char *state_name,
		void (*statedump_cb)(void),
		enum side_statedump_mode mode)
{
	struct side_statedump_request_handle *handle;
	char *name;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	/*
	 * The statedump request notification should not be registered
	 * from a notification callback.
	 */
	assert(filter_key == SIDE_KEY_MATCH_ALL);
	handle = (struct side_statedump_request_handle *)
		calloc(1, sizeof(struct side_statedump_request_handle));
	if (!handle)
		return NULL;
	name = strdup(state_name);
	if (!name)
		goto name_nomem;
	handle->cb = statedump_cb;
	handle->name = name;
	handle->mode = mode;
	side_list_head_init(&handle->notification_queue);

	pthread_mutex_lock(&side_statedump_lock);
	side_list_insert_node_tail_rcu(&side_statedump_list, &handle->node);
	/* Queue statedump pending for all tracers. */
	queue_statedump_pending(handle, SIDE_KEY_MATCH_ALL);
	pthread_mutex_unlock(&side_statedump_lock);

	return handle;

name_nomem:
	free(handle);
	return NULL;
}

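/*
 * Example (illustrative sketch, not part of the library): an
 * application or library exposing state registers a statedump
 * callback which replays its current state through its regular side
 * instrumentation. dump_my_state is hypothetical:
 *
 *	static void dump_my_state(void)
 *	{
 *		... call side events describing the current state ...
 *	}
 *
 *	struct side_statedump_request_handle *sh =
 *		side_statedump_request_notification_register("my_state",
 *			dump_my_state, SIDE_STATEDUMP_MODE_POLLING);
 */
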
void side_statedump_request_notification_unregister(struct side_statedump_request_handle *handle)
{
	if (finalized)
		return;
	if (!initialized)
		side_init();
	assert(filter_key == SIDE_KEY_MATCH_ALL);

	pthread_mutex_lock(&side_statedump_lock);
	unqueue_statedump_pending(handle, SIDE_KEY_MATCH_ALL);
	side_list_remove_node_rcu(&handle->node);
	pthread_mutex_unlock(&side_statedump_lock);

	side_rcu_wait_grace_period(&statedump_rcu_gp);
	free(handle->name);
	free(handle);
}

/* Returns true if the handle has pending statedump requests. */
bool side_statedump_poll_pending_requests(struct side_statedump_request_handle *handle)
{
	bool ret;

	if (handle->mode != SIDE_STATEDUMP_MODE_POLLING)
		return false;
	pthread_mutex_lock(&side_statedump_lock);
	ret = !side_list_empty(&handle->notification_queue);
	pthread_mutex_unlock(&side_statedump_lock);
	return ret;
}

static
void side_statedump_run(struct side_statedump_request_handle *handle,
		struct side_statedump_notification *notif)
{
	/* Invoke the state dump callback specifically for the tracer key. */
	filter_key = notif->key;
	side_statedump_event_call(side_statedump_begin,
		side_arg_list(side_arg_string(handle->name)));
	handle->cb();
	side_statedump_event_call(side_statedump_end,
		side_arg_list(side_arg_string(handle->name)));
	filter_key = SIDE_KEY_MATCH_ALL;
}

static
void _side_statedump_run_pending_requests(struct side_statedump_request_handle *handle)
{
	struct side_statedump_notification *notif, *tmp;
	DEFINE_SIDE_LIST_HEAD(tmp_head);

	pthread_mutex_lock(&side_statedump_lock);
	side_list_splice(&handle->notification_queue, &tmp_head);
	pthread_mutex_unlock(&side_statedump_lock);

	/* We are now sole owner of the tmp_head list. */
	side_list_for_each_entry(notif, &tmp_head, node)
		side_statedump_run(handle, notif);
	side_list_for_each_entry_safe(notif, tmp, &tmp_head, node)
		free(notif);
}

/*
 * Only state dump handles in polling mode allow the application to
 * explicitly run the pending requests.
 */
int side_statedump_run_pending_requests(struct side_statedump_request_handle *handle)
{
	if (handle->mode != SIDE_STATEDUMP_MODE_POLLING)
		return SIDE_ERROR_INVAL;
	_side_statedump_run_pending_requests(handle);
	return SIDE_ERROR_OK;
}

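/*
 * Example (illustrative sketch, not part of the library): in polling
 * mode, the application periodically checks for and services pending
 * statedump requests, e.g. from its main loop, using the handle "sh"
 * from the registration sketch above:
 *
 *	if (side_statedump_poll_pending_requests(sh))
 *		(void) side_statedump_run_pending_requests(sh);
 */
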
/*
 * Request a state dump for tracer callbacks identified with "key".
 */
int side_tracer_statedump_request(uint64_t key)
{
	struct side_statedump_request_handle *handle;

	if (key == SIDE_KEY_MATCH_ALL)
		return SIDE_ERROR_INVAL;
	pthread_mutex_lock(&side_statedump_lock);
	side_list_for_each_entry(handle, &side_statedump_list, node)
		queue_statedump_pending(handle, key);
	pthread_mutex_unlock(&side_statedump_lock);
	return SIDE_ERROR_OK;
}

/*
 * Cancel a statedump request.
 */
int side_tracer_statedump_request_cancel(uint64_t key)
{
	struct side_statedump_request_handle *handle;

	if (key == SIDE_KEY_MATCH_ALL)
		return SIDE_ERROR_INVAL;
	pthread_mutex_lock(&side_statedump_lock);
	side_list_for_each_entry(handle, &side_statedump_list, node)
		unqueue_statedump_pending(handle, key);
	pthread_mutex_unlock(&side_statedump_lock);
	return SIDE_ERROR_OK;
}

/*
 * Tracer keys are represented as 64-bit integers. Return
 * SIDE_ERROR_NOMEM on overflow (which should never happen in
 * practice).
 */
int side_tracer_request_key(uint64_t *key)
{
	int ret = SIDE_ERROR_OK;

	pthread_mutex_lock(&side_key_lock);
	if (side_key_next == 0) {
		ret = SIDE_ERROR_NOMEM;
		goto end;
	}
	*key = side_key_next++;
end:
	pthread_mutex_unlock(&side_key_lock);
	return ret;
}

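/*
 * Example (illustrative sketch, not part of the library): a tracer
 * ties the pieces together by allocating a key, registering its
 * callbacks with that key, then requesting a state dump filtered to
 * that key; callbacks invoked during the dump observe filter_key
 * through side_statedump_call():
 *
 *	uint64_t key;
 *
 *	if (side_tracer_request_key(&key) != SIDE_ERROR_OK)
 *		abort();
 *	... register tracer callbacks with "key" ...
 *	(void) side_tracer_statedump_request(key);
 */
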
void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&event_rcu_gp);
	side_rcu_gp_init(&statedump_rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&event_rcu_gp);
	side_rcu_gp_exit(&statedump_rcu_gp);
	finalized = true;
}