1 // SPDX-License-Identifier: MIT
3 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 #include <side/trace.h>
/* Top 8 bits reserved for shared tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x8000000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x4000000000000000ULL

/* Allow 2^56 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x80000000UL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x40000000UL

/* Allow 2^24 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFUL
#endif

/* Keys [0x0 .. 0x7] are reserved; dynamic allocation starts here. */
#define SIDE_KEY_RESERVED_RANGE_END	0x8

/* Key 0x0 is reserved to match all. */
#define SIDE_KEY_MATCH_ALL		0x0
/* Key 0x1 is reserved for user event. */
#define SIDE_KEY_USER_EVENT		0x1
/* Key 0x2 is reserved for ptrace. */
#define SIDE_KEY_PTRACE			0x2
40 struct side_events_register_handle
{
41 struct side_list_node node
;
42 struct side_event_description
**events
;
46 struct side_tracer_handle
{
47 struct side_list_node node
;
48 void (*cb
)(enum side_tracer_notification notif
,
49 struct side_event_description
**events
, uint32_t nr_events
, void *priv
);
53 struct side_statedump_notification
{
54 struct side_list_node node
;
58 struct side_statedump_request_handle
{
59 struct side_list_node node
; /* Statedump request RCU list node. */
60 struct side_list_head notification_queue
; /* Queue of struct side_statedump_notification */
63 enum side_statedump_mode mode
;
/*
 * One registered tracer callback slot. The union discriminant is the
 * SIDE_EVENT_FLAG_VARIADIC flag of the owning event description. A
 * NULL function pointer terminates a callbacks array.
 */
struct side_callback {
	union {
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			void *priv);
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			const struct side_arg_dynamic_struct *var_struct,
			void *priv);
	} u;
	void *priv;	/* Opaque tracer cookie passed back on each call. */
	uint64_t key;	/* Tracer key; SIDE_KEY_MATCH_ALL matches every caller key. */
};
80 static struct side_rcu_gp_state event_rcu_gp
, statedump_rcu_gp
;
83 * Lazy initialization for early use within library constructors.
85 static bool initialized
;
87 * Do not register/unregister any more events after destructor.
89 static bool finalized
;
92 * Recursive mutex to allow tracer callbacks to use the side API.
94 static pthread_mutex_t side_event_lock
= PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
;
95 static pthread_mutex_t side_statedump_lock
= PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
;
96 static pthread_mutex_t side_key_lock
= PTHREAD_MUTEX_INITIALIZER
;
98 /* Dynamic tracer key allocation. */
99 static uint64_t side_key_next
= SIDE_KEY_RESERVED_RANGE_END
;
101 static DEFINE_SIDE_LIST_HEAD(side_events_list
);
102 static DEFINE_SIDE_LIST_HEAD(side_tracer_list
);
105 * The statedump request list is a RCU list to allow the agent thread to
106 * iterate over this list with a RCU read-side lock.
108 static DEFINE_SIDE_LIST_HEAD(side_statedump_list
);
111 * Callback filter key for state dump.
113 static __thread
uint64_t filter_key
= SIDE_KEY_MATCH_ALL
;
116 * The empty callback has a NULL function callback pointer, which stops
117 * iteration on the array of callbacks immediately.
119 const char side_empty_callback
[sizeof(struct side_callback
)];
121 side_static_event(side_statedump_begin
, "side", "statedump_begin",
122 SIDE_LOGLEVEL_INFO
, side_field_list(side_field_string("name")));
123 side_static_event(side_statedump_end
, "side", "statedump_end",
124 SIDE_LOGLEVEL_INFO
, side_field_list(side_field_string("name")));
127 * side_ptrace_hook is a place holder for a debugger breakpoint.
128 * var_struct is NULL if not variadic.
130 void side_ptrace_hook(const struct side_event_state
*event_state
__attribute__((unused
)),
131 const struct side_arg_vec
*side_arg_vec
__attribute__((unused
)),
132 const struct side_arg_dynamic_struct
*var_struct
__attribute__((unused
)))
133 __attribute__((noinline
));
134 void side_ptrace_hook(const struct side_event_state
*event_state
__attribute__((unused
)),
135 const struct side_arg_vec
*side_arg_vec
__attribute__((unused
)),
136 const struct side_arg_dynamic_struct
*var_struct
__attribute__((unused
)))
141 void _side_call(const struct side_event_state
*event_state
, const struct side_arg_vec
*side_arg_vec
, uint64_t key
)
143 struct side_rcu_read_state rcu_read_state
;
144 const struct side_event_state_0
*es0
;
145 const struct side_callback
*side_cb
;
148 if (side_unlikely(finalized
))
150 if (side_unlikely(!initialized
))
152 if (side_unlikely(event_state
->version
!= 0))
154 es0
= side_container_of(event_state
, const struct side_event_state_0
, parent
);
155 assert(!(es0
->desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
));
156 enabled
= __atomic_load_n(&es0
->enabled
, __ATOMIC_RELAXED
);
157 if (side_unlikely(enabled
& SIDE_EVENT_ENABLED_SHARED_MASK
)) {
158 if ((enabled
& SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK
) &&
159 (key
== SIDE_KEY_MATCH_ALL
|| key
== SIDE_KEY_USER_EVENT
)) {
160 // TODO: call kernel write.
162 if ((enabled
& SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK
) &&
163 (key
== SIDE_KEY_MATCH_ALL
|| key
== SIDE_KEY_PTRACE
))
164 side_ptrace_hook(event_state
, side_arg_vec
, NULL
);
166 side_rcu_read_begin(&event_rcu_gp
, &rcu_read_state
);
167 for (side_cb
= side_rcu_dereference(es0
->callbacks
); side_cb
->u
.call
!= NULL
; side_cb
++) {
168 if (key
!= SIDE_KEY_MATCH_ALL
&& side_cb
->key
!= SIDE_KEY_MATCH_ALL
&& side_cb
->key
!= key
)
170 side_cb
->u
.call(es0
->desc
, side_arg_vec
, side_cb
->priv
);
172 side_rcu_read_end(&event_rcu_gp
, &rcu_read_state
);
175 void side_call(const struct side_event_state
*event_state
, const struct side_arg_vec
*side_arg_vec
)
177 _side_call(event_state
, side_arg_vec
, SIDE_KEY_MATCH_ALL
);
180 void side_statedump_call(const struct side_event_state
*event_state
, const struct side_arg_vec
*side_arg_vec
)
182 _side_call(event_state
, side_arg_vec
, filter_key
);
186 void _side_call_variadic(const struct side_event_state
*event_state
,
187 const struct side_arg_vec
*side_arg_vec
,
188 const struct side_arg_dynamic_struct
*var_struct
,
191 struct side_rcu_read_state rcu_read_state
;
192 const struct side_event_state_0
*es0
;
193 const struct side_callback
*side_cb
;
196 if (side_unlikely(finalized
))
198 if (side_unlikely(!initialized
))
200 if (side_unlikely(event_state
->version
!= 0))
202 es0
= side_container_of(event_state
, const struct side_event_state_0
, parent
);
203 assert(es0
->desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
);
204 enabled
= __atomic_load_n(&es0
->enabled
, __ATOMIC_RELAXED
);
205 if (side_unlikely(enabled
& SIDE_EVENT_ENABLED_SHARED_MASK
)) {
206 if ((enabled
& SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK
) &&
207 (key
== SIDE_KEY_MATCH_ALL
|| key
== SIDE_KEY_USER_EVENT
)) {
208 // TODO: call kernel write.
210 if ((enabled
& SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK
) &&
211 (key
== SIDE_KEY_MATCH_ALL
|| key
== SIDE_KEY_PTRACE
))
212 side_ptrace_hook(event_state
, side_arg_vec
, var_struct
);
214 side_rcu_read_begin(&event_rcu_gp
, &rcu_read_state
);
215 for (side_cb
= side_rcu_dereference(es0
->callbacks
); side_cb
->u
.call_variadic
!= NULL
; side_cb
++) {
216 if (key
!= SIDE_KEY_MATCH_ALL
&& side_cb
->key
!= SIDE_KEY_MATCH_ALL
&& side_cb
->key
!= key
)
218 side_cb
->u
.call_variadic(es0
->desc
, side_arg_vec
, var_struct
, side_cb
->priv
);
220 side_rcu_read_end(&event_rcu_gp
, &rcu_read_state
);
223 void side_call_variadic(const struct side_event_state
*event_state
,
224 const struct side_arg_vec
*side_arg_vec
,
225 const struct side_arg_dynamic_struct
*var_struct
)
227 _side_call_variadic(event_state
, side_arg_vec
, var_struct
, SIDE_KEY_MATCH_ALL
);
230 void side_statedump_call_variadic(const struct side_event_state
*event_state
,
231 const struct side_arg_vec
*side_arg_vec
,
232 const struct side_arg_dynamic_struct
*var_struct
)
234 _side_call_variadic(event_state
, side_arg_vec
, var_struct
, filter_key
);
238 const struct side_callback
*side_tracer_callback_lookup(
239 const struct side_event_description
*desc
,
240 void *call
, void *priv
, uint64_t key
)
242 struct side_event_state
*event_state
= side_ptr_get(desc
->state
);
243 const struct side_event_state_0
*es0
;
244 const struct side_callback
*cb
;
246 if (side_unlikely(event_state
->version
!= 0))
248 es0
= side_container_of(event_state
, const struct side_event_state_0
, parent
);
249 for (cb
= es0
->callbacks
; cb
->u
.call
!= NULL
; cb
++) {
250 if ((void *) cb
->u
.call
== call
&& cb
->priv
== priv
&& cb
->key
== key
)
257 int _side_tracer_callback_register(struct side_event_description
*desc
,
258 void *call
, void *priv
, uint64_t key
)
260 struct side_event_state
*event_state
;
261 struct side_callback
*old_cb
, *new_cb
;
262 struct side_event_state_0
*es0
;
263 int ret
= SIDE_ERROR_OK
;
267 return SIDE_ERROR_INVAL
;
269 return SIDE_ERROR_EXITING
;
272 pthread_mutex_lock(&side_event_lock
);
273 event_state
= side_ptr_get(desc
->state
);
274 if (side_unlikely(event_state
->version
!= 0))
276 es0
= side_container_of(event_state
, struct side_event_state_0
, parent
);
277 old_nr_cb
= es0
->nr_callbacks
;
278 if (old_nr_cb
== UINT32_MAX
) {
279 ret
= SIDE_ERROR_INVAL
;
282 /* Reject duplicate (call, priv) tuples. */
283 if (side_tracer_callback_lookup(desc
, call
, priv
, key
)) {
284 ret
= SIDE_ERROR_EXIST
;
287 old_cb
= (struct side_callback
*) es0
->callbacks
;
288 /* old_nr_cb + 1 (new cb) + 1 (NULL) */
289 new_cb
= (struct side_callback
*) calloc(old_nr_cb
+ 2, sizeof(struct side_callback
));
291 ret
= SIDE_ERROR_NOMEM
;
294 memcpy(new_cb
, old_cb
, old_nr_cb
);
295 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
296 new_cb
[old_nr_cb
].u
.call_variadic
=
297 (side_tracer_callback_variadic_func
) call
;
299 new_cb
[old_nr_cb
].u
.call
=
300 (side_tracer_callback_func
) call
;
301 new_cb
[old_nr_cb
].priv
= priv
;
302 new_cb
[old_nr_cb
].key
= key
;
303 /* High order bits are already zeroed. */
304 side_rcu_assign_pointer(es0
->callbacks
, new_cb
);
305 side_rcu_wait_grace_period(&event_rcu_gp
);
309 /* Increment concurrently with kernel setting the top bits. */
311 (void) __atomic_add_fetch(&es0
->enabled
, 1, __ATOMIC_RELAXED
);
313 pthread_mutex_unlock(&side_event_lock
);
317 int side_tracer_callback_register(struct side_event_description
*desc
,
318 side_tracer_callback_func call
,
319 void *priv
, uint64_t key
)
321 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
322 return SIDE_ERROR_INVAL
;
323 return _side_tracer_callback_register(desc
, (void *) call
, priv
, key
);
326 int side_tracer_callback_variadic_register(struct side_event_description
*desc
,
327 side_tracer_callback_variadic_func call_variadic
,
328 void *priv
, uint64_t key
)
330 if (!(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
))
331 return SIDE_ERROR_INVAL
;
332 return _side_tracer_callback_register(desc
, (void *) call_variadic
, priv
, key
);
335 static int _side_tracer_callback_unregister(struct side_event_description
*desc
,
336 void *call
, void *priv
, uint64_t key
)
338 struct side_event_state
*event_state
;
339 struct side_callback
*old_cb
, *new_cb
;
340 const struct side_callback
*cb_pos
;
341 struct side_event_state_0
*es0
;
343 int ret
= SIDE_ERROR_OK
;
347 return SIDE_ERROR_INVAL
;
349 return SIDE_ERROR_EXITING
;
352 pthread_mutex_lock(&side_event_lock
);
353 event_state
= side_ptr_get(desc
->state
);
354 if (side_unlikely(event_state
->version
!= 0))
356 es0
= side_container_of(event_state
, struct side_event_state_0
, parent
);
357 cb_pos
= side_tracer_callback_lookup(desc
, call
, priv
, key
);
359 ret
= SIDE_ERROR_NOENT
;
362 old_nr_cb
= es0
->nr_callbacks
;
363 old_cb
= (struct side_callback
*) es0
->callbacks
;
364 if (old_nr_cb
== 1) {
365 new_cb
= (struct side_callback
*) &side_empty_callback
;
367 pos_idx
= cb_pos
- es0
->callbacks
;
368 /* Remove entry at pos_idx. */
369 /* old_nr_cb - 1 (removed cb) + 1 (NULL) */
370 new_cb
= (struct side_callback
*) calloc(old_nr_cb
, sizeof(struct side_callback
));
372 ret
= SIDE_ERROR_NOMEM
;
375 memcpy(new_cb
, old_cb
, pos_idx
);
376 memcpy(&new_cb
[pos_idx
], &old_cb
[pos_idx
+ 1], old_nr_cb
- pos_idx
- 1);
378 /* High order bits are already zeroed. */
379 side_rcu_assign_pointer(es0
->callbacks
, new_cb
);
380 side_rcu_wait_grace_period(&event_rcu_gp
);
383 /* Decrement concurrently with kernel setting the top bits. */
385 (void) __atomic_add_fetch(&es0
->enabled
, -1, __ATOMIC_RELAXED
);
387 pthread_mutex_unlock(&side_event_lock
);
391 int side_tracer_callback_unregister(struct side_event_description
*desc
,
392 side_tracer_callback_func call
,
393 void *priv
, uint64_t key
)
395 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
396 return SIDE_ERROR_INVAL
;
397 return _side_tracer_callback_unregister(desc
, (void *) call
, priv
, key
);
400 int side_tracer_callback_variadic_unregister(struct side_event_description
*desc
,
401 side_tracer_callback_variadic_func call_variadic
,
402 void *priv
, uint64_t key
)
404 if (!(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
))
405 return SIDE_ERROR_INVAL
;
406 return _side_tracer_callback_unregister(desc
, (void *) call_variadic
, priv
, key
);
409 struct side_events_register_handle
*side_events_register(struct side_event_description
**events
, uint32_t nr_events
)
411 struct side_events_register_handle
*events_handle
= NULL
;
412 struct side_tracer_handle
*tracer_handle
;
418 events_handle
= (struct side_events_register_handle
*)
419 calloc(1, sizeof(struct side_events_register_handle
));
422 events_handle
->events
= events
;
423 events_handle
->nr_events
= nr_events
;
425 pthread_mutex_lock(&side_event_lock
);
426 side_list_insert_node_tail(&side_events_list
, &events_handle
->node
);
427 side_list_for_each_entry(tracer_handle
, &side_tracer_list
, node
) {
428 tracer_handle
->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS
,
429 events
, nr_events
, tracer_handle
->priv
);
431 pthread_mutex_unlock(&side_event_lock
);
432 //TODO: call event batch register ioctl
433 return events_handle
;
437 void side_event_remove_callbacks(struct side_event_description
*desc
)
439 struct side_event_state
*event_state
= side_ptr_get(desc
->state
);
440 struct side_event_state_0
*es0
;
441 struct side_callback
*old_cb
;
444 if (side_unlikely(event_state
->version
!= 0))
446 es0
= side_container_of(event_state
, struct side_event_state_0
, parent
);
447 nr_cb
= es0
->nr_callbacks
;
450 old_cb
= (struct side_callback
*) es0
->callbacks
;
451 (void) __atomic_add_fetch(&es0
->enabled
, -1, __ATOMIC_RELAXED
);
453 * Setting the state back to 0 cb and empty callbacks out of
454 * caution. This should not matter because instrumentation is
457 es0
->nr_callbacks
= 0;
458 side_rcu_assign_pointer(es0
->callbacks
, &side_empty_callback
);
460 * No need to wait for grace period because instrumentation is
467 * Unregister event handle. At this point, all side events in that
468 * handle should be unreachable.
470 void side_events_unregister(struct side_events_register_handle
*events_handle
)
472 struct side_tracer_handle
*tracer_handle
;
481 pthread_mutex_lock(&side_event_lock
);
482 side_list_remove_node(&events_handle
->node
);
483 side_list_for_each_entry(tracer_handle
, &side_tracer_list
, node
) {
484 tracer_handle
->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS
,
485 events_handle
->events
, events_handle
->nr_events
,
486 tracer_handle
->priv
);
488 for (i
= 0; i
< events_handle
->nr_events
; i
++) {
489 struct side_event_description
*event
= events_handle
->events
[i
];
491 /* Skip NULL pointers */
494 side_event_remove_callbacks(event
);
496 pthread_mutex_unlock(&side_event_lock
);
497 //TODO: call event batch unregister ioctl
501 struct side_tracer_handle
*side_tracer_event_notification_register(
502 void (*cb
)(enum side_tracer_notification notif
,
503 struct side_event_description
**events
, uint32_t nr_events
, void *priv
),
506 struct side_tracer_handle
*tracer_handle
;
507 struct side_events_register_handle
*events_handle
;
513 tracer_handle
= (struct side_tracer_handle
*)
514 calloc(1, sizeof(struct side_tracer_handle
));
517 pthread_mutex_lock(&side_event_lock
);
518 tracer_handle
->cb
= cb
;
519 tracer_handle
->priv
= priv
;
520 side_list_insert_node_tail(&side_tracer_list
, &tracer_handle
->node
);
521 side_list_for_each_entry(events_handle
, &side_events_list
, node
) {
522 cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS
,
523 events_handle
->events
, events_handle
->nr_events
, priv
);
525 pthread_mutex_unlock(&side_event_lock
);
526 return tracer_handle
;
529 void side_tracer_event_notification_unregister(struct side_tracer_handle
*tracer_handle
)
531 struct side_events_register_handle
*events_handle
;
537 pthread_mutex_lock(&side_event_lock
);
538 side_list_for_each_entry(events_handle
, &side_events_list
, node
) {
539 tracer_handle
->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS
,
540 events_handle
->events
, events_handle
->nr_events
,
541 tracer_handle
->priv
);
543 side_list_remove_node(&tracer_handle
->node
);
544 pthread_mutex_unlock(&side_event_lock
);
548 /* Called with side_statedump_lock held. */
550 void queue_statedump_pending(struct side_statedump_request_handle
*handle
, uint64_t key
)
552 struct side_statedump_notification
*notif
;
554 notif
= (struct side_statedump_notification
*) calloc(1, sizeof(struct side_statedump_notification
));
558 side_list_insert_node_tail(&handle
->notification_queue
, ¬if
->node
);
561 /* Called with side_statedump_lock held. */
563 void unqueue_statedump_pending(struct side_statedump_request_handle
*handle
, uint64_t key
)
565 struct side_statedump_notification
*notif
, *tmp
;
567 side_list_for_each_entry_safe(notif
, tmp
, &handle
->notification_queue
, node
) {
568 if (key
== SIDE_KEY_MATCH_ALL
|| key
== notif
->key
) {
569 side_list_remove_node(¬if
->node
);
575 struct side_statedump_request_handle
*
576 side_statedump_request_notification_register(const char *state_name
,
577 void (*statedump_cb
)(void),
578 enum side_statedump_mode mode
)
580 struct side_statedump_request_handle
*handle
;
588 * The statedump request notification should not be registered
589 * from a notification callback.
592 handle
= (struct side_statedump_request_handle
*)
593 calloc(1, sizeof(struct side_statedump_request_handle
));
596 name
= strdup(state_name
);
599 handle
->cb
= statedump_cb
;
602 side_list_head_init(&handle
->notification_queue
);
604 pthread_mutex_lock(&side_statedump_lock
);
605 side_list_insert_node_tail_rcu(&side_statedump_list
, &handle
->node
);
606 /* Queue statedump pending for all tracers. */
607 queue_statedump_pending(handle
, SIDE_KEY_MATCH_ALL
);
608 pthread_mutex_unlock(&side_statedump_lock
);
617 void side_statedump_request_notification_unregister(struct side_statedump_request_handle
*handle
)
625 pthread_mutex_lock(&side_statedump_lock
);
626 unqueue_statedump_pending(handle
, SIDE_KEY_MATCH_ALL
);
627 side_list_remove_node_rcu(&handle
->node
);
628 pthread_mutex_unlock(&side_statedump_lock
);
630 side_rcu_wait_grace_period(&statedump_rcu_gp
);
635 /* Returns true if the handle has pending statedump requests. */
636 bool side_statedump_poll_pending_requests(struct side_statedump_request_handle
*handle
)
640 if (handle
->mode
!= SIDE_STATEDUMP_MODE_POLLING
)
642 pthread_mutex_lock(&side_statedump_lock
);
643 ret
= !side_list_empty(&handle
->notification_queue
);
644 pthread_mutex_unlock(&side_statedump_lock
);
649 void side_statedump_run(struct side_statedump_request_handle
*handle
,
650 struct side_statedump_notification
*notif
)
652 /* Invoke the state dump callback specifically for the tracer key. */
653 filter_key
= notif
->key
;
654 side_statedump_event_call(side_statedump_begin
,
655 side_arg_list(side_arg_string(handle
->name
)));
657 side_statedump_event_call(side_statedump_end
,
658 side_arg_list(side_arg_string(handle
->name
)));
659 filter_key
= SIDE_KEY_MATCH_ALL
;
663 void _side_statedump_run_pending_requests(struct side_statedump_request_handle
*handle
)
665 struct side_statedump_notification
*notif
, *tmp
;
666 DEFINE_SIDE_LIST_HEAD(tmp_head
);
668 pthread_mutex_lock(&side_statedump_lock
);
669 side_list_splice(&handle
->notification_queue
, &tmp_head
);
670 pthread_mutex_lock(&side_statedump_lock
);
672 /* We are now sole owner of the tmp_head list. */
673 side_list_for_each_entry(notif
, &tmp_head
, node
)
674 side_statedump_run(handle
, notif
);
675 side_list_for_each_entry_safe(notif
, tmp
, &tmp_head
, node
)
680 * Only polling mode state dump handles allow application to explicitly handle the
683 int side_statedump_run_pending_requests(struct side_statedump_request_handle
*handle
)
685 if (handle
->mode
!= SIDE_STATEDUMP_MODE_POLLING
)
686 return SIDE_ERROR_INVAL
;
687 _side_statedump_run_pending_requests(handle
);
688 return SIDE_ERROR_OK
;
692 * Request a state dump for tracer callbacks identified with "key".
694 int side_tracer_statedump_request(uint64_t key
)
696 struct side_statedump_request_handle
*handle
;
698 if (key
== SIDE_KEY_MATCH_ALL
)
699 return SIDE_ERROR_INVAL
;
700 pthread_mutex_lock(&side_statedump_lock
);
701 side_list_for_each_entry(handle
, &side_statedump_list
, node
)
702 queue_statedump_pending(handle
, key
);
703 pthread_mutex_lock(&side_statedump_lock
);
704 return SIDE_ERROR_OK
;
708 * Cancel a statedump request.
710 int side_tracer_statedump_request_cancel(uint64_t key
)
712 struct side_statedump_request_handle
*handle
;
714 if (key
== SIDE_KEY_MATCH_ALL
)
715 return SIDE_ERROR_INVAL
;
716 pthread_mutex_lock(&side_statedump_lock
);
717 side_list_for_each_entry(handle
, &side_statedump_list
, node
)
718 unqueue_statedump_pending(handle
, key
);
719 pthread_mutex_lock(&side_statedump_lock
);
720 return SIDE_ERROR_OK
;
724 * Tracer keys are represented on 64-bit. Return SIDE_ERROR_NOMEM on
725 * overflow (which should never happen in practice).
727 int side_tracer_request_key(uint64_t *key
)
729 int ret
= SIDE_ERROR_OK
;
731 pthread_mutex_lock(&side_key_lock
);
732 if (side_key_next
== 0) {
733 ret
= SIDE_ERROR_NOMEM
;
736 *key
= side_key_next
++;
738 pthread_mutex_unlock(&side_key_lock
);
746 side_rcu_gp_init(&event_rcu_gp
);
747 side_rcu_gp_init(&statedump_rcu_gp
);
752 * side_exit() is executed from a library destructor. It can be called
753 * explicitly at application exit as well. Concurrent side API use is
754 * not expected at that point.
758 struct side_events_register_handle
*handle
, *tmp
;
762 side_list_for_each_entry_safe(handle
, tmp
, &side_events_list
, node
)
763 side_events_unregister(handle
);
764 side_rcu_gp_exit(&event_rcu_gp
);
765 side_rcu_gp_exit(&statedump_rcu_gp
);