// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>
#include <unistd.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#include "rcu.h"
#include "list.h"
/* Top 8 bits reserved for shared tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_SHARED_MASK                 0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK      0x8000000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK          0x4000000000000000ULL

/* Allow 2^56 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK                0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_SHARED_MASK                 0xFF000000UL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK      0x80000000UL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK          0x40000000UL

/* Allow 2^24 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK                0x00FFFFFFUL
#endif
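
/*
 * Illustrative sketch (an assumption spelled out here, not library code):
 * the per-event "enabled" word packs a private tracer reference count in
 * the low bits and the shared tracer bits (user event, ptrace) in the top
 * byte, so a single relaxed load tells whether any consumer wants the
 * event:
 *
 *	uintptr_t enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
 *	bool has_private_refs = enabled & SIDE_EVENT_ENABLED_PRIVATE_MASK;
 *	bool has_shared_bits = enabled & SIDE_EVENT_ENABLED_SHARED_MASK;
 */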
#define SIDE_KEY_RESERVED_RANGE_END     0x8

/* Key 0x0 is reserved to match all. */
#define SIDE_KEY_MATCH_ALL              0x0
/* Key 0x1 is reserved for user event. */
#define SIDE_KEY_USER_EVENT             0x1
/* Key 0x2 is reserved for ptrace. */
#define SIDE_KEY_PTRACE                 0x2
struct side_events_register_handle {
        struct side_list_node node;
        struct side_event_description **events;
        uint32_t nr_events;
};
struct side_tracer_handle {
        struct side_list_node node;
        void (*cb)(enum side_tracer_notification notif,
                struct side_event_description **events, uint32_t nr_events, void *priv);
        void *priv;
};
struct side_statedump_notification {
        struct side_list_node node;
        uint64_t key;
};
struct side_statedump_request_handle {
        struct side_list_node node;             /* Statedump request RCU list node. */
        struct side_list_head notification_queue;  /* Queue of struct side_statedump_notification */
        void (*cb)(void);                       /* Statedump callback. */
        char *name;                             /* Statedump name, used by begin/end events. */
        enum side_statedump_mode mode;
};
struct side_callback {
        union {
                void (*call)(const struct side_event_description *desc,
                        const struct side_arg_vec *side_arg_vec,
                        void *priv);
                void (*call_variadic)(const struct side_event_description *desc,
                        const struct side_arg_vec *side_arg_vec,
                        const struct side_arg_dynamic_struct *var_struct,
                        void *priv);
        } u;
        void *priv;
        uint64_t key;
};
enum agent_thread_state {
        AGENT_THREAD_STATE_BLOCKED = 0,
        AGENT_THREAD_STATE_HANDLE_REQUEST = (1 << 0),
        AGENT_THREAD_STATE_EXIT = (1 << 1),
};
struct statedump_agent_thread {
        long ref;
        pthread_t id;
        enum agent_thread_state state;
};
static struct side_rcu_gp_state event_rcu_gp, statedump_rcu_gp;
/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;

/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;
/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_event_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static pthread_mutex_t side_statedump_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static pthread_mutex_t side_key_lock = PTHREAD_MUTEX_INITIALIZER;
/* Dynamic tracer key allocation. */
static uint64_t side_key_next = SIDE_KEY_RESERVED_RANGE_END;

static struct statedump_agent_thread statedump_agent_thread;
static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);
/*
 * The statedump request list is an RCU list to allow the agent thread to
 * iterate over this list with an RCU read-side lock.
 */
static DEFINE_SIDE_LIST_HEAD(side_statedump_list);
/*
 * Callback filter key for state dump.
 */
static __thread uint64_t filter_key = SIDE_KEY_MATCH_ALL;
/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const char side_empty_callback[sizeof(struct side_callback)];
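
/*
 * Illustrative sketch (not library code): callback arrays are terminated
 * by a NULL function pointer, so iteration needs no element count, and the
 * zero-initialized side_empty_callback stops iteration at the first entry
 * for events with no registered callback:
 *
 *	for (cb = es0->callbacks; cb->u.call != NULL; cb++)
 *		cb->u.call(es0->desc, side_arg_vec, cb->priv);
 */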
side_static_event(side_statedump_begin, "side", "statedump_begin",
        SIDE_LOGLEVEL_INFO, side_field_list(side_field_string("name")));
side_static_event(side_statedump_end, "side", "statedump_end",
        SIDE_LOGLEVEL_INFO, side_field_list(side_field_string("name")));
/*
 * side_ptrace_hook is a placeholder for a debugger breakpoint.
 * var_struct is NULL if not variadic.
 */
void side_ptrace_hook(const struct side_event_state *event_state __attribute__((unused)),
                const struct side_arg_vec *side_arg_vec __attribute__((unused)),
                const struct side_arg_dynamic_struct *var_struct __attribute__((unused)))
        __attribute__((noinline));
void side_ptrace_hook(const struct side_event_state *event_state __attribute__((unused)),
                const struct side_arg_vec *side_arg_vec __attribute__((unused)),
                const struct side_arg_dynamic_struct *var_struct __attribute__((unused)))
{
}
static
void _side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec, uint64_t key)
{
        struct side_rcu_read_state rcu_read_state;
        const struct side_event_state_0 *es0;
        const struct side_callback *side_cb;
        uintptr_t enabled;

        if (side_unlikely(finalized))
                return;
        if (side_unlikely(!initialized))
                side_init();
        if (side_unlikely(event_state->version != 0))
                abort();
        es0 = side_container_of(event_state, const struct side_event_state_0, parent);
        assert(!(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
        enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
        if (side_unlikely(enabled & SIDE_EVENT_ENABLED_SHARED_MASK)) {
                if ((enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK) &&
                    (key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_USER_EVENT)) {
                        // TODO: call kernel write.
                }
                if ((enabled & SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK) &&
                    (key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_PTRACE))
                        side_ptrace_hook(event_state, side_arg_vec, NULL);
        }
        side_rcu_read_begin(&event_rcu_gp, &rcu_read_state);
        for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call != NULL; side_cb++) {
                if (key != SIDE_KEY_MATCH_ALL && side_cb->key != SIDE_KEY_MATCH_ALL && side_cb->key != key)
                        continue;
                side_cb->u.call(es0->desc, side_arg_vec, side_cb->priv);
        }
        side_rcu_read_end(&event_rcu_gp, &rcu_read_state);
}
void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
        _side_call(event_state, side_arg_vec, SIDE_KEY_MATCH_ALL);
}
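
/*
 * Illustrative usage sketch (assumed instrumentation-side macros; see
 * side/trace.h for the definitive API): instrumentation declared with
 * side_static_event() reaches side_call() through the event-call macros,
 * e.g.:
 *
 *	side_static_event(my_event, "myprovider", "myevent",
 *		SIDE_LOGLEVEL_DEBUG, side_field_list(side_field_string("msg")));
 *
 *	side_event_call(my_event, side_arg_list(side_arg_string("hello")));
 */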
void side_statedump_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
        _side_call(event_state, side_arg_vec, filter_key);
}
static
void _side_call_variadic(const struct side_event_state *event_state,
                const struct side_arg_vec *side_arg_vec,
                const struct side_arg_dynamic_struct *var_struct,
                uint64_t key)
{
        struct side_rcu_read_state rcu_read_state;
        const struct side_event_state_0 *es0;
        const struct side_callback *side_cb;
        uintptr_t enabled;

        if (side_unlikely(finalized))
                return;
        if (side_unlikely(!initialized))
                side_init();
        if (side_unlikely(event_state->version != 0))
                abort();
        es0 = side_container_of(event_state, const struct side_event_state_0, parent);
        assert(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
        enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
        if (side_unlikely(enabled & SIDE_EVENT_ENABLED_SHARED_MASK)) {
                if ((enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK) &&
                    (key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_USER_EVENT)) {
                        // TODO: call kernel write.
                }
                if ((enabled & SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK) &&
                    (key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_PTRACE))
                        side_ptrace_hook(event_state, side_arg_vec, var_struct);
        }
        side_rcu_read_begin(&event_rcu_gp, &rcu_read_state);
        for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call_variadic != NULL; side_cb++) {
                if (key != SIDE_KEY_MATCH_ALL && side_cb->key != SIDE_KEY_MATCH_ALL && side_cb->key != key)
                        continue;
                side_cb->u.call_variadic(es0->desc, side_arg_vec, var_struct, side_cb->priv);
        }
        side_rcu_read_end(&event_rcu_gp, &rcu_read_state);
}
void side_call_variadic(const struct side_event_state *event_state,
                const struct side_arg_vec *side_arg_vec,
                const struct side_arg_dynamic_struct *var_struct)
{
        _side_call_variadic(event_state, side_arg_vec, var_struct, SIDE_KEY_MATCH_ALL);
}
void side_statedump_call_variadic(const struct side_event_state *event_state,
                const struct side_arg_vec *side_arg_vec,
                const struct side_arg_dynamic_struct *var_struct)
{
        _side_call_variadic(event_state, side_arg_vec, var_struct, filter_key);
}
static
const struct side_callback *side_tracer_callback_lookup(
                const struct side_event_description *desc,
                void *call, void *priv, uint64_t key)
{
        struct side_event_state *event_state = side_ptr_get(desc->state);
        const struct side_event_state_0 *es0;
        const struct side_callback *cb;

        if (side_unlikely(event_state->version != 0))
                abort();
        es0 = side_container_of(event_state, const struct side_event_state_0, parent);
        for (cb = es0->callbacks; cb->u.call != NULL; cb++) {
                if ((void *) cb->u.call == call && cb->priv == priv && cb->key == key)
                        return cb;
        }
        return NULL;
}
static
int _side_tracer_callback_register(struct side_event_description *desc,
                void *call, void *priv, uint64_t key)
{
        struct side_event_state *event_state;
        struct side_callback *old_cb, *new_cb;
        struct side_event_state_0 *es0;
        int ret = SIDE_ERROR_OK;
        uint32_t old_nr_cb;

        if (!call)
                return SIDE_ERROR_INVAL;
        if (finalized)
                return SIDE_ERROR_EXITING;
        if (!initialized)
                side_init();
        pthread_mutex_lock(&side_event_lock);
        event_state = side_ptr_get(desc->state);
        if (side_unlikely(event_state->version != 0))
                abort();
        es0 = side_container_of(event_state, struct side_event_state_0, parent);
        old_nr_cb = es0->nr_callbacks;
        if (old_nr_cb == UINT32_MAX) {
                ret = SIDE_ERROR_INVAL;
                goto unlock;
        }
        /* Reject duplicate (call, priv) tuples. */
        if (side_tracer_callback_lookup(desc, call, priv, key)) {
                ret = SIDE_ERROR_EXIST;
                goto unlock;
        }
        old_cb = (struct side_callback *) es0->callbacks;
        /* old_nr_cb + 1 (new cb) + 1 (NULL) */
        new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
        if (!new_cb) {
                ret = SIDE_ERROR_NOMEM;
                goto unlock;
        }
        memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
        if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
                new_cb[old_nr_cb].u.call_variadic =
                        (side_tracer_callback_variadic_func) call;
        else
                new_cb[old_nr_cb].u.call =
                        (side_tracer_callback_func) call;
        new_cb[old_nr_cb].priv = priv;
        new_cb[old_nr_cb].key = key;
        /* High order bits are already zeroed. */
        side_rcu_assign_pointer(es0->callbacks, new_cb);
        side_rcu_wait_grace_period(&event_rcu_gp);
        if (old_nr_cb)
                free(old_cb);
        es0->nr_callbacks++;
        /* Increment concurrently with kernel setting the top bits. */
        if (!old_nr_cb)
                (void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
unlock:
        pthread_mutex_unlock(&side_event_lock);
        return ret;
}
int side_tracer_callback_register(struct side_event_description *desc,
                side_tracer_callback_func call,
                void *priv, uint64_t key)
{
        if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
                return SIDE_ERROR_INVAL;
        return _side_tracer_callback_register(desc, (void *) call, priv, key);
}
int side_tracer_callback_variadic_register(struct side_event_description *desc,
                side_tracer_callback_variadic_func call_variadic,
                void *priv, uint64_t key)
{
        if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
                return SIDE_ERROR_INVAL;
        return _side_tracer_callback_register(desc, (void *) call_variadic, priv, key);
}
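
/*
 * Illustrative usage sketch (hypothetical tracer code): a tracer attaches
 * a callback matching the side_tracer_callback_func signature under its
 * own key, for a non-variadic event description:
 *
 *	static void my_tracer_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec *side_arg_vec, void *priv)
 *	{
 *		// Serialize the fields described by desc and side_arg_vec.
 *	}
 *
 *	// my_key was previously obtained from side_tracer_request_key().
 *	int ret = side_tracer_callback_register(desc, my_tracer_cb, NULL, my_key);
 */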
static int _side_tracer_callback_unregister(struct side_event_description *desc,
                void *call, void *priv, uint64_t key)
{
        struct side_event_state *event_state;
        struct side_callback *old_cb, *new_cb;
        const struct side_callback *cb_pos;
        struct side_event_state_0 *es0;
        uint32_t pos_idx;
        int ret = SIDE_ERROR_OK;
        uint32_t old_nr_cb;

        if (!call)
                return SIDE_ERROR_INVAL;
        if (finalized)
                return SIDE_ERROR_EXITING;
        if (!initialized)
                side_init();
        pthread_mutex_lock(&side_event_lock);
        event_state = side_ptr_get(desc->state);
        if (side_unlikely(event_state->version != 0))
                abort();
        es0 = side_container_of(event_state, struct side_event_state_0, parent);
        cb_pos = side_tracer_callback_lookup(desc, call, priv, key);
        if (!cb_pos) {
                ret = SIDE_ERROR_NOENT;
                goto unlock;
        }
        old_nr_cb = es0->nr_callbacks;
        old_cb = (struct side_callback *) es0->callbacks;
        if (old_nr_cb == 1) {
                new_cb = (struct side_callback *) &side_empty_callback;
        } else {
                pos_idx = cb_pos - es0->callbacks;
                /* Remove entry at pos_idx. */
                /* old_nr_cb - 1 (removed cb) + 1 (NULL) */
                new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
                if (!new_cb) {
                        ret = SIDE_ERROR_NOMEM;
                        goto unlock;
                }
                memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
                memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
                        (old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
        }
        /* High order bits are already zeroed. */
        side_rcu_assign_pointer(es0->callbacks, new_cb);
        side_rcu_wait_grace_period(&event_rcu_gp);
        free(old_cb);
        es0->nr_callbacks--;
        /* Decrement concurrently with kernel setting the top bits. */
        if (old_nr_cb == 1)
                (void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
unlock:
        pthread_mutex_unlock(&side_event_lock);
        return ret;
}
int side_tracer_callback_unregister(struct side_event_description *desc,
                side_tracer_callback_func call,
                void *priv, uint64_t key)
{
        if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
                return SIDE_ERROR_INVAL;
        return _side_tracer_callback_unregister(desc, (void *) call, priv, key);
}
int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
                side_tracer_callback_variadic_func call_variadic,
                void *priv, uint64_t key)
{
        if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
                return SIDE_ERROR_INVAL;
        return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv, key);
}
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
        struct side_events_register_handle *events_handle = NULL;
        struct side_tracer_handle *tracer_handle;

        if (finalized)
                return NULL;
        if (!initialized)
                side_init();
        events_handle = (struct side_events_register_handle *)
                        calloc(1, sizeof(struct side_events_register_handle));
        if (!events_handle)
                return NULL;
        events_handle->events = events;
        events_handle->nr_events = nr_events;

        pthread_mutex_lock(&side_event_lock);
        side_list_insert_node_tail(&side_events_list, &events_handle->node);
        side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
                tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
                        events, nr_events, tracer_handle->priv);
        }
        pthread_mutex_unlock(&side_event_lock);
        //TODO: call event batch register ioctl
        return events_handle;
}
static
void side_event_remove_callbacks(struct side_event_description *desc)
{
        struct side_event_state *event_state = side_ptr_get(desc->state);
        struct side_event_state_0 *es0;
        struct side_callback *old_cb;
        uint32_t nr_cb;

        if (side_unlikely(event_state->version != 0))
                abort();
        es0 = side_container_of(event_state, struct side_event_state_0, parent);
        nr_cb = es0->nr_callbacks;
        if (!nr_cb)
                return;
        old_cb = (struct side_callback *) es0->callbacks;
        (void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
        /*
         * Setting the state back to 0 cb and empty callbacks out of
         * caution. This should not matter because instrumentation is
         * unreachable at this point.
         */
        es0->nr_callbacks = 0;
        side_rcu_assign_pointer(es0->callbacks, (struct side_callback *) &side_empty_callback);
        /*
         * No need to wait for a grace period because instrumentation is
         * unreachable.
         */
        free(old_cb);
}
/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
        struct side_tracer_handle *tracer_handle;
        uint32_t i;

        if (!events_handle)
                return;
        if (finalized)
                return;
        if (!initialized)
                side_init();
        pthread_mutex_lock(&side_event_lock);
        side_list_remove_node(&events_handle->node);
        side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
                tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
                        events_handle->events, events_handle->nr_events,
                        tracer_handle->priv);
        }
        for (i = 0; i < events_handle->nr_events; i++) {
                struct side_event_description *event = events_handle->events[i];

                /* Skip NULL pointers. */
                if (!event)
                        continue;
                side_event_remove_callbacks(event);
        }
        pthread_mutex_unlock(&side_event_lock);
        //TODO: call event batch unregister ioctl
        free(events_handle);
}
struct side_tracer_handle *side_tracer_event_notification_register(
                void (*cb)(enum side_tracer_notification notif,
                        struct side_event_description **events, uint32_t nr_events, void *priv),
                void *priv)
{
        struct side_tracer_handle *tracer_handle;
        struct side_events_register_handle *events_handle;

        if (finalized)
                return NULL;
        if (!initialized)
                side_init();
        tracer_handle = (struct side_tracer_handle *)
                        calloc(1, sizeof(struct side_tracer_handle));
        if (!tracer_handle)
                return NULL;
        pthread_mutex_lock(&side_event_lock);
        tracer_handle->cb = cb;
        tracer_handle->priv = priv;
        side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
        side_list_for_each_entry(events_handle, &side_events_list, node) {
                cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
                        events_handle->events, events_handle->nr_events, priv);
        }
        pthread_mutex_unlock(&side_event_lock);
        return tracer_handle;
}
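
/*
 * Illustrative usage sketch (hypothetical tracer code): a tracer registers
 * for notifications at startup; the callback is immediately replayed for
 * all already-registered event handles while side_event_lock is held:
 *
 *	static void on_events(enum side_tracer_notification notif,
 *			struct side_event_description **events,
 *			uint32_t nr_events, void *priv)
 *	{
 *		for (uint32_t i = 0; i < nr_events; i++) {
 *			if (notif == SIDE_TRACER_NOTIFICATION_INSERT_EVENTS)
 *				attach_to_event(events[i]);	// hypothetical
 *			else
 *				detach_from_event(events[i]);	// hypothetical
 *		}
 *	}
 *
 *	struct side_tracer_handle *th =
 *		side_tracer_event_notification_register(on_events, NULL);
 */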
void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
        struct side_events_register_handle *events_handle;

        if (finalized)
                return;
        if (!initialized)
                side_init();
        pthread_mutex_lock(&side_event_lock);
        side_list_for_each_entry(events_handle, &side_events_list, node) {
                tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
                        events_handle->events, events_handle->nr_events,
                        tracer_handle->priv);
        }
        side_list_remove_node(&tracer_handle->node);
        pthread_mutex_unlock(&side_event_lock);
        free(tracer_handle);
}
/* Called with side_statedump_lock held. */
static
void queue_statedump_pending(struct side_statedump_request_handle *handle, uint64_t key)
{
        struct side_statedump_notification *notif;

        notif = (struct side_statedump_notification *) calloc(1, sizeof(struct side_statedump_notification));
        if (!notif)
                abort();
        notif->key = key;
        side_list_insert_node_tail(&handle->notification_queue, &notif->node);
        if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
                (void) __atomic_or_fetch(&statedump_agent_thread.state,
                        AGENT_THREAD_STATE_HANDLE_REQUEST, __ATOMIC_SEQ_CST);
}
/* Called with side_statedump_lock held. */
static
void unqueue_statedump_pending(struct side_statedump_request_handle *handle, uint64_t key)
{
        struct side_statedump_notification *notif, *tmp;

        side_list_for_each_entry_safe(notif, tmp, &handle->notification_queue, node) {
                if (key == SIDE_KEY_MATCH_ALL || key == notif->key) {
                        side_list_remove_node(&notif->node);
                        free(notif);
                }
        }
}
static
void side_statedump_run(struct side_statedump_request_handle *handle,
                struct side_statedump_notification *notif)
{
        /* Invoke the state dump callback specifically for the tracer key. */
        filter_key = notif->key;
        side_statedump_event_call(side_statedump_begin,
                side_arg_list(side_arg_string(handle->name)));
        handle->cb();
        side_statedump_event_call(side_statedump_end,
                side_arg_list(side_arg_string(handle->name)));
        filter_key = SIDE_KEY_MATCH_ALL;
}
static
void _side_statedump_run_pending_requests(struct side_statedump_request_handle *handle)
{
        struct side_statedump_notification *notif, *tmp;
        DEFINE_SIDE_LIST_HEAD(tmp_head);

        pthread_mutex_lock(&side_statedump_lock);
        side_list_splice(&handle->notification_queue, &tmp_head);
        side_list_head_init(&handle->notification_queue);
        pthread_mutex_unlock(&side_statedump_lock);

        /* We are now sole owner of the tmp_head list. */
        side_list_for_each_entry(notif, &tmp_head, node)
                side_statedump_run(handle, notif);
        side_list_for_each_entry_safe(notif, tmp, &tmp_head, node)
                free(notif);
}
static
void *statedump_agent_func(void *arg __attribute__((unused)))
{
        for (;;) {
                struct side_statedump_request_handle *handle;
                struct side_rcu_read_state rcu_read_state;
                enum agent_thread_state state;

                /* TODO: futex-based wakeup. */
                state = __atomic_load_n(&statedump_agent_thread.state, __ATOMIC_SEQ_CST);
                if (state == AGENT_THREAD_STATE_BLOCKED) {
                        sleep(1);
                        continue;
                }
                if (state & AGENT_THREAD_STATE_EXIT)
                        break;
                (void) __atomic_and_fetch(&statedump_agent_thread.state,
                        ~AGENT_THREAD_STATE_HANDLE_REQUEST, __ATOMIC_SEQ_CST);
                side_rcu_read_begin(&statedump_rcu_gp, &rcu_read_state);
                side_list_for_each_entry_rcu(handle, &side_statedump_list, node)
                        _side_statedump_run_pending_requests(handle);
                side_rcu_read_end(&statedump_rcu_gp, &rcu_read_state);
        }
        return NULL;
}
static
void statedump_agent_thread_get(void)
{
        int ret;

        if (statedump_agent_thread.ref++)
                return;
        statedump_agent_thread.state = AGENT_THREAD_STATE_BLOCKED;
        ret = pthread_create(&statedump_agent_thread.id, NULL,
                        statedump_agent_func, NULL);
        if (ret)
                abort();
}
static
void statedump_agent_thread_put(void)
{
        void *retval;
        int ret;

        if (--statedump_agent_thread.ref)
                return;
        (void) __atomic_or_fetch(&statedump_agent_thread.state,
                AGENT_THREAD_STATE_EXIT, __ATOMIC_SEQ_CST);
        ret = pthread_join(statedump_agent_thread.id, &retval);
        if (ret)
                abort();
        statedump_agent_thread.state = AGENT_THREAD_STATE_BLOCKED;
}
struct side_statedump_request_handle *
side_statedump_request_notification_register(const char *state_name,
                void (*statedump_cb)(void),
                enum side_statedump_mode mode)
{
        struct side_statedump_request_handle *handle;
        char *name;

        if (finalized)
                return NULL;
        if (!initialized)
                side_init();
        /*
         * The statedump request notification should not be registered
         * from a notification callback.
         */
        handle = (struct side_statedump_request_handle *)
                        calloc(1, sizeof(struct side_statedump_request_handle));
        if (!handle)
                return NULL;
        name = strdup(state_name);
        if (!name) {
                free(handle);
                return NULL;
        }
        handle->cb = statedump_cb;
        handle->name = name;
        handle->mode = mode;
        side_list_head_init(&handle->notification_queue);

        pthread_mutex_lock(&side_statedump_lock);
        if (mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
                statedump_agent_thread_get();
        side_list_insert_node_tail_rcu(&side_statedump_list, &handle->node);
        /* Queue statedump pending for all tracers. */
        queue_statedump_pending(handle, SIDE_KEY_MATCH_ALL);
        pthread_mutex_unlock(&side_statedump_lock);

        if (mode == SIDE_STATEDUMP_MODE_AGENT_THREAD) {
                /* Wait for the agent thread to run the initial statedump. */
                for (;;) {
                        bool is_empty;

                        /* TODO futex based wakeup. */
                        pthread_mutex_lock(&side_statedump_lock);
                        is_empty = side_list_empty(&handle->notification_queue);
                        pthread_mutex_unlock(&side_statedump_lock);
                        if (is_empty)
                                break;
                        sleep(1);
                }
        }
        return handle;
}
void side_statedump_request_notification_unregister(struct side_statedump_request_handle *handle)
{
        if (finalized)
                return;
        if (!initialized)
                side_init();

        pthread_mutex_lock(&side_statedump_lock);
        unqueue_statedump_pending(handle, SIDE_KEY_MATCH_ALL);
        side_list_remove_node_rcu(&handle->node);
        if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
                statedump_agent_thread_put();
        pthread_mutex_unlock(&side_statedump_lock);

        side_rcu_wait_grace_period(&statedump_rcu_gp);
        free(handle->name);
        free(handle);
}
/* Returns true if the handle has pending statedump requests. */
bool side_statedump_poll_pending_requests(struct side_statedump_request_handle *handle)
{
        bool ret;

        if (handle->mode != SIDE_STATEDUMP_MODE_POLLING)
                return false;
        pthread_mutex_lock(&side_statedump_lock);
        ret = !side_list_empty(&handle->notification_queue);
        pthread_mutex_unlock(&side_statedump_lock);
        return ret;
}
/*
 * Only polling-mode statedump handles allow the application to explicitly
 * handle pending requests.
 */
int side_statedump_run_pending_requests(struct side_statedump_request_handle *handle)
{
        if (handle->mode != SIDE_STATEDUMP_MODE_POLLING)
                return SIDE_ERROR_INVAL;
        _side_statedump_run_pending_requests(handle);
        return SIDE_ERROR_OK;
}
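
/*
 * Illustrative usage sketch (hypothetical application code): a polling-mode
 * handle is drained from the application's own loop rather than by the
 * agent thread:
 *
 *	struct side_statedump_request_handle *h =
 *		side_statedump_request_notification_register("myapp_state",
 *			my_statedump_cb, SIDE_STATEDUMP_MODE_POLLING);
 *
 *	while (running) {	// running/my_statedump_cb are hypothetical
 *		if (side_statedump_poll_pending_requests(h))
 *			(void) side_statedump_run_pending_requests(h);
 *		do_work();
 *	}
 */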
/*
 * Request a statedump for tracer callbacks identified with "key".
 */
int side_tracer_statedump_request(uint64_t key)
{
        struct side_statedump_request_handle *handle;

        if (key == SIDE_KEY_MATCH_ALL)
                return SIDE_ERROR_INVAL;
        pthread_mutex_lock(&side_statedump_lock);
        side_list_for_each_entry(handle, &side_statedump_list, node)
                queue_statedump_pending(handle, key);
        pthread_mutex_unlock(&side_statedump_lock);
        return SIDE_ERROR_OK;
}
/*
 * Cancel a statedump request.
 */
int side_tracer_statedump_request_cancel(uint64_t key)
{
        struct side_statedump_request_handle *handle;

        if (key == SIDE_KEY_MATCH_ALL)
                return SIDE_ERROR_INVAL;
        pthread_mutex_lock(&side_statedump_lock);
        side_list_for_each_entry(handle, &side_statedump_list, node)
                unqueue_statedump_pending(handle, key);
        pthread_mutex_unlock(&side_statedump_lock);
        return SIDE_ERROR_OK;
}
/*
 * Tracer keys are represented on 64-bit. Return SIDE_ERROR_NOMEM on
 * overflow (which should never happen in practice).
 */
int side_tracer_request_key(uint64_t *key)
{
        int ret = SIDE_ERROR_OK;

        pthread_mutex_lock(&side_key_lock);
        if (side_key_next == 0) {
                ret = SIDE_ERROR_NOMEM;
                goto end;
        }
        *key = side_key_next++;
end:
        pthread_mutex_unlock(&side_key_lock);
        return ret;
}
void side_init(void)
{
        if (initialized)
                return;
        side_rcu_gp_init(&event_rcu_gp);
        side_rcu_gp_init(&statedump_rcu_gp);
        initialized = true;
}
/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
        struct side_events_register_handle *handle, *tmp;

        if (finalized)
                return;
        side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
                side_events_unregister(handle);
        side_rcu_gp_exit(&event_rcu_gp);
        side_rcu_gp_exit(&statedump_rcu_gp);
        finalized = true;
}