1 // SPDX-License-Identifier: MIT
3 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 #include <side/trace.h>
/*
 * The "enabled" word of an event state is split: the top 8 bits are
 * reserved for tracers shared with the kernel (user_event, ptrace),
 * the remaining low bits count private (userspace) tracer references.
 */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x8000000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x4000000000000000ULL

/* Allow 2^56 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x80000000UL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x40000000UL

/* Allow 2^24 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFUL
#endif

/* Tracer keys below this value are reserved by the side runtime. */
#define SIDE_KEY_RESERVED_RANGE_END	0x8

/* Key 0x0 is reserved to match all. */
#define SIDE_KEY_MATCH_ALL	0x0
/* Key 0x1 is reserved for user event. */
#define SIDE_KEY_USER_EVENT	0x1
/* Key 0x2 is reserved for ptrace. */
#define SIDE_KEY_PTRACE		0x2
42 struct side_events_register_handle
{
43 struct side_list_node node
;
44 struct side_event_description
**events
;
48 struct side_tracer_handle
{
49 struct side_list_node node
;
50 void (*cb
)(enum side_tracer_notification notif
,
51 struct side_event_description
**events
, uint32_t nr_events
, void *priv
);
55 struct side_statedump_notification
{
56 struct side_list_node node
;
60 struct side_statedump_request_handle
{
61 struct side_list_node node
; /* Statedump request RCU list node. */
62 struct side_list_head notification_queue
; /* Queue of struct side_statedump_notification */
65 enum side_statedump_mode mode
;
/*
 * Per-event tracer callback slot. The union discriminant is the
 * SIDE_EVENT_FLAG_VARIADIC flag of the owning event description.
 */
struct side_callback {
	union {
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			void *priv);
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			const struct side_arg_dynamic_struct *var_struct,
			void *priv);
	} u;
	void *priv;	/* Opaque tracer data passed to the callback. */
	uint64_t key;	/* Tracer key; SIDE_KEY_MATCH_ALL matches everything. */
};
/* Bit flags describing the statedump agent thread lifecycle. */
enum agent_thread_state {
	AGENT_THREAD_STATE_BLOCKED = 0,			/* Idle, waiting on worker_cond. */
	AGENT_THREAD_STATE_HANDLE_REQUEST = (1 << 0),	/* Pending statedump work. */
	AGENT_THREAD_STATE_EXIT = (1 << 1),		/* Thread asked to terminate. */
};
88 struct statedump_agent_thread
{
91 enum agent_thread_state state
;
92 pthread_cond_t worker_cond
;
93 pthread_cond_t waiter_cond
;
96 static struct side_rcu_gp_state event_rcu_gp
, statedump_rcu_gp
;
99 * Lazy initialization for early use within library constructors.
101 static bool initialized
;
103 * Do not register/unregister any more events after destructor.
105 static bool finalized
;
108 * Recursive mutex to allow tracer callbacks to use the side API.
110 static pthread_mutex_t side_event_lock
= PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
;
111 static pthread_mutex_t side_statedump_lock
= PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
;
112 static pthread_mutex_t side_key_lock
= PTHREAD_MUTEX_INITIALIZER
;
114 * The side_agent_thread_lock protects the life-time of the agent
115 * thread: reference counting, creation, join. It is not taken by
116 * the agent thread per se so it does not have a circular dependency
118 * The side_statedump_lock nests inside the side_agent_thread_lock.
120 static pthread_mutex_t side_agent_thread_lock
= PTHREAD_MUTEX_INITIALIZER
;
122 /* Dynamic tracer key allocation. */
123 static uint64_t side_key_next
= SIDE_KEY_RESERVED_RANGE_END
;
125 static struct statedump_agent_thread statedump_agent_thread
;
127 static DEFINE_SIDE_LIST_HEAD(side_events_list
);
128 static DEFINE_SIDE_LIST_HEAD(side_tracer_list
);
131 * The statedump request list is a RCU list to allow the agent thread to
132 * iterate over this list with a RCU read-side lock.
134 static DEFINE_SIDE_LIST_HEAD(side_statedump_list
);
137 * Callback filter key for state dump.
139 static __thread
uint64_t filter_key
= SIDE_KEY_MATCH_ALL
;
142 * The empty callback has a NULL function callback pointer, which stops
143 * iteration on the array of callbacks immediately.
145 const char side_empty_callback
[sizeof(struct side_callback
)];
147 side_static_event(side_statedump_begin
, "side", "statedump_begin",
148 SIDE_LOGLEVEL_INFO
, side_field_list(side_field_string("name")));
149 side_static_event(side_statedump_end
, "side", "statedump_end",
150 SIDE_LOGLEVEL_INFO
, side_field_list(side_field_string("name")));
153 * side_ptrace_hook is a place holder for a debugger breakpoint.
154 * var_struct is NULL if not variadic.
156 void side_ptrace_hook(const struct side_event_state
*event_state
__attribute__((unused
)),
157 const struct side_arg_vec
*side_arg_vec
__attribute__((unused
)),
158 const struct side_arg_dynamic_struct
*var_struct
__attribute__((unused
)))
159 __attribute__((noinline
));
160 void side_ptrace_hook(const struct side_event_state
*event_state
__attribute__((unused
)),
161 const struct side_arg_vec
*side_arg_vec
__attribute__((unused
)),
162 const struct side_arg_dynamic_struct
*var_struct
__attribute__((unused
)))
167 void _side_call(const struct side_event_state
*event_state
, const struct side_arg_vec
*side_arg_vec
, uint64_t key
)
169 struct side_rcu_read_state rcu_read_state
;
170 const struct side_event_state_0
*es0
;
171 const struct side_callback
*side_cb
;
174 if (side_unlikely(finalized
))
176 if (side_unlikely(!initialized
))
178 if (side_unlikely(event_state
->version
!= 0))
180 es0
= side_container_of(event_state
, const struct side_event_state_0
, parent
);
181 assert(!(es0
->desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
));
182 enabled
= __atomic_load_n(&es0
->enabled
, __ATOMIC_RELAXED
);
183 if (side_unlikely(enabled
& SIDE_EVENT_ENABLED_SHARED_MASK
)) {
184 if ((enabled
& SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK
) &&
185 (key
== SIDE_KEY_MATCH_ALL
|| key
== SIDE_KEY_USER_EVENT
)) {
186 // TODO: call kernel write.
188 if ((enabled
& SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK
) &&
189 (key
== SIDE_KEY_MATCH_ALL
|| key
== SIDE_KEY_PTRACE
))
190 side_ptrace_hook(event_state
, side_arg_vec
, NULL
);
192 side_rcu_read_begin(&event_rcu_gp
, &rcu_read_state
);
193 for (side_cb
= side_rcu_dereference(es0
->callbacks
); side_cb
->u
.call
!= NULL
; side_cb
++) {
194 if (key
!= SIDE_KEY_MATCH_ALL
&& side_cb
->key
!= SIDE_KEY_MATCH_ALL
&& side_cb
->key
!= key
)
196 side_cb
->u
.call(es0
->desc
, side_arg_vec
, side_cb
->priv
);
198 side_rcu_read_end(&event_rcu_gp
, &rcu_read_state
);
201 void side_call(const struct side_event_state
*event_state
, const struct side_arg_vec
*side_arg_vec
)
203 _side_call(event_state
, side_arg_vec
, SIDE_KEY_MATCH_ALL
);
206 void side_statedump_call(const struct side_event_state
*event_state
, const struct side_arg_vec
*side_arg_vec
)
208 _side_call(event_state
, side_arg_vec
, filter_key
);
212 void _side_call_variadic(const struct side_event_state
*event_state
,
213 const struct side_arg_vec
*side_arg_vec
,
214 const struct side_arg_dynamic_struct
*var_struct
,
217 struct side_rcu_read_state rcu_read_state
;
218 const struct side_event_state_0
*es0
;
219 const struct side_callback
*side_cb
;
222 if (side_unlikely(finalized
))
224 if (side_unlikely(!initialized
))
226 if (side_unlikely(event_state
->version
!= 0))
228 es0
= side_container_of(event_state
, const struct side_event_state_0
, parent
);
229 assert(es0
->desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
);
230 enabled
= __atomic_load_n(&es0
->enabled
, __ATOMIC_RELAXED
);
231 if (side_unlikely(enabled
& SIDE_EVENT_ENABLED_SHARED_MASK
)) {
232 if ((enabled
& SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK
) &&
233 (key
== SIDE_KEY_MATCH_ALL
|| key
== SIDE_KEY_USER_EVENT
)) {
234 // TODO: call kernel write.
236 if ((enabled
& SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK
) &&
237 (key
== SIDE_KEY_MATCH_ALL
|| key
== SIDE_KEY_PTRACE
))
238 side_ptrace_hook(event_state
, side_arg_vec
, var_struct
);
240 side_rcu_read_begin(&event_rcu_gp
, &rcu_read_state
);
241 for (side_cb
= side_rcu_dereference(es0
->callbacks
); side_cb
->u
.call_variadic
!= NULL
; side_cb
++) {
242 if (key
!= SIDE_KEY_MATCH_ALL
&& side_cb
->key
!= SIDE_KEY_MATCH_ALL
&& side_cb
->key
!= key
)
244 side_cb
->u
.call_variadic(es0
->desc
, side_arg_vec
, var_struct
, side_cb
->priv
);
246 side_rcu_read_end(&event_rcu_gp
, &rcu_read_state
);
249 void side_call_variadic(const struct side_event_state
*event_state
,
250 const struct side_arg_vec
*side_arg_vec
,
251 const struct side_arg_dynamic_struct
*var_struct
)
253 _side_call_variadic(event_state
, side_arg_vec
, var_struct
, SIDE_KEY_MATCH_ALL
);
256 void side_statedump_call_variadic(const struct side_event_state
*event_state
,
257 const struct side_arg_vec
*side_arg_vec
,
258 const struct side_arg_dynamic_struct
*var_struct
)
260 _side_call_variadic(event_state
, side_arg_vec
, var_struct
, filter_key
);
264 const struct side_callback
*side_tracer_callback_lookup(
265 const struct side_event_description
*desc
,
266 void *call
, void *priv
, uint64_t key
)
268 struct side_event_state
*event_state
= side_ptr_get(desc
->state
);
269 const struct side_event_state_0
*es0
;
270 const struct side_callback
*cb
;
272 if (side_unlikely(event_state
->version
!= 0))
274 es0
= side_container_of(event_state
, const struct side_event_state_0
, parent
);
275 for (cb
= es0
->callbacks
; cb
->u
.call
!= NULL
; cb
++) {
276 if ((void *) cb
->u
.call
== call
&& cb
->priv
== priv
&& cb
->key
== key
)
283 int _side_tracer_callback_register(struct side_event_description
*desc
,
284 void *call
, void *priv
, uint64_t key
)
286 struct side_event_state
*event_state
;
287 struct side_callback
*old_cb
, *new_cb
;
288 struct side_event_state_0
*es0
;
289 int ret
= SIDE_ERROR_OK
;
293 return SIDE_ERROR_INVAL
;
295 return SIDE_ERROR_EXITING
;
298 pthread_mutex_lock(&side_event_lock
);
299 event_state
= side_ptr_get(desc
->state
);
300 if (side_unlikely(event_state
->version
!= 0))
302 es0
= side_container_of(event_state
, struct side_event_state_0
, parent
);
303 old_nr_cb
= es0
->nr_callbacks
;
304 if (old_nr_cb
== UINT32_MAX
) {
305 ret
= SIDE_ERROR_INVAL
;
308 /* Reject duplicate (call, priv) tuples. */
309 if (side_tracer_callback_lookup(desc
, call
, priv
, key
)) {
310 ret
= SIDE_ERROR_EXIST
;
313 old_cb
= (struct side_callback
*) es0
->callbacks
;
314 /* old_nr_cb + 1 (new cb) + 1 (NULL) */
315 new_cb
= (struct side_callback
*) calloc(old_nr_cb
+ 2, sizeof(struct side_callback
));
317 ret
= SIDE_ERROR_NOMEM
;
320 memcpy(new_cb
, old_cb
, old_nr_cb
);
321 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
322 new_cb
[old_nr_cb
].u
.call_variadic
=
323 (side_tracer_callback_variadic_func
) call
;
325 new_cb
[old_nr_cb
].u
.call
=
326 (side_tracer_callback_func
) call
;
327 new_cb
[old_nr_cb
].priv
= priv
;
328 new_cb
[old_nr_cb
].key
= key
;
329 /* High order bits are already zeroed. */
330 side_rcu_assign_pointer(es0
->callbacks
, new_cb
);
331 side_rcu_wait_grace_period(&event_rcu_gp
);
335 /* Increment concurrently with kernel setting the top bits. */
337 (void) __atomic_add_fetch(&es0
->enabled
, 1, __ATOMIC_RELAXED
);
339 pthread_mutex_unlock(&side_event_lock
);
343 int side_tracer_callback_register(struct side_event_description
*desc
,
344 side_tracer_callback_func call
,
345 void *priv
, uint64_t key
)
347 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
348 return SIDE_ERROR_INVAL
;
349 return _side_tracer_callback_register(desc
, (void *) call
, priv
, key
);
352 int side_tracer_callback_variadic_register(struct side_event_description
*desc
,
353 side_tracer_callback_variadic_func call_variadic
,
354 void *priv
, uint64_t key
)
356 if (!(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
))
357 return SIDE_ERROR_INVAL
;
358 return _side_tracer_callback_register(desc
, (void *) call_variadic
, priv
, key
);
361 static int _side_tracer_callback_unregister(struct side_event_description
*desc
,
362 void *call
, void *priv
, uint64_t key
)
364 struct side_event_state
*event_state
;
365 struct side_callback
*old_cb
, *new_cb
;
366 const struct side_callback
*cb_pos
;
367 struct side_event_state_0
*es0
;
369 int ret
= SIDE_ERROR_OK
;
373 return SIDE_ERROR_INVAL
;
375 return SIDE_ERROR_EXITING
;
378 pthread_mutex_lock(&side_event_lock
);
379 event_state
= side_ptr_get(desc
->state
);
380 if (side_unlikely(event_state
->version
!= 0))
382 es0
= side_container_of(event_state
, struct side_event_state_0
, parent
);
383 cb_pos
= side_tracer_callback_lookup(desc
, call
, priv
, key
);
385 ret
= SIDE_ERROR_NOENT
;
388 old_nr_cb
= es0
->nr_callbacks
;
389 old_cb
= (struct side_callback
*) es0
->callbacks
;
390 if (old_nr_cb
== 1) {
391 new_cb
= (struct side_callback
*) &side_empty_callback
;
393 pos_idx
= cb_pos
- es0
->callbacks
;
394 /* Remove entry at pos_idx. */
395 /* old_nr_cb - 1 (removed cb) + 1 (NULL) */
396 new_cb
= (struct side_callback
*) calloc(old_nr_cb
, sizeof(struct side_callback
));
398 ret
= SIDE_ERROR_NOMEM
;
401 memcpy(new_cb
, old_cb
, pos_idx
);
402 memcpy(&new_cb
[pos_idx
], &old_cb
[pos_idx
+ 1], old_nr_cb
- pos_idx
- 1);
404 /* High order bits are already zeroed. */
405 side_rcu_assign_pointer(es0
->callbacks
, new_cb
);
406 side_rcu_wait_grace_period(&event_rcu_gp
);
409 /* Decrement concurrently with kernel setting the top bits. */
411 (void) __atomic_add_fetch(&es0
->enabled
, -1, __ATOMIC_RELAXED
);
413 pthread_mutex_unlock(&side_event_lock
);
417 int side_tracer_callback_unregister(struct side_event_description
*desc
,
418 side_tracer_callback_func call
,
419 void *priv
, uint64_t key
)
421 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
422 return SIDE_ERROR_INVAL
;
423 return _side_tracer_callback_unregister(desc
, (void *) call
, priv
, key
);
426 int side_tracer_callback_variadic_unregister(struct side_event_description
*desc
,
427 side_tracer_callback_variadic_func call_variadic
,
428 void *priv
, uint64_t key
)
430 if (!(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
))
431 return SIDE_ERROR_INVAL
;
432 return _side_tracer_callback_unregister(desc
, (void *) call_variadic
, priv
, key
);
435 struct side_events_register_handle
*side_events_register(struct side_event_description
**events
, uint32_t nr_events
)
437 struct side_events_register_handle
*events_handle
= NULL
;
438 struct side_tracer_handle
*tracer_handle
;
444 events_handle
= (struct side_events_register_handle
*)
445 calloc(1, sizeof(struct side_events_register_handle
));
448 events_handle
->events
= events
;
449 events_handle
->nr_events
= nr_events
;
451 pthread_mutex_lock(&side_event_lock
);
452 side_list_insert_node_tail(&side_events_list
, &events_handle
->node
);
453 side_list_for_each_entry(tracer_handle
, &side_tracer_list
, node
) {
454 tracer_handle
->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS
,
455 events
, nr_events
, tracer_handle
->priv
);
457 pthread_mutex_unlock(&side_event_lock
);
458 //TODO: call event batch register ioctl
459 return events_handle
;
463 void side_event_remove_callbacks(struct side_event_description
*desc
)
465 struct side_event_state
*event_state
= side_ptr_get(desc
->state
);
466 struct side_event_state_0
*es0
;
467 struct side_callback
*old_cb
;
470 if (side_unlikely(event_state
->version
!= 0))
472 es0
= side_container_of(event_state
, struct side_event_state_0
, parent
);
473 nr_cb
= es0
->nr_callbacks
;
476 old_cb
= (struct side_callback
*) es0
->callbacks
;
477 (void) __atomic_add_fetch(&es0
->enabled
, -1, __ATOMIC_RELAXED
);
479 * Setting the state back to 0 cb and empty callbacks out of
480 * caution. This should not matter because instrumentation is
483 es0
->nr_callbacks
= 0;
484 side_rcu_assign_pointer(es0
->callbacks
, &side_empty_callback
);
486 * No need to wait for grace period because instrumentation is
493 * Unregister event handle. At this point, all side events in that
494 * handle should be unreachable.
496 void side_events_unregister(struct side_events_register_handle
*events_handle
)
498 struct side_tracer_handle
*tracer_handle
;
507 pthread_mutex_lock(&side_event_lock
);
508 side_list_remove_node(&events_handle
->node
);
509 side_list_for_each_entry(tracer_handle
, &side_tracer_list
, node
) {
510 tracer_handle
->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS
,
511 events_handle
->events
, events_handle
->nr_events
,
512 tracer_handle
->priv
);
514 for (i
= 0; i
< events_handle
->nr_events
; i
++) {
515 struct side_event_description
*event
= events_handle
->events
[i
];
517 /* Skip NULL pointers */
520 side_event_remove_callbacks(event
);
522 pthread_mutex_unlock(&side_event_lock
);
523 //TODO: call event batch unregister ioctl
527 struct side_tracer_handle
*side_tracer_event_notification_register(
528 void (*cb
)(enum side_tracer_notification notif
,
529 struct side_event_description
**events
, uint32_t nr_events
, void *priv
),
532 struct side_tracer_handle
*tracer_handle
;
533 struct side_events_register_handle
*events_handle
;
539 tracer_handle
= (struct side_tracer_handle
*)
540 calloc(1, sizeof(struct side_tracer_handle
));
543 pthread_mutex_lock(&side_event_lock
);
544 tracer_handle
->cb
= cb
;
545 tracer_handle
->priv
= priv
;
546 side_list_insert_node_tail(&side_tracer_list
, &tracer_handle
->node
);
547 side_list_for_each_entry(events_handle
, &side_events_list
, node
) {
548 cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS
,
549 events_handle
->events
, events_handle
->nr_events
, priv
);
551 pthread_mutex_unlock(&side_event_lock
);
552 return tracer_handle
;
555 void side_tracer_event_notification_unregister(struct side_tracer_handle
*tracer_handle
)
557 struct side_events_register_handle
*events_handle
;
563 pthread_mutex_lock(&side_event_lock
);
564 side_list_for_each_entry(events_handle
, &side_events_list
, node
) {
565 tracer_handle
->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS
,
566 events_handle
->events
, events_handle
->nr_events
,
567 tracer_handle
->priv
);
569 side_list_remove_node(&tracer_handle
->node
);
570 pthread_mutex_unlock(&side_event_lock
);
574 /* Called with side_statedump_lock held. */
576 void queue_statedump_pending(struct side_statedump_request_handle
*handle
, uint64_t key
)
578 struct side_statedump_notification
*notif
;
580 notif
= (struct side_statedump_notification
*) calloc(1, sizeof(struct side_statedump_notification
));
584 side_list_insert_node_tail(&handle
->notification_queue
, ¬if
->node
);
585 if (handle
->mode
== SIDE_STATEDUMP_MODE_AGENT_THREAD
) {
586 (void)__atomic_or_fetch(&statedump_agent_thread
.state
, AGENT_THREAD_STATE_HANDLE_REQUEST
, __ATOMIC_SEQ_CST
);
587 pthread_cond_broadcast(&statedump_agent_thread
.worker_cond
);
591 /* Called with side_statedump_lock held. */
593 void unqueue_statedump_pending(struct side_statedump_request_handle
*handle
, uint64_t key
)
595 struct side_statedump_notification
*notif
, *tmp
;
597 side_list_for_each_entry_safe(notif
, tmp
, &handle
->notification_queue
, node
) {
598 if (key
== SIDE_KEY_MATCH_ALL
|| key
== notif
->key
) {
599 side_list_remove_node(¬if
->node
);
606 void side_statedump_run(struct side_statedump_request_handle
*handle
,
607 struct side_statedump_notification
*notif
)
609 /* Invoke the state dump callback specifically for the tracer key. */
610 filter_key
= notif
->key
;
611 side_statedump_event_call(side_statedump_begin
,
612 side_arg_list(side_arg_string(handle
->name
)));
614 side_statedump_event_call(side_statedump_end
,
615 side_arg_list(side_arg_string(handle
->name
)));
616 filter_key
= SIDE_KEY_MATCH_ALL
;
620 void _side_statedump_run_pending_requests(struct side_statedump_request_handle
*handle
)
622 struct side_statedump_notification
*notif
, *tmp
;
623 DEFINE_SIDE_LIST_HEAD(tmp_head
);
625 pthread_mutex_lock(&side_statedump_lock
);
626 side_list_splice(&handle
->notification_queue
, &tmp_head
);
627 side_list_head_init(&handle
->notification_queue
);
628 pthread_mutex_unlock(&side_statedump_lock
);
630 /* We are now sole owner of the tmp_head list. */
631 side_list_for_each_entry(notif
, &tmp_head
, node
)
632 side_statedump_run(handle
, notif
);
633 side_list_for_each_entry_safe(notif
, tmp
, &tmp_head
, node
)
636 if (handle
->mode
== SIDE_STATEDUMP_MODE_AGENT_THREAD
) {
637 pthread_mutex_lock(&side_statedump_lock
);
638 pthread_cond_broadcast(&statedump_agent_thread
.waiter_cond
);
639 pthread_mutex_unlock(&side_statedump_lock
);
644 void *statedump_agent_func(void *arg
__attribute__((unused
)))
647 struct side_statedump_request_handle
*handle
;
648 struct side_rcu_read_state rcu_read_state
;
649 enum agent_thread_state state
;
651 pthread_mutex_lock(&side_statedump_lock
);
653 state
= __atomic_load_n(&statedump_agent_thread
.state
, __ATOMIC_SEQ_CST
);
654 if (state
== AGENT_THREAD_STATE_BLOCKED
)
655 pthread_cond_wait(&statedump_agent_thread
.worker_cond
, &side_statedump_lock
);
659 pthread_mutex_unlock(&side_statedump_lock
);
660 if (state
& AGENT_THREAD_STATE_EXIT
)
662 (void)__atomic_and_fetch(&statedump_agent_thread
.state
, ~AGENT_THREAD_STATE_HANDLE_REQUEST
, __ATOMIC_SEQ_CST
);
663 side_rcu_read_begin(&statedump_rcu_gp
, &rcu_read_state
);
664 side_list_for_each_entry_rcu(handle
, &side_statedump_list
, node
)
665 _side_statedump_run_pending_requests(handle
);
666 side_rcu_read_end(&statedump_rcu_gp
, &rcu_read_state
);
671 /* Called with side_agent_thread_lock and side_statedump_lock held. */
673 void statedump_agent_thread_get(void)
677 if (statedump_agent_thread
.ref
++)
679 pthread_cond_init(&statedump_agent_thread
.worker_cond
, NULL
);
680 pthread_cond_init(&statedump_agent_thread
.waiter_cond
, NULL
);
681 statedump_agent_thread
.state
= AGENT_THREAD_STATE_BLOCKED
;
682 ret
= pthread_create(&statedump_agent_thread
.id
, NULL
,
683 statedump_agent_func
, NULL
);
690 * Called with side_agent_thread_lock and side_statedump_lock held.
691 * Returns true if join for agent thread is needed.
694 bool statedump_agent_thread_put(void)
696 if (--statedump_agent_thread
.ref
)
698 (void)__atomic_or_fetch(&statedump_agent_thread
.state
, AGENT_THREAD_STATE_EXIT
, __ATOMIC_SEQ_CST
);
699 pthread_cond_broadcast(&statedump_agent_thread
.worker_cond
);
703 /* Called with side_agent_thread_lock held. */
705 void statedump_agent_thread_join(void)
710 ret
= pthread_join(statedump_agent_thread
.id
, &retval
);
714 statedump_agent_thread
.state
= AGENT_THREAD_STATE_BLOCKED
;
715 pthread_cond_destroy(&statedump_agent_thread
.worker_cond
);
716 pthread_cond_destroy(&statedump_agent_thread
.waiter_cond
);
719 struct side_statedump_request_handle
*
720 side_statedump_request_notification_register(const char *state_name
,
721 void (*statedump_cb
)(void),
722 enum side_statedump_mode mode
)
724 struct side_statedump_request_handle
*handle
;
732 * The statedump request notification should not be registered
733 * from a notification callback.
736 handle
= (struct side_statedump_request_handle
*)
737 calloc(1, sizeof(struct side_statedump_request_handle
));
740 name
= strdup(state_name
);
743 handle
->cb
= statedump_cb
;
746 side_list_head_init(&handle
->notification_queue
);
748 if (mode
== SIDE_STATEDUMP_MODE_AGENT_THREAD
)
749 pthread_mutex_lock(&side_agent_thread_lock
);
750 pthread_mutex_lock(&side_statedump_lock
);
751 if (mode
== SIDE_STATEDUMP_MODE_AGENT_THREAD
)
752 statedump_agent_thread_get();
753 side_list_insert_node_tail_rcu(&side_statedump_list
, &handle
->node
);
754 /* Queue statedump pending for all tracers. */
755 queue_statedump_pending(handle
, SIDE_KEY_MATCH_ALL
);
756 pthread_mutex_unlock(&side_statedump_lock
);
758 if (mode
== SIDE_STATEDUMP_MODE_AGENT_THREAD
) {
759 pthread_mutex_unlock(&side_agent_thread_lock
);
761 pthread_mutex_lock(&side_statedump_lock
);
762 while (!side_list_empty(&handle
->notification_queue
))
763 pthread_cond_wait(&statedump_agent_thread
.waiter_cond
, &side_statedump_lock
);
764 pthread_mutex_unlock(&side_statedump_lock
);
774 void side_statedump_request_notification_unregister(struct side_statedump_request_handle
*handle
)
784 if (handle
->mode
== SIDE_STATEDUMP_MODE_AGENT_THREAD
)
785 pthread_mutex_lock(&side_agent_thread_lock
);
786 pthread_mutex_lock(&side_statedump_lock
);
787 unqueue_statedump_pending(handle
, SIDE_KEY_MATCH_ALL
);
788 side_list_remove_node_rcu(&handle
->node
);
789 if (handle
->mode
== SIDE_STATEDUMP_MODE_AGENT_THREAD
)
790 join
= statedump_agent_thread_put();
791 pthread_mutex_unlock(&side_statedump_lock
);
793 statedump_agent_thread_join();
794 if (handle
->mode
== SIDE_STATEDUMP_MODE_AGENT_THREAD
)
795 pthread_mutex_unlock(&side_agent_thread_lock
);
797 side_rcu_wait_grace_period(&statedump_rcu_gp
);
802 /* Returns true if the handle has pending statedump requests. */
803 bool side_statedump_poll_pending_requests(struct side_statedump_request_handle
*handle
)
807 if (handle
->mode
!= SIDE_STATEDUMP_MODE_POLLING
)
809 pthread_mutex_lock(&side_statedump_lock
);
810 ret
= !side_list_empty(&handle
->notification_queue
);
811 pthread_mutex_unlock(&side_statedump_lock
);
816 * Only polling mode state dump handles allow application to explicitly handle the
819 int side_statedump_run_pending_requests(struct side_statedump_request_handle
*handle
)
821 if (handle
->mode
!= SIDE_STATEDUMP_MODE_POLLING
)
822 return SIDE_ERROR_INVAL
;
823 _side_statedump_run_pending_requests(handle
);
824 return SIDE_ERROR_OK
;
828 * Request a state dump for tracer callbacks identified with "key".
830 int side_tracer_statedump_request(uint64_t key
)
832 struct side_statedump_request_handle
*handle
;
834 if (key
== SIDE_KEY_MATCH_ALL
)
835 return SIDE_ERROR_INVAL
;
836 pthread_mutex_lock(&side_statedump_lock
);
837 side_list_for_each_entry(handle
, &side_statedump_list
, node
)
838 queue_statedump_pending(handle
, key
);
839 pthread_mutex_lock(&side_statedump_lock
);
840 return SIDE_ERROR_OK
;
844 * Cancel a statedump request.
846 int side_tracer_statedump_request_cancel(uint64_t key
)
848 struct side_statedump_request_handle
*handle
;
850 if (key
== SIDE_KEY_MATCH_ALL
)
851 return SIDE_ERROR_INVAL
;
852 pthread_mutex_lock(&side_statedump_lock
);
853 side_list_for_each_entry(handle
, &side_statedump_list
, node
)
854 unqueue_statedump_pending(handle
, key
);
855 pthread_mutex_lock(&side_statedump_lock
);
856 return SIDE_ERROR_OK
;
860 * Tracer keys are represented on 64-bit. Return SIDE_ERROR_NOMEM on
861 * overflow (which should never happen in practice).
863 int side_tracer_request_key(uint64_t *key
)
865 int ret
= SIDE_ERROR_OK
;
867 pthread_mutex_lock(&side_key_lock
);
868 if (side_key_next
== 0) {
869 ret
= SIDE_ERROR_NOMEM
;
872 *key
= side_key_next
++;
874 pthread_mutex_unlock(&side_key_lock
);
882 side_rcu_gp_init(&event_rcu_gp
);
883 side_rcu_gp_init(&statedump_rcu_gp
);
888 * side_exit() is executed from a library destructor. It can be called
889 * explicitly at application exit as well. Concurrent side API use is
890 * not expected at that point.
894 struct side_events_register_handle
*handle
, *tmp
;
898 side_list_for_each_entry_safe(handle
, tmp
, &side_events_list
, node
)
899 side_events_unregister(handle
);
900 side_rcu_gp_exit(&event_rcu_gp
);
901 side_rcu_gp_exit(&statedump_rcu_gp
);