Implement agent thread
[libside.git] / src / side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <stdlib.h>	/* calloc(), free(), abort() */
#include <assert.h>
#include <pthread.h>
#include <unistd.h>

#include "rcu.h"
#include "list.h"
#include "rculist.h"

/* Top 8 bits reserved for shared tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x8000000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x4000000000000000ULL

/* Allow 2^56 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x80000000UL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x40000000UL

/* Allow 2^24 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFUL
#endif
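
/*
 * Illustrative sketch (not part of the library): a single relaxed load
 * of the per-event "enabled" word yields both the shared tracer bits
 * (kernel user event, ptrace) and the private tracer reference count:
 *
 *   uintptr_t enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
 *   bool shared_tracer_attached = enabled & SIDE_EVENT_ENABLED_SHARED_MASK;
 *   uintptr_t private_refs = enabled & SIDE_EVENT_ENABLED_PRIVATE_MASK;
 */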

#define SIDE_KEY_RESERVED_RANGE_END	0x8

/* Key 0x0 is reserved to match all. */
#define SIDE_KEY_MATCH_ALL		0x0
/* Key 0x1 is reserved for user event. */
#define SIDE_KEY_USER_EVENT		0x1
/* Key 0x2 is reserved for ptrace. */
#define SIDE_KEY_PTRACE			0x2

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

struct side_statedump_notification {
	struct side_list_node node;
	uint64_t key;
};

struct side_statedump_request_handle {
	struct side_list_node node;			/* Statedump request RCU list node. */
	struct side_list_head notification_queue;	/* Queue of struct side_statedump_notification. */
	void (*cb)(void);
	char *name;
	enum side_statedump_mode mode;
};

struct side_callback {
	union {
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			void *priv);
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			const struct side_arg_dynamic_struct *var_struct,
			void *priv);
	} u;
	void *priv;
	uint64_t key;
};

enum agent_thread_state {
	AGENT_THREAD_STATE_BLOCKED = 0,
	AGENT_THREAD_STATE_HANDLE_REQUEST = (1 << 0),
	AGENT_THREAD_STATE_EXIT = (1 << 1),
};

struct statedump_agent_thread {
	long ref;
	pthread_t id;
	enum agent_thread_state state;
};

static struct side_rcu_gp_state event_rcu_gp, statedump_rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutexes allowing tracer callbacks to use the side API.
 */
static pthread_mutex_t side_event_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static pthread_mutex_t side_statedump_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static pthread_mutex_t side_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Dynamic tracer key allocation. */
static uint64_t side_key_next = SIDE_KEY_RESERVED_RANGE_END;

static struct statedump_agent_thread statedump_agent_thread;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The statedump request list is an RCU list, which allows the agent
 * thread to iterate over it under an RCU read-side lock.
 */
static DEFINE_SIDE_LIST_HEAD(side_statedump_list);

/*
 * Callback filter key for state dump.
 */
static __thread uint64_t filter_key = SIDE_KEY_MATCH_ALL;

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const char side_empty_callback[sizeof(struct side_callback)];

side_static_event(side_statedump_begin, "side", "statedump_begin",
	SIDE_LOGLEVEL_INFO, side_field_list(side_field_string("name")));
side_static_event(side_statedump_end, "side", "statedump_end",
	SIDE_LOGLEVEL_INFO, side_field_list(side_field_string("name")));

/*
 * side_ptrace_hook is a placeholder for a debugger breakpoint.
 * var_struct is NULL if the event is not variadic.
 */
void side_ptrace_hook(const struct side_event_state *event_state __attribute__((unused)),
		const struct side_arg_vec *side_arg_vec __attribute__((unused)),
		const struct side_arg_dynamic_struct *var_struct __attribute__((unused)))
	__attribute__((noinline));
void side_ptrace_hook(const struct side_event_state *event_state __attribute__((unused)),
		const struct side_arg_vec *side_arg_vec __attribute__((unused)),
		const struct side_arg_dynamic_struct *var_struct __attribute__((unused)))
{
}
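
/*
 * Callback key filtering: side_call() and side_call_variadic() pass
 * SIDE_KEY_MATCH_ALL, which invokes every registered callback. The
 * statedump variants pass the per-thread filter_key; when it targets
 * a specific tracer, only callbacks registered with that key (or with
 * SIDE_KEY_MATCH_ALL) are invoked.
 */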

static
void _side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec, uint64_t key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(!(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_SHARED_MASK)) {
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK) &&
				(key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_USER_EVENT)) {
			// TODO: call kernel write.
		}
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK) &&
				(key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_PTRACE))
			side_ptrace_hook(event_state, side_arg_vec, NULL);
	}
	side_rcu_read_begin(&event_rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call != NULL; side_cb++) {
		if (key != SIDE_KEY_MATCH_ALL && side_cb->key != SIDE_KEY_MATCH_ALL && side_cb->key != key)
			continue;
		side_cb->u.call(es0->desc, side_arg_vec, side_cb->priv);
	}
	side_rcu_read_end(&event_rcu_gp, &rcu_read_state);
}

void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	_side_call(event_state, side_arg_vec, SIDE_KEY_MATCH_ALL);
}

void side_statedump_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	_side_call(event_state, side_arg_vec, filter_key);
}

static
void _side_call_variadic(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		const struct side_arg_dynamic_struct *var_struct,
		uint64_t key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_SHARED_MASK)) {
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK) &&
				(key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_USER_EVENT)) {
			// TODO: call kernel write.
		}
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK) &&
				(key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_PTRACE))
			side_ptrace_hook(event_state, side_arg_vec, var_struct);
	}
	side_rcu_read_begin(&event_rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call_variadic != NULL; side_cb++) {
		if (key != SIDE_KEY_MATCH_ALL && side_cb->key != SIDE_KEY_MATCH_ALL && side_cb->key != key)
			continue;
		side_cb->u.call_variadic(es0->desc, side_arg_vec, var_struct, side_cb->priv);
	}
	side_rcu_read_end(&event_rcu_gp, &rcu_read_state);
}

void side_call_variadic(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		const struct side_arg_dynamic_struct *var_struct)
{
	_side_call_variadic(event_state, side_arg_vec, var_struct, SIDE_KEY_MATCH_ALL);
}

void side_statedump_call_variadic(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		const struct side_arg_dynamic_struct *var_struct)
{
	_side_call_variadic(event_state, side_arg_vec, var_struct, filter_key);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv, uint64_t key)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_event_state_0 *es0;
	const struct side_callback *cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	for (cb = es0->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv && cb->key == key)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv, uint64_t key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	struct side_event_state_0 *es0;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	old_nr_cb = es0->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv, key) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv, key)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) es0->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	new_cb[old_nr_cb].key = key;
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&event_rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	es0->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_event_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, uint64_t key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, uint64_t key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv, key);
}
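
/*
 * Example (illustrative sketch, not part of the library): a tracer
 * typically allocates its key once, then registers a callback for a
 * non-variadic event with it. "my_cb" and "my_priv" are hypothetical
 * names.
 *
 *   static void my_cb(const struct side_event_description *desc,
 *           const struct side_arg_vec *side_arg_vec, void *priv)
 *   {
 *           // ... consume the event fields described by desc ...
 *   }
 *
 *   uint64_t key;
 *
 *   if (side_tracer_request_key(&key) != SIDE_ERROR_OK)
 *           abort();
 *   if (side_tracer_callback_register(desc, my_cb, my_priv, key) != SIDE_ERROR_OK)
 *           abort();
 */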

static int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv, uint64_t key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	struct side_event_state_0 *es0;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	cb_pos = side_tracer_callback_lookup(desc, call, priv, key);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = es0->nr_callbacks;
	old_cb = (struct side_callback *) es0->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - es0->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&event_rcu_gp);
	free(old_cb);
	es0->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_event_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, uint64_t key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, uint64_t key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv, key);
}

struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
		calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_event_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_event_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	struct side_event_state_0 *es0;
	struct side_callback *old_cb;
	uint32_t nr_cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	nr_cb = es0->nr_callbacks;
	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) es0->callbacks;
	(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	es0->nr_callbacks = 0;
	side_rcu_assign_pointer(es0->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers. */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_event_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
		calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_event_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_event_lock);
	return tracer_handle;
}

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_event_lock);
	free(tracer_handle);
}

/* Called with side_statedump_lock held. */
static
void queue_statedump_pending(struct side_statedump_request_handle *handle, uint64_t key)
{
	struct side_statedump_notification *notif;

	notif = (struct side_statedump_notification *) calloc(1, sizeof(struct side_statedump_notification));
	if (!notif)
		abort();
	notif->key = key;
	side_list_insert_node_tail(&handle->notification_queue, &notif->node);
	if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
		(void) __atomic_or_fetch(&statedump_agent_thread.state, AGENT_THREAD_STATE_HANDLE_REQUEST, __ATOMIC_SEQ_CST);
}

/* Called with side_statedump_lock held. */
static
void unqueue_statedump_pending(struct side_statedump_request_handle *handle, uint64_t key)
{
	struct side_statedump_notification *notif, *tmp;

	side_list_for_each_entry_safe(notif, tmp, &handle->notification_queue, node) {
		if (key == SIDE_KEY_MATCH_ALL || key == notif->key) {
			side_list_remove_node(&notif->node);
			free(notif);
		}
	}
}

static
void side_statedump_run(struct side_statedump_request_handle *handle,
		struct side_statedump_notification *notif)
{
	/* Invoke the state dump callback specifically for the tracer key. */
	filter_key = notif->key;
	side_statedump_event_call(side_statedump_begin,
		side_arg_list(side_arg_string(handle->name)));
	handle->cb();
	side_statedump_event_call(side_statedump_end,
		side_arg_list(side_arg_string(handle->name)));
	filter_key = SIDE_KEY_MATCH_ALL;
}

static
void _side_statedump_run_pending_requests(struct side_statedump_request_handle *handle)
{
	struct side_statedump_notification *notif, *tmp;
	DEFINE_SIDE_LIST_HEAD(tmp_head);

	pthread_mutex_lock(&side_statedump_lock);
	side_list_splice(&handle->notification_queue, &tmp_head);
	side_list_head_init(&handle->notification_queue);
	pthread_mutex_unlock(&side_statedump_lock);

	/* We are now sole owner of the tmp_head list. */
	side_list_for_each_entry(notif, &tmp_head, node)
		side_statedump_run(handle, notif);
	side_list_for_each_entry_safe(notif, tmp, &tmp_head, node)
		free(notif);
}

static
void *statedump_agent_func(void *arg __attribute__((unused)))
{
	for (;;) {
		struct side_statedump_request_handle *handle;
		struct side_rcu_read_state rcu_read_state;
		enum agent_thread_state state;

		/* TODO: futex-based wakeup. */
		state = __atomic_load_n(&statedump_agent_thread.state, __ATOMIC_SEQ_CST);
		if (state == AGENT_THREAD_STATE_BLOCKED) {
			sleep(1);
			continue;
		}
		if (state & AGENT_THREAD_STATE_EXIT)
			break;
		(void) __atomic_and_fetch(&statedump_agent_thread.state, ~AGENT_THREAD_STATE_HANDLE_REQUEST, __ATOMIC_SEQ_CST);
		side_rcu_read_begin(&statedump_rcu_gp, &rcu_read_state);
		side_list_for_each_entry_rcu(handle, &side_statedump_list, node)
			_side_statedump_run_pending_requests(handle);
		side_rcu_read_end(&statedump_rcu_gp, &rcu_read_state);
	}
	return NULL;
}
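
/*
 * statedump_agent_thread_get() and statedump_agent_thread_put() are
 * called with side_statedump_lock held, which serializes updates to
 * the reference count.
 */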
static
void statedump_agent_thread_get(void)
{
	int ret;

	if (statedump_agent_thread.ref++)
		return;
	statedump_agent_thread.state = AGENT_THREAD_STATE_BLOCKED;
	ret = pthread_create(&statedump_agent_thread.id, NULL,
			statedump_agent_func, NULL);
	if (ret)
		abort();
}

static
void statedump_agent_thread_put(void)
{
	int ret;
	void *retval;

	if (--statedump_agent_thread.ref)
		return;
	(void) __atomic_or_fetch(&statedump_agent_thread.state, AGENT_THREAD_STATE_EXIT, __ATOMIC_SEQ_CST);
	ret = pthread_join(statedump_agent_thread.id, &retval);
	if (ret)
		abort();
	statedump_agent_thread.state = AGENT_THREAD_STATE_BLOCKED;
}

struct side_statedump_request_handle *
	side_statedump_request_notification_register(const char *state_name,
		void (*statedump_cb)(void),
		enum side_statedump_mode mode)
{
	struct side_statedump_request_handle *handle;
	char *name;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	/*
	 * The statedump request notification should not be registered
	 * from a notification callback.
	 */
	assert(!filter_key);
	handle = (struct side_statedump_request_handle *)
		calloc(1, sizeof(struct side_statedump_request_handle));
	if (!handle)
		return NULL;
	name = strdup(state_name);
	if (!name)
		goto name_nomem;
	handle->cb = statedump_cb;
	handle->name = name;
	handle->mode = mode;
	side_list_head_init(&handle->notification_queue);

	pthread_mutex_lock(&side_statedump_lock);
	if (mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
		statedump_agent_thread_get();
	side_list_insert_node_tail_rcu(&side_statedump_list, &handle->node);
	/* Queue a pending statedump for all tracers. */
	queue_statedump_pending(handle, SIDE_KEY_MATCH_ALL);
	pthread_mutex_unlock(&side_statedump_lock);

	if (mode == SIDE_STATEDUMP_MODE_AGENT_THREAD) {
		for (;;) {
			bool is_empty;

			/* TODO: futex-based wakeup. */
			pthread_mutex_lock(&side_statedump_lock);
			is_empty = side_list_empty(&handle->notification_queue);
			pthread_mutex_unlock(&side_statedump_lock);
			if (is_empty)
				break;
			sleep(1);
		}
	}

	return handle;

name_nomem:
	free(handle);
	return NULL;
}

void side_statedump_request_notification_unregister(struct side_statedump_request_handle *handle)
{
	if (finalized)
		return;
	if (!initialized)
		side_init();
	assert(!filter_key);

	pthread_mutex_lock(&side_statedump_lock);
	unqueue_statedump_pending(handle, SIDE_KEY_MATCH_ALL);
	side_list_remove_node_rcu(&handle->node);
	if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
		statedump_agent_thread_put();
	pthread_mutex_unlock(&side_statedump_lock);

	side_rcu_wait_grace_period(&statedump_rcu_gp);
	free(handle->name);
	free(handle);
}

/* Returns true if the handle has pending statedump requests. */
bool side_statedump_poll_pending_requests(struct side_statedump_request_handle *handle)
{
	bool ret;

	if (handle->mode != SIDE_STATEDUMP_MODE_POLLING)
		return false;
	pthread_mutex_lock(&side_statedump_lock);
	ret = !side_list_empty(&handle->notification_queue);
	pthread_mutex_unlock(&side_statedump_lock);
	return ret;
}

/*
 * Only polling-mode state dump handles allow the application to
 * explicitly handle the pending requests.
 */
int side_statedump_run_pending_requests(struct side_statedump_request_handle *handle)
{
	if (handle->mode != SIDE_STATEDUMP_MODE_POLLING)
		return SIDE_ERROR_INVAL;
	_side_statedump_run_pending_requests(handle);
	return SIDE_ERROR_OK;
}
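
/*
 * Example (illustrative sketch, not part of the library): an
 * application using a polling-mode handle registers its statedump
 * callback once, then services pending requests from its own event
 * loop. "dump_app_state" is a hypothetical void(void) callback.
 *
 *   struct side_statedump_request_handle *h;
 *
 *   h = side_statedump_request_notification_register("app_state",
 *           dump_app_state, SIDE_STATEDUMP_MODE_POLLING);
 *   // ... in the application event loop:
 *   if (side_statedump_poll_pending_requests(h))
 *           side_statedump_run_pending_requests(h);
 */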

/*
 * Request a state dump for tracer callbacks identified with "key".
 */
int side_tracer_statedump_request(uint64_t key)
{
	struct side_statedump_request_handle *handle;

	if (key == SIDE_KEY_MATCH_ALL)
		return SIDE_ERROR_INVAL;
	pthread_mutex_lock(&side_statedump_lock);
	side_list_for_each_entry(handle, &side_statedump_list, node)
		queue_statedump_pending(handle, key);
	pthread_mutex_unlock(&side_statedump_lock);
	return SIDE_ERROR_OK;
}

/*
 * Cancel a statedump request.
 */
int side_tracer_statedump_request_cancel(uint64_t key)
{
	struct side_statedump_request_handle *handle;

	if (key == SIDE_KEY_MATCH_ALL)
		return SIDE_ERROR_INVAL;
	pthread_mutex_lock(&side_statedump_lock);
	side_list_for_each_entry(handle, &side_statedump_list, node)
		unqueue_statedump_pending(handle, key);
	pthread_mutex_unlock(&side_statedump_lock);
	return SIDE_ERROR_OK;
}

/*
 * Tracer keys are 64-bit values. Return SIDE_ERROR_NOMEM on overflow
 * (which should never happen in practice).
 */
int side_tracer_request_key(uint64_t *key)
{
	int ret = SIDE_ERROR_OK;

	pthread_mutex_lock(&side_key_lock);
	if (side_key_next == 0) {
		ret = SIDE_ERROR_NOMEM;
		goto end;
	}
	*key = side_key_next++;
end:
	pthread_mutex_unlock(&side_key_lock);
	return ret;
}
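
/*
 * Example (illustrative sketch, not part of the library): a tracer
 * that attaches late can request a state dump targeted at its own
 * callbacks only, leaving other tracers undisturbed:
 *
 *   uint64_t key;
 *
 *   if (side_tracer_request_key(&key) != SIDE_ERROR_OK)
 *           abort();
 *   // ... register callbacks with "key" ...
 *   side_tracer_statedump_request(key);
 */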

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&event_rcu_gp);
	side_rcu_gp_init(&statedump_rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&event_rcu_gp);
	side_rcu_gp_exit(&statedump_rcu_gp);
	finalized = true;
}