Implement agent thread wakeup scheme with pthread cond var
[libside.git] / src / side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>
#include <unistd.h>

#include "rcu.h"
#include "list.h"
#include "rculist.h"

/* Top 8 bits reserved for shared tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x8000000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x4000000000000000ULL

/* Allow 2^56 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x80000000UL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x40000000UL

/* Allow 2^24 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFUL
#endif

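/*
 * Worked example (illustrative, not part of the API): on 64-bit, an
 * enabled word of 0x8000000000000002 decomposes as
 *
 *	enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	-> set
 *	enabled & SIDE_EVENT_ENABLED_PRIVATE_MASK		-> 0x2
 *
 * i.e. the kernel user event integration has enabled the event, and
 * two private tracer references are held on it.
 */
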
#define SIDE_KEY_RESERVED_RANGE_END	0x8

/* Key 0x0 is reserved to match all. */
#define SIDE_KEY_MATCH_ALL		0x0
/* Key 0x1 is reserved for user event. */
#define SIDE_KEY_USER_EVENT		0x1
/* Key 0x2 is reserved for ptrace. */
#define SIDE_KEY_PTRACE			0x2

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

struct side_statedump_notification {
	struct side_list_node node;
	uint64_t key;
};

struct side_statedump_request_handle {
	struct side_list_node node;			/* Statedump request RCU list node. */
	struct side_list_head notification_queue;	/* Queue of struct side_statedump_notification */
	void (*cb)(void);
	char *name;
	enum side_statedump_mode mode;
};

struct side_callback {
	union {
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			void *priv);
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			const struct side_arg_dynamic_struct *var_struct,
			void *priv);
	} u;
	void *priv;
	uint64_t key;
};

enum agent_thread_state {
	AGENT_THREAD_STATE_BLOCKED = 0,
	AGENT_THREAD_STATE_HANDLE_REQUEST = (1 << 0),
	AGENT_THREAD_STATE_EXIT = (1 << 1),
};

struct statedump_agent_thread {
	long ref;
	pthread_t id;
	enum agent_thread_state state;
	pthread_cond_t worker_cond;
	pthread_cond_t waiter_cond;
};

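/*
 * Wakeup scheme (descriptive note): worker_cond wakes the agent thread
 * when a statedump request is queued or when thread exit is requested;
 * waiter_cond wakes threads blocked in the agent-thread-mode
 * registration path once their initial statedump notifications have
 * been serviced. Both condition variables are paired with
 * side_statedump_lock.
 */
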
static struct side_rcu_gp_state event_rcu_gp, statedump_rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_event_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static pthread_mutex_t side_statedump_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static pthread_mutex_t side_key_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * The side_agent_thread_lock protects the lifetime of the agent
 * thread: reference counting, creation, join. It is not taken by the
 * agent thread itself, so it has no circular dependency with pthread
 * join.
 * The side_statedump_lock nests inside the side_agent_thread_lock.
 */
static pthread_mutex_t side_agent_thread_lock = PTHREAD_MUTEX_INITIALIZER;

/* Dynamic tracer key allocation. */
static uint64_t side_key_next = SIDE_KEY_RESERVED_RANGE_END;

static struct statedump_agent_thread statedump_agent_thread;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The statedump request list is an RCU list to allow the agent thread
 * to iterate over it under an RCU read-side lock.
 */
static DEFINE_SIDE_LIST_HEAD(side_statedump_list);

/*
 * Callback filter key for state dump.
 */
static __thread uint64_t filter_key = SIDE_KEY_MATCH_ALL;

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const char side_empty_callback[sizeof(struct side_callback)];

side_static_event(side_statedump_begin, "side", "statedump_begin",
	SIDE_LOGLEVEL_INFO, side_field_list(side_field_string("name")));
side_static_event(side_statedump_end, "side", "statedump_end",
	SIDE_LOGLEVEL_INFO, side_field_list(side_field_string("name")));

/*
 * side_ptrace_hook is a placeholder for a debugger breakpoint.
 * var_struct is NULL if not variadic.
 */
void side_ptrace_hook(const struct side_event_state *event_state __attribute__((unused)),
		const struct side_arg_vec *side_arg_vec __attribute__((unused)),
		const struct side_arg_dynamic_struct *var_struct __attribute__((unused)))
	__attribute__((noinline));
void side_ptrace_hook(const struct side_event_state *event_state __attribute__((unused)),
		const struct side_arg_vec *side_arg_vec __attribute__((unused)),
		const struct side_arg_dynamic_struct *var_struct __attribute__((unused)))
{
}

static
void _side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec, uint64_t key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(!(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_SHARED_MASK)) {
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK) &&
				(key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_USER_EVENT)) {
			// TODO: call kernel write.
		}
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK) &&
				(key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_PTRACE))
			side_ptrace_hook(event_state, side_arg_vec, NULL);
	}
	side_rcu_read_begin(&event_rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call != NULL; side_cb++) {
		if (key != SIDE_KEY_MATCH_ALL && side_cb->key != SIDE_KEY_MATCH_ALL && side_cb->key != key)
			continue;
		side_cb->u.call(es0->desc, side_arg_vec, side_cb->priv);
	}
	side_rcu_read_end(&event_rcu_gp, &rcu_read_state);
}

void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	_side_call(event_state, side_arg_vec, SIDE_KEY_MATCH_ALL);
}

void side_statedump_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	_side_call(event_state, side_arg_vec, filter_key);
}

static
void _side_call_variadic(const struct side_event_state *event_state,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct,
	uint64_t key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_SHARED_MASK)) {
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK) &&
				(key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_USER_EVENT)) {
			// TODO: call kernel write.
		}
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK) &&
				(key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_PTRACE))
			side_ptrace_hook(event_state, side_arg_vec, var_struct);
	}
	side_rcu_read_begin(&event_rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call_variadic != NULL; side_cb++) {
		if (key != SIDE_KEY_MATCH_ALL && side_cb->key != SIDE_KEY_MATCH_ALL && side_cb->key != key)
			continue;
		side_cb->u.call_variadic(es0->desc, side_arg_vec, var_struct, side_cb->priv);
	}
	side_rcu_read_end(&event_rcu_gp, &rcu_read_state);
}

void side_call_variadic(const struct side_event_state *event_state,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct)
{
	_side_call_variadic(event_state, side_arg_vec, var_struct, SIDE_KEY_MATCH_ALL);
}

void side_statedump_call_variadic(const struct side_event_state *event_state,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct)
{
	_side_call_variadic(event_state, side_arg_vec, var_struct, filter_key);
}

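/*
 * Key filtering semantics (descriptive note): a regular side_call()
 * passes SIDE_KEY_MATCH_ALL, so every registered callback is invoked.
 * A statedump call passes the thread-local filter_key, so when a state
 * dump was requested for a specific tracer key, only callbacks
 * registered with that key (or with SIDE_KEY_MATCH_ALL) observe the
 * replayed state.
 */
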
static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv, uint64_t key)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_event_state_0 *es0;
	const struct side_callback *cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	for (cb = es0->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv && cb->key == key)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv, uint64_t key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	struct side_event_state_0 *es0;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	old_nr_cb = es0->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv, key) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv, key)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) es0->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	new_cb[old_nr_cb].key = key;
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&event_rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	es0->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_event_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, uint64_t key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, uint64_t key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv, key);
}

static int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv, uint64_t key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	struct side_event_state_0 *es0;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	cb_pos = side_tracer_callback_lookup(desc, call, priv, key);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = es0->nr_callbacks;
	old_cb = (struct side_callback *) es0->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - es0->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&event_rcu_gp);
	free(old_cb);
	es0->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_event_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, uint64_t key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, uint64_t key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv, key);
}

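/*
 * Illustrative usage sketch (hypothetical names my_event_desc and
 * my_tracer_cb, not part of this file): a tracer allocates a key with
 * side_tracer_request_key() and registers a non-variadic callback
 * with it.
 *
 *	static void my_tracer_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec *side_arg_vec, void *priv)
 *	{
 *		... consume side_arg_vec ...
 *	}
 *
 *	uint64_t key;
 *
 *	if (side_tracer_request_key(&key) == SIDE_ERROR_OK)
 *		(void) side_tracer_callback_register(my_event_desc,
 *				my_tracer_cb, NULL, key);
 */
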
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
			calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_event_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_event_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	struct side_event_state_0 *es0;
	struct side_callback *old_cb;
	uint32_t nr_cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	nr_cb = es0->nr_callbacks;
	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) es0->callbacks;
	(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	es0->nr_callbacks = 0;
	side_rcu_assign_pointer(es0->callbacks, (struct side_callback *) &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_event_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
			calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_event_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_event_lock);
	return tracer_handle;
}

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_event_lock);
	free(tracer_handle);
}

/* Called with side_statedump_lock held. */
static
void queue_statedump_pending(struct side_statedump_request_handle *handle, uint64_t key)
{
	struct side_statedump_notification *notif;

	notif = (struct side_statedump_notification *) calloc(1, sizeof(struct side_statedump_notification));
	if (!notif)
		abort();
	notif->key = key;
	side_list_insert_node_tail(&handle->notification_queue, &notif->node);
	if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD) {
		(void) __atomic_or_fetch(&statedump_agent_thread.state, AGENT_THREAD_STATE_HANDLE_REQUEST, __ATOMIC_SEQ_CST);
		pthread_cond_broadcast(&statedump_agent_thread.worker_cond);
	}
}

/* Called with side_statedump_lock held. */
static
void unqueue_statedump_pending(struct side_statedump_request_handle *handle, uint64_t key)
{
	struct side_statedump_notification *notif, *tmp;

	side_list_for_each_entry_safe(notif, tmp, &handle->notification_queue, node) {
		if (key == SIDE_KEY_MATCH_ALL || key == notif->key) {
			side_list_remove_node(&notif->node);
			free(notif);
		}
	}
}

static
void side_statedump_run(struct side_statedump_request_handle *handle,
		struct side_statedump_notification *notif)
{
	/* Invoke the state dump callback specifically for the tracer key. */
	filter_key = notif->key;
	side_statedump_event_call(side_statedump_begin,
		side_arg_list(side_arg_string(handle->name)));
	handle->cb();
	side_statedump_event_call(side_statedump_end,
		side_arg_list(side_arg_string(handle->name)));
	filter_key = SIDE_KEY_MATCH_ALL;
}

static
void _side_statedump_run_pending_requests(struct side_statedump_request_handle *handle)
{
	struct side_statedump_notification *notif, *tmp;
	DEFINE_SIDE_LIST_HEAD(tmp_head);

	pthread_mutex_lock(&side_statedump_lock);
	side_list_splice(&handle->notification_queue, &tmp_head);
	side_list_head_init(&handle->notification_queue);
	pthread_mutex_unlock(&side_statedump_lock);

	/* We are now sole owner of the tmp_head list. */
	side_list_for_each_entry(notif, &tmp_head, node)
		side_statedump_run(handle, notif);
	side_list_for_each_entry_safe(notif, tmp, &tmp_head, node)
		free(notif);

	if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD) {
		pthread_mutex_lock(&side_statedump_lock);
		pthread_cond_broadcast(&statedump_agent_thread.waiter_cond);
		pthread_mutex_unlock(&side_statedump_lock);
	}
}

static
void *statedump_agent_func(void *arg __attribute__((unused)))
{
	for (;;) {
		struct side_statedump_request_handle *handle;
		struct side_rcu_read_state rcu_read_state;
		enum agent_thread_state state;

		pthread_mutex_lock(&side_statedump_lock);
		for (;;) {
			state = __atomic_load_n(&statedump_agent_thread.state, __ATOMIC_SEQ_CST);
			if (state == AGENT_THREAD_STATE_BLOCKED)
				pthread_cond_wait(&statedump_agent_thread.worker_cond, &side_statedump_lock);
			else
				break;
		}
		pthread_mutex_unlock(&side_statedump_lock);
		if (state & AGENT_THREAD_STATE_EXIT)
			break;
		(void) __atomic_and_fetch(&statedump_agent_thread.state, ~AGENT_THREAD_STATE_HANDLE_REQUEST, __ATOMIC_SEQ_CST);
		side_rcu_read_begin(&statedump_rcu_gp, &rcu_read_state);
		side_list_for_each_entry_rcu(handle, &side_statedump_list, node)
			_side_statedump_run_pending_requests(handle);
		side_rcu_read_end(&statedump_rcu_gp, &rcu_read_state);
	}
	return NULL;
}

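/*
 * Descriptive note: the HANDLE_REQUEST flag is cleared before scanning
 * the statedump list. A request queued while the scan is in progress
 * sets the flag again (under side_statedump_lock) and signals
 * worker_cond, so the agent thread either observes a non-blocked state
 * on its next loop iteration or is woken from pthread_cond_wait();
 * no request is missed.
 */
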
/* Called with side_agent_thread_lock and side_statedump_lock held. */
static
void statedump_agent_thread_get(void)
{
	int ret;

	if (statedump_agent_thread.ref++)
		return;
	pthread_cond_init(&statedump_agent_thread.worker_cond, NULL);
	pthread_cond_init(&statedump_agent_thread.waiter_cond, NULL);
	statedump_agent_thread.state = AGENT_THREAD_STATE_BLOCKED;
	ret = pthread_create(&statedump_agent_thread.id, NULL,
			statedump_agent_func, NULL);
	if (ret)
		abort();
}

/*
 * Called with side_agent_thread_lock and side_statedump_lock held.
 * Returns true if join for agent thread is needed.
 */
static
bool statedump_agent_thread_put(void)
{
	if (--statedump_agent_thread.ref)
		return false;
	(void) __atomic_or_fetch(&statedump_agent_thread.state, AGENT_THREAD_STATE_EXIT, __ATOMIC_SEQ_CST);
	pthread_cond_broadcast(&statedump_agent_thread.worker_cond);
	return true;
}

/* Called with side_agent_thread_lock held. */
static
void statedump_agent_thread_join(void)
{
	int ret;
	void *retval;

	ret = pthread_join(statedump_agent_thread.id, &retval);
	if (ret)
		abort();
	statedump_agent_thread.state = AGENT_THREAD_STATE_BLOCKED;
	pthread_cond_destroy(&statedump_agent_thread.worker_cond);
	pthread_cond_destroy(&statedump_agent_thread.waiter_cond);
}

struct side_statedump_request_handle *
	side_statedump_request_notification_register(const char *state_name,
		void (*statedump_cb)(void),
		enum side_statedump_mode mode)
{
	struct side_statedump_request_handle *handle;
	char *name;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	/*
	 * The statedump request notification should not be registered
	 * from a notification callback.
	 */
	assert(!filter_key);
	handle = (struct side_statedump_request_handle *)
			calloc(1, sizeof(struct side_statedump_request_handle));
	if (!handle)
		return NULL;
	name = strdup(state_name);
	if (!name)
		goto name_nomem;
	handle->cb = statedump_cb;
	handle->name = name;
	handle->mode = mode;
	side_list_head_init(&handle->notification_queue);

	if (mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
		pthread_mutex_lock(&side_agent_thread_lock);
	pthread_mutex_lock(&side_statedump_lock);
	if (mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
		statedump_agent_thread_get();
	side_list_insert_node_tail_rcu(&side_statedump_list, &handle->node);
	/* Queue statedump pending for all tracers. */
	queue_statedump_pending(handle, SIDE_KEY_MATCH_ALL);
	pthread_mutex_unlock(&side_statedump_lock);

	if (mode == SIDE_STATEDUMP_MODE_AGENT_THREAD) {
		pthread_mutex_unlock(&side_agent_thread_lock);

		pthread_mutex_lock(&side_statedump_lock);
		while (!side_list_empty(&handle->notification_queue))
			pthread_cond_wait(&statedump_agent_thread.waiter_cond, &side_statedump_lock);
		pthread_mutex_unlock(&side_statedump_lock);
	}

	return handle;

name_nomem:
	free(handle);
	return NULL;
}

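/*
 * Illustrative usage sketch (hypothetical names my_statedump_cb and
 * "my_state"): an application or library registers a state dump
 * callback serviced by the agent thread. In agent-thread mode,
 * registration returns only after the initial statedump notifications
 * have been serviced.
 *
 *	static void my_statedump_cb(void)
 *	{
 *		... emit state via side_statedump_call() helpers ...
 *	}
 *
 *	struct side_statedump_request_handle *h =
 *		side_statedump_request_notification_register("my_state",
 *			my_statedump_cb, SIDE_STATEDUMP_MODE_AGENT_THREAD);
 *	...
 *	side_statedump_request_notification_unregister(h);
 */
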
void side_statedump_request_notification_unregister(struct side_statedump_request_handle *handle)
{
	bool join = false;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	assert(!filter_key);

	if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
		pthread_mutex_lock(&side_agent_thread_lock);
	pthread_mutex_lock(&side_statedump_lock);
	unqueue_statedump_pending(handle, SIDE_KEY_MATCH_ALL);
	side_list_remove_node_rcu(&handle->node);
	if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
		join = statedump_agent_thread_put();
	pthread_mutex_unlock(&side_statedump_lock);
	if (join)
		statedump_agent_thread_join();
	if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
		pthread_mutex_unlock(&side_agent_thread_lock);

	side_rcu_wait_grace_period(&statedump_rcu_gp);
	free(handle->name);
	free(handle);
}

/* Returns true if the handle has pending statedump requests. */
bool side_statedump_poll_pending_requests(struct side_statedump_request_handle *handle)
{
	bool ret;

	if (handle->mode != SIDE_STATEDUMP_MODE_POLLING)
		return false;
	pthread_mutex_lock(&side_statedump_lock);
	ret = !side_list_empty(&handle->notification_queue);
	pthread_mutex_unlock(&side_statedump_lock);
	return ret;
}

/*
 * Only polling-mode state dump handles allow the application to
 * explicitly handle the pending requests.
 */
int side_statedump_run_pending_requests(struct side_statedump_request_handle *handle)
{
	if (handle->mode != SIDE_STATEDUMP_MODE_POLLING)
		return SIDE_ERROR_INVAL;
	_side_statedump_run_pending_requests(handle);
	return SIDE_ERROR_OK;
}

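/*
 * Illustrative polling-mode usage (sketch, assuming "h" is a handle
 * registered with SIDE_STATEDUMP_MODE_POLLING): an application event
 * loop services requests explicitly:
 *
 *	if (side_statedump_poll_pending_requests(h))
 *		(void) side_statedump_run_pending_requests(h);
 */
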
/*
 * Request a state dump for tracer callbacks identified with "key".
 */
int side_tracer_statedump_request(uint64_t key)
{
	struct side_statedump_request_handle *handle;

	if (key == SIDE_KEY_MATCH_ALL)
		return SIDE_ERROR_INVAL;
	pthread_mutex_lock(&side_statedump_lock);
	side_list_for_each_entry(handle, &side_statedump_list, node)
		queue_statedump_pending(handle, key);
	pthread_mutex_unlock(&side_statedump_lock);
	return SIDE_ERROR_OK;
}

/*
 * Cancel a statedump request.
 */
int side_tracer_statedump_request_cancel(uint64_t key)
{
	struct side_statedump_request_handle *handle;

	if (key == SIDE_KEY_MATCH_ALL)
		return SIDE_ERROR_INVAL;
	pthread_mutex_lock(&side_statedump_lock);
	side_list_for_each_entry(handle, &side_statedump_list, node)
		unqueue_statedump_pending(handle, key);
	pthread_mutex_unlock(&side_statedump_lock);
	return SIDE_ERROR_OK;
}

/*
 * Tracer keys are 64-bit. Return SIDE_ERROR_NOMEM on overflow
 * (which should never happen in practice).
 */
int side_tracer_request_key(uint64_t *key)
{
	int ret = SIDE_ERROR_OK;

	pthread_mutex_lock(&side_key_lock);
	if (side_key_next == 0) {
		ret = SIDE_ERROR_NOMEM;
		goto end;
	}
	*key = side_key_next++;
end:
	pthread_mutex_unlock(&side_key_lock);
	return ret;
}

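/*
 * Illustrative tracer-side flow (hypothetical names my_event_desc and
 * my_cb): a tracer allocates a private key, registers its callbacks
 * with it, then asks for a state dump replay targeted at that key
 * only.
 *
 *	uint64_t key;
 *
 *	if (side_tracer_request_key(&key) != SIDE_ERROR_OK)
 *		return;
 *	(void) side_tracer_callback_register(my_event_desc, my_cb, NULL, key);
 *	(void) side_tracer_statedump_request(key);
 */
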
void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&event_rcu_gp);
	side_rcu_gp_init(&statedump_rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&event_rcu_gp);
	side_rcu_gp_exit(&statedump_rcu_gp);
	finalized = true;
}