// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include <pthread.h>
#include <unistd.h>
#include <poll.h>

#include "compiler.h"
#include "rcu.h"
#include "list.h"
#include "rculist.h"

/* Top 8 bits reserved for shared tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x8000000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x4000000000000000ULL

/* Allow 2^56 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x80000000UL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x40000000UL

/* Allow 2^24 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFUL
#endif
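
/*
 * Worked example (illustrative): on 64-bit, an enabled word of
 * 0x8000000000000001ULL decodes as "user event tracing requested by the
 * kernel" (bit 63) plus one private tracer reference keeping the event
 * enabled (low 56 bits). The fast paths below only need to test the word
 * against SIDE_EVENT_ENABLED_SHARED_MASK to detect shared tracer
 * interest.
 */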

#define SIDE_KEY_RESERVED_RANGE_END	0x8

/* Key 0x0 is reserved to match all. */
#define SIDE_KEY_MATCH_ALL		0x0
/* Key 0x1 is reserved for user event. */
#define SIDE_KEY_USER_EVENT		0x1
/* Key 0x2 is reserved for ptrace. */
#define SIDE_KEY_PTRACE			0x2

#define SIDE_RETRY_BUSY_LOOP_ATTEMPTS	100
#define SIDE_RETRY_DELAY_MS		1

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

struct side_statedump_notification {
	struct side_list_node node;
	uint64_t key;
};

struct side_statedump_request_handle {
	struct side_list_node node;			/* Statedump request RCU list node. */
	struct side_list_head notification_queue;	/* Queue of struct side_statedump_notification. */
	void (*cb)(void *statedump_request_key);
	char *name;
	enum side_statedump_mode mode;
};

struct side_callback {
	union {
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			void *priv);
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			const struct side_arg_dynamic_struct *var_struct,
			void *priv);
	} u;
	void *priv;
	uint64_t key;
};

enum agent_thread_state {
	AGENT_THREAD_STATE_BLOCKED = 0,
	AGENT_THREAD_STATE_HANDLE_REQUEST = (1 << 0),
	AGENT_THREAD_STATE_EXIT = (1 << 1),
	AGENT_THREAD_STATE_PAUSE = (1 << 2),
	AGENT_THREAD_STATE_PAUSE_ACK = (1 << 3),
};

struct statedump_agent_thread {
	long ref;
	pthread_t id;
	enum agent_thread_state state;
	pthread_cond_t worker_cond;
	pthread_cond_t waiter_cond;
};

static struct side_rcu_gp_state event_rcu_gp, statedump_rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events once the library
 * destructor has run.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_event_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static pthread_mutex_t side_statedump_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static pthread_mutex_t side_key_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * The side_agent_thread_lock protects the lifetime of the agent
 * thread: reference counting, creation, join. It is not taken by
 * the agent thread itself, so it has no circular dependency with
 * pthread_join.
 * The side_statedump_lock nests inside the side_agent_thread_lock.
 */
static pthread_mutex_t side_agent_thread_lock = PTHREAD_MUTEX_INITIALIZER;

/* Dynamic tracer key allocation. */
static uint64_t side_key_next = SIDE_KEY_RESERVED_RANGE_END;

static struct statedump_agent_thread statedump_agent_thread;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The statedump request list is an RCU list to allow the agent thread
 * to iterate over it under an RCU read-side lock.
 */
static DEFINE_SIDE_LIST_HEAD(side_statedump_list);

/*
 * The empty callback has a NULL function pointer, which immediately
 * terminates iteration over the array of callbacks.
 */
const char side_empty_callback[sizeof(struct side_callback)];

side_static_event(side_statedump_begin, "side", "statedump_begin",
	SIDE_LOGLEVEL_INFO, side_field_list(side_field_string("name")));
side_static_event(side_statedump_end, "side", "statedump_end",
	SIDE_LOGLEVEL_INFO, side_field_list(side_field_string("name")));

/*
 * side_ptrace_hook is a placeholder for a debugger breakpoint.
 * var_struct is NULL if the event is not variadic.
 */
void side_ptrace_hook(const struct side_event_state *event_state __attribute__((unused)),
		const struct side_arg_vec *side_arg_vec __attribute__((unused)),
		const struct side_arg_dynamic_struct *var_struct __attribute__((unused)))
	__attribute__((noinline));
void side_ptrace_hook(const struct side_event_state *event_state __attribute__((unused)),
		const struct side_arg_vec *side_arg_vec __attribute__((unused)),
		const struct side_arg_dynamic_struct *var_struct __attribute__((unused)))
{
}

static
void _side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec, uint64_t key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(!(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_SHARED_MASK)) {
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK) &&
		    (key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_USER_EVENT)) {
			// TODO: call kernel write.
		}
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK) &&
		    (key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_PTRACE))
			side_ptrace_hook(event_state, side_arg_vec, NULL);
	}
	side_rcu_read_begin(&event_rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call != NULL; side_cb++) {
		if (key != SIDE_KEY_MATCH_ALL && side_cb->key != SIDE_KEY_MATCH_ALL && side_cb->key != key)
			continue;
		side_cb->u.call(es0->desc, side_arg_vec, side_cb->priv);
	}
	side_rcu_read_end(&event_rcu_gp, &rcu_read_state);
}

void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	_side_call(event_state, side_arg_vec, SIDE_KEY_MATCH_ALL);
}
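
/*
 * Instrumentation sketch (illustrative, not part of this file): a static
 * event is declared with side_static_event() and fired through the
 * instrumentation macros of <side/trace.h>, which build the side_arg_vec
 * on the stack and funnel into side_call(). This assumes a
 * side_event_call() macro mirroring the side_statedump_event_call()
 * macro used below for the statedump events.
 *
 *	side_static_event(my_provider_event, "myprovider", "myevent",
 *		SIDE_LOGLEVEL_DEBUG, side_field_list(side_field_string("msg")));
 *
 *	void my_instrumented_function(void)
 *	{
 *		side_event_call(my_provider_event,
 *			side_arg_list(side_arg_string("hello")));
 *	}
 */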

void side_statedump_call(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		void *statedump_request_key)
{
	_side_call(event_state, side_arg_vec, *(const uint64_t *) statedump_request_key);
}

static
void _side_call_variadic(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		const struct side_arg_dynamic_struct *var_struct,
		uint64_t key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_SHARED_MASK)) {
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK) &&
		    (key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_USER_EVENT)) {
			// TODO: call kernel write.
		}
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK) &&
		    (key == SIDE_KEY_MATCH_ALL || key == SIDE_KEY_PTRACE))
			side_ptrace_hook(event_state, side_arg_vec, var_struct);
	}
	side_rcu_read_begin(&event_rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call_variadic != NULL; side_cb++) {
		if (key != SIDE_KEY_MATCH_ALL && side_cb->key != SIDE_KEY_MATCH_ALL && side_cb->key != key)
			continue;
		side_cb->u.call_variadic(es0->desc, side_arg_vec, var_struct, side_cb->priv);
	}
	side_rcu_read_end(&event_rcu_gp, &rcu_read_state);
}

void side_call_variadic(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		const struct side_arg_dynamic_struct *var_struct)
{
	_side_call_variadic(event_state, side_arg_vec, var_struct, SIDE_KEY_MATCH_ALL);
}

void side_statedump_call_variadic(const struct side_event_state *event_state,
		const struct side_arg_vec *side_arg_vec,
		const struct side_arg_dynamic_struct *var_struct,
		void *statedump_request_key)
{
	_side_call_variadic(event_state, side_arg_vec, var_struct, *(const uint64_t *) statedump_request_key);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv, uint64_t key)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_event_state_0 *es0;
	const struct side_callback *cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	for (cb = es0->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv && cb->key == key)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv, uint64_t key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	struct side_event_state_0 *es0;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	old_nr_cb = es0->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv, key)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) es0->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	/* Copy the old callback entries (not just old_nr_cb bytes). */
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	new_cb[old_nr_cb].key = key;
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&event_rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	es0->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_event_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, uint64_t key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, uint64_t key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv, key);
}
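
/*
 * Registration sketch (illustrative): a tracer typically allocates a key
 * with side_tracer_request_key() and attaches a callback matching the
 * non-variadic signature to an event description; my_tracer_cb and
 * my_tracer_priv are hypothetical names.
 *
 *	static void my_tracer_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec *side_arg_vec, void *priv)
 *	{
 *		// Consume the event payload here.
 *	}
 *
 *	uint64_t key;
 *
 *	if (side_tracer_request_key(&key) != SIDE_ERROR_OK)
 *		return;
 *	// desc obtained from an event notification callback.
 *	if (side_tracer_callback_register(desc, my_tracer_cb,
 *			my_tracer_priv, key) != SIDE_ERROR_OK)
 *		return;
 */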

static
int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv, uint64_t key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	struct side_event_state_0 *es0;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	cb_pos = side_tracer_callback_lookup(desc, call, priv, key);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = es0->nr_callbacks;
	old_cb = (struct side_callback *) es0->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - es0->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		/* Copy entries around the removed slot (sizes in entries, not bytes). */
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&event_rcu_gp);
	free(old_cb);
	es0->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_event_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv, uint64_t key)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv, key);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv, uint64_t key)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv, key);
}

struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
		calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_event_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_event_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	struct side_event_state_0 *es0;
	struct side_callback *old_cb;
	uint32_t nr_cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	nr_cb = es0->nr_callbacks;
	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) es0->callbacks;
	(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Reset the state to 0 callbacks and the empty callback array out
	 * of caution. This should not matter because the instrumentation
	 * is unreachable.
	 */
	es0->nr_callbacks = 0;
	side_rcu_assign_pointer(es0->callbacks, (const struct side_callback *) &side_empty_callback);
	/*
	 * No need to wait for a grace period because the instrumentation
	 * is unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers. */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_event_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
		calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_event_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_event_lock);
	return tracer_handle;
}
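
/*
 * Notification sketch (illustrative): a tracer registers a callback to
 * learn about event description arrays as instrumented libraries are
 * loaded and unloaded; my_notif_cb is a hypothetical name.
 *
 *	static void my_notif_cb(enum side_tracer_notification notif,
 *			struct side_event_description **events,
 *			uint32_t nr_events, void *priv)
 *	{
 *		uint32_t i;
 *
 *		for (i = 0; i < nr_events; i++) {
 *			if (notif == SIDE_TRACER_NOTIFICATION_INSERT_EVENTS) {
 *				// Register tracer callbacks on events[i].
 *			} else {
 *				// Tear down tracer state for events[i].
 *			}
 *		}
 *	}
 *
 *	struct side_tracer_handle *th =
 *		side_tracer_event_notification_register(my_notif_cb, NULL);
 */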

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_event_lock);
	free(tracer_handle);
}

/* Called with side_statedump_lock held. */
static
void queue_statedump_pending(struct side_statedump_request_handle *handle, uint64_t key)
{
	struct side_statedump_notification *notif;

	notif = (struct side_statedump_notification *) calloc(1, sizeof(struct side_statedump_notification));
	if (!notif)
		abort();
	notif->key = key;
	side_list_insert_node_tail(&handle->notification_queue, &notif->node);
	if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD) {
		(void) __atomic_or_fetch(&statedump_agent_thread.state, AGENT_THREAD_STATE_HANDLE_REQUEST, __ATOMIC_SEQ_CST);
		pthread_cond_broadcast(&statedump_agent_thread.worker_cond);
	}
}

/* Called with side_statedump_lock held. */
static
void unqueue_statedump_pending(struct side_statedump_request_handle *handle, uint64_t key)
{
	struct side_statedump_notification *notif, *tmp;

	side_list_for_each_entry_safe(notif, tmp, &handle->notification_queue, node) {
		if (key == SIDE_KEY_MATCH_ALL || key == notif->key) {
			side_list_remove_node(&notif->node);
			free(notif);
		}
	}
}

static
void side_statedump_run(struct side_statedump_request_handle *handle,
		struct side_statedump_notification *notif)
{
	side_statedump_event_call(side_statedump_begin, &notif->key,
		side_arg_list(side_arg_string(handle->name)));
	/* Invoke the state dump callback specifically for the tracer key. */
	handle->cb(&notif->key);
	side_statedump_event_call(side_statedump_end, &notif->key,
		side_arg_list(side_arg_string(handle->name)));
}

static
void _side_statedump_run_pending_requests(struct side_statedump_request_handle *handle)
{
	struct side_statedump_notification *notif, *tmp;
	DEFINE_SIDE_LIST_HEAD(tmp_head);

	pthread_mutex_lock(&side_statedump_lock);
	side_list_splice(&handle->notification_queue, &tmp_head);
	side_list_head_init(&handle->notification_queue);
	pthread_mutex_unlock(&side_statedump_lock);

	/* We are now sole owner of the tmp_head list. */
	side_list_for_each_entry(notif, &tmp_head, node)
		side_statedump_run(handle, notif);
	side_list_for_each_entry_safe(notif, tmp, &tmp_head, node)
		free(notif);

	if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD) {
		pthread_mutex_lock(&side_statedump_lock);
		pthread_cond_broadcast(&statedump_agent_thread.waiter_cond);
		pthread_mutex_unlock(&side_statedump_lock);
	}
}

static
void *statedump_agent_func(void *arg __attribute__((unused)))
{
	for (;;) {
		struct side_statedump_request_handle *handle;
		struct side_rcu_read_state rcu_read_state;
		enum agent_thread_state state;

		pthread_mutex_lock(&side_statedump_lock);
		for (;;) {
			state = __atomic_load_n(&statedump_agent_thread.state, __ATOMIC_SEQ_CST);
			if (state == AGENT_THREAD_STATE_BLOCKED)
				pthread_cond_wait(&statedump_agent_thread.worker_cond, &side_statedump_lock);
			else
				break;
		}
		pthread_mutex_unlock(&side_statedump_lock);
		if (state & AGENT_THREAD_STATE_EXIT)
			break;
		if (state & AGENT_THREAD_STATE_PAUSE) {
			int attempt = 0;

			(void) __atomic_or_fetch(&statedump_agent_thread.state, AGENT_THREAD_STATE_PAUSE_ACK, __ATOMIC_SEQ_CST);
			for (;;) {
				state = __atomic_load_n(&statedump_agent_thread.state, __ATOMIC_SEQ_CST);
				if (!(state & AGENT_THREAD_STATE_PAUSE))
					break;
				if (attempt > SIDE_RETRY_BUSY_LOOP_ATTEMPTS) {
					(void) poll(NULL, 0, SIDE_RETRY_DELAY_MS);
					continue;
				}
				attempt++;
				side_cpu_relax();
			}
			continue;
		}
		(void) __atomic_and_fetch(&statedump_agent_thread.state, ~AGENT_THREAD_STATE_HANDLE_REQUEST, __ATOMIC_SEQ_CST);
		side_rcu_read_begin(&statedump_rcu_gp, &rcu_read_state);
		side_list_for_each_entry_rcu(handle, &side_statedump_list, node)
			_side_statedump_run_pending_requests(handle);
		side_rcu_read_end(&statedump_rcu_gp, &rcu_read_state);
	}
	return NULL;
}

static
void statedump_agent_thread_init(void)
{
	pthread_cond_init(&statedump_agent_thread.worker_cond, NULL);
	pthread_cond_init(&statedump_agent_thread.waiter_cond, NULL);
	statedump_agent_thread.state = AGENT_THREAD_STATE_BLOCKED;
}

/* Called with side_agent_thread_lock and side_statedump_lock held. */
static
void statedump_agent_thread_get(void)
{
	int ret;

	if (statedump_agent_thread.ref++)
		return;
	statedump_agent_thread_init();
	ret = pthread_create(&statedump_agent_thread.id, NULL,
		statedump_agent_func, NULL);
	if (ret)
		abort();
}

/*
 * Called with side_agent_thread_lock and side_statedump_lock held.
 * Returns true if a join of the agent thread is needed.
 */
static
bool statedump_agent_thread_put(void)
{
	if (--statedump_agent_thread.ref)
		return false;
	(void) __atomic_or_fetch(&statedump_agent_thread.state, AGENT_THREAD_STATE_EXIT, __ATOMIC_SEQ_CST);
	pthread_cond_broadcast(&statedump_agent_thread.worker_cond);
	return true;
}

static
void statedump_agent_thread_fini(void)
{
	statedump_agent_thread.state = AGENT_THREAD_STATE_BLOCKED;
	if (pthread_cond_destroy(&statedump_agent_thread.worker_cond))
		abort();
	if (pthread_cond_destroy(&statedump_agent_thread.waiter_cond))
		abort();
}

/* Called with side_agent_thread_lock held. */
static
void statedump_agent_thread_join(void)
{
	int ret;
	void *retval;

	ret = pthread_join(statedump_agent_thread.id, &retval);
	if (ret)
		abort();
	statedump_agent_thread_fini();
}

struct side_statedump_request_handle *
side_statedump_request_notification_register(const char *state_name,
		void (*statedump_cb)(void *statedump_request_key),
		enum side_statedump_mode mode)
{
	struct side_statedump_request_handle *handle;
	char *name;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	handle = (struct side_statedump_request_handle *)
		calloc(1, sizeof(struct side_statedump_request_handle));
	if (!handle)
		return NULL;
	name = strdup(state_name);
	if (!name)
		goto name_nomem;
	handle->cb = statedump_cb;
	handle->name = name;
	handle->mode = mode;
	side_list_head_init(&handle->notification_queue);

	if (mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
		pthread_mutex_lock(&side_agent_thread_lock);
	pthread_mutex_lock(&side_statedump_lock);
	if (mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
		statedump_agent_thread_get();
	side_list_insert_node_tail_rcu(&side_statedump_list, &handle->node);
	/* Queue statedump pending for all tracers. */
	queue_statedump_pending(handle, SIDE_KEY_MATCH_ALL);
	pthread_mutex_unlock(&side_statedump_lock);

	if (mode == SIDE_STATEDUMP_MODE_AGENT_THREAD) {
		pthread_mutex_unlock(&side_agent_thread_lock);

		pthread_mutex_lock(&side_statedump_lock);
		while (!side_list_empty(&handle->notification_queue))
			pthread_cond_wait(&statedump_agent_thread.waiter_cond, &side_statedump_lock);
		pthread_mutex_unlock(&side_statedump_lock);
	}

	return handle;

name_nomem:
	free(handle);
	return NULL;
}

void side_statedump_request_notification_unregister(struct side_statedump_request_handle *handle)
{
	bool join = false;

	if (finalized)
		return;
	if (!initialized)
		side_init();

	if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
		pthread_mutex_lock(&side_agent_thread_lock);
	pthread_mutex_lock(&side_statedump_lock);
	unqueue_statedump_pending(handle, SIDE_KEY_MATCH_ALL);
	side_list_remove_node_rcu(&handle->node);
	if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
		join = statedump_agent_thread_put();
	pthread_mutex_unlock(&side_statedump_lock);
	if (join)
		statedump_agent_thread_join();
	if (handle->mode == SIDE_STATEDUMP_MODE_AGENT_THREAD)
		pthread_mutex_unlock(&side_agent_thread_lock);

	side_rcu_wait_grace_period(&statedump_rcu_gp);
	free(handle->name);
	free(handle);
}

/* Returns true if the handle has pending statedump requests. */
bool side_statedump_poll_pending_requests(struct side_statedump_request_handle *handle)
{
	bool ret;

	if (handle->mode != SIDE_STATEDUMP_MODE_POLLING)
		return false;
	pthread_mutex_lock(&side_statedump_lock);
	ret = !side_list_empty(&handle->notification_queue);
	pthread_mutex_unlock(&side_statedump_lock);
	return ret;
}

/*
 * Only polling-mode statedump handles allow the application to handle
 * pending requests explicitly.
 */
int side_statedump_run_pending_requests(struct side_statedump_request_handle *handle)
{
	if (handle->mode != SIDE_STATEDUMP_MODE_POLLING)
		return SIDE_ERROR_INVAL;
	_side_statedump_run_pending_requests(handle);
	return SIDE_ERROR_OK;
}
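
/*
 * Polling sketch (illustrative): an application owning its own event
 * loop registers a statedump callback in polling mode and services
 * requests itself; dump_my_state is a hypothetical name. The callback
 * must pass statedump_request_key through to side_statedump_call() or
 * side_statedump_event_call() so the emitted events reach only the
 * requesting tracer.
 *
 *	static void dump_my_state(void *statedump_request_key)
 *	{
 *		// Emit application state with side_statedump_event_call(),
 *		// forwarding statedump_request_key.
 *	}
 *
 *	struct side_statedump_request_handle *h =
 *		side_statedump_request_notification_register("my-state",
 *			dump_my_state, SIDE_STATEDUMP_MODE_POLLING);
 *
 *	// In the application's event loop:
 *	if (side_statedump_poll_pending_requests(h))
 *		(void) side_statedump_run_pending_requests(h);
 */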

/*
 * Request a state dump for tracer callbacks identified with "key".
 */
int side_tracer_statedump_request(uint64_t key)
{
	struct side_statedump_request_handle *handle;

	if (key == SIDE_KEY_MATCH_ALL)
		return SIDE_ERROR_INVAL;
	pthread_mutex_lock(&side_statedump_lock);
	side_list_for_each_entry(handle, &side_statedump_list, node)
		queue_statedump_pending(handle, key);
	pthread_mutex_unlock(&side_statedump_lock);
	return SIDE_ERROR_OK;
}

/*
 * Cancel a statedump request.
 */
int side_tracer_statedump_request_cancel(uint64_t key)
{
	struct side_statedump_request_handle *handle;

	if (key == SIDE_KEY_MATCH_ALL)
		return SIDE_ERROR_INVAL;
	pthread_mutex_lock(&side_statedump_lock);
	side_list_for_each_entry(handle, &side_statedump_list, node)
		unqueue_statedump_pending(handle, key);
	pthread_mutex_unlock(&side_statedump_lock);
	return SIDE_ERROR_OK;
}

/*
 * Tracer keys are 64-bit values. Return SIDE_ERROR_NOMEM on overflow
 * (which should never happen in practice).
 */
int side_tracer_request_key(uint64_t *key)
{
	int ret = SIDE_ERROR_OK;

	pthread_mutex_lock(&side_key_lock);
	if (side_key_next == 0) {
		ret = SIDE_ERROR_NOMEM;
		goto end;
	}
	*key = side_key_next++;
end:
	pthread_mutex_unlock(&side_key_lock);
	return ret;
}
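
/*
 * Tracer-side sketch (illustrative): once a tracer holds a key, it can
 * target a state dump at its own callbacks only, or cancel a request
 * that has not been serviced yet.
 *
 *	uint64_t key;
 *
 *	if (side_tracer_request_key(&key) != SIDE_ERROR_OK)
 *		return;
 *	(void) side_tracer_statedump_request(key);
 *	// ... or, before it has been serviced:
 *	(void) side_tracer_statedump_request_cancel(key);
 */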

/*
 * Use of pthread_atfork() requires glibc 2.24 or newer to eliminate
 * hangs when waiting for the agent thread if the agent thread calls
 * malloc. The hang is fixed by GNU libc
 * commit 8a727af925be63aa6ea0f5f90e16751fd541626b.
 * Ref. https://bugzilla.redhat.com/show_bug.cgi?id=906468
 */
static
void side_before_fork(void)
{
	int attempt = 0;

	pthread_mutex_lock(&side_agent_thread_lock);
	if (!statedump_agent_thread.ref)
		return;
	/* Pause the agent thread. */
	pthread_mutex_lock(&side_statedump_lock);
	(void) __atomic_or_fetch(&statedump_agent_thread.state, AGENT_THREAD_STATE_PAUSE, __ATOMIC_SEQ_CST);
	pthread_cond_broadcast(&statedump_agent_thread.worker_cond);
	pthread_mutex_unlock(&side_statedump_lock);
	/* Wait for the agent thread to acknowledge the pause. */
	while (!(__atomic_load_n(&statedump_agent_thread.state, __ATOMIC_SEQ_CST) & AGENT_THREAD_STATE_PAUSE_ACK)) {
		if (attempt > SIDE_RETRY_BUSY_LOOP_ATTEMPTS) {
			(void) poll(NULL, 0, SIDE_RETRY_DELAY_MS);
			continue;
		}
		attempt++;
		side_cpu_relax();
	}
}

static
void side_after_fork_parent(void)
{
	if (statedump_agent_thread.ref)
		(void) __atomic_and_fetch(&statedump_agent_thread.state,
			~(AGENT_THREAD_STATE_PAUSE | AGENT_THREAD_STATE_PAUSE_ACK),
			__ATOMIC_SEQ_CST);
	pthread_mutex_unlock(&side_agent_thread_lock);
}

/*
 * The agent thread does not exist in the child process after a fork.
 * Re-initialize its data structures and create a new agent thread.
 */
static
void side_after_fork_child(void)
{
	if (statedump_agent_thread.ref) {
		int ret;

		statedump_agent_thread_fini();
		statedump_agent_thread_init();
		ret = pthread_create(&statedump_agent_thread.id, NULL,
			statedump_agent_func, NULL);
		if (ret)
			abort();
	}
	pthread_mutex_unlock(&side_agent_thread_lock);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&event_rcu_gp);
	side_rcu_gp_init(&statedump_rcu_gp);
	if (pthread_atfork(side_before_fork, side_after_fork_parent, side_after_fork_child))
		abort();
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&event_rcu_gp);
	side_rcu_gp_exit(&statedump_rcu_gp);
	finalized = true;
}