// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
#include <tgif/trace.h>

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Top 8 bits reserved for kernel tracer use. */
#if TGIF_BITS_PER_LONG == 64
# define TGIF_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define TGIF_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define TGIF_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define TGIF_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define TGIF_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define TGIF_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif
27 struct tgif_events_register_handle
{
28 struct tgif_list_node node
;
29 struct tgif_event_description
**events
;
33 struct tgif_tracer_handle
{
34 struct tgif_list_node node
;
35 void (*cb
)(enum tgif_tracer_notification notif
,
36 struct tgif_event_description
**events
, uint32_t nr_events
, void *priv
);
40 static struct tgif_rcu_gp_state rcu_gp
;
43 * Lazy initialization for early use within library constructors.
45 static bool initialized
;
47 * Do not register/unregister any more events after destructor.
49 static bool finalized
;
52 * Recursive mutex to allow tracer callbacks to use the tgif API.
54 static pthread_mutex_t tgif_lock
= PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
;
56 static DEFINE_TGIF_LIST_HEAD(tgif_events_list
);
57 static DEFINE_TGIF_LIST_HEAD(tgif_tracer_list
);
60 * The empty callback has a NULL function callback pointer, which stops
61 * iteration on the array of callbacks immediately.
63 const struct tgif_callback tgif_empty_callback
= { };
65 void tgif_call(const struct tgif_event_description
*desc
, const struct tgif_arg_vec
*tgif_arg_vec
)
67 struct tgif_rcu_read_state rcu_read_state
;
68 const struct tgif_callback
*tgif_cb
;
71 if (tgif_unlikely(finalized
))
73 if (tgif_unlikely(!initialized
))
75 if (tgif_unlikely(desc
->flags
& TGIF_EVENT_FLAG_VARIADIC
)) {
76 printf("ERROR: unexpected variadic event description\n");
79 enabled
= __atomic_load_n(desc
->enabled
, __ATOMIC_RELAXED
);
80 if (tgif_unlikely(enabled
& TGIF_EVENT_ENABLED_KERNEL_USER_EVENT_MASK
)) {
81 // TODO: call kernel write.
83 tgif_rcu_read_begin(&rcu_gp
, &rcu_read_state
);
84 for (tgif_cb
= tgif_rcu_dereference(desc
->callbacks
); tgif_cb
->u
.call
!= NULL
; tgif_cb
++)
85 tgif_cb
->u
.call(desc
, tgif_arg_vec
, tgif_cb
->priv
);
86 tgif_rcu_read_end(&rcu_gp
, &rcu_read_state
);
89 void tgif_call_variadic(const struct tgif_event_description
*desc
,
90 const struct tgif_arg_vec
*tgif_arg_vec
,
91 const struct tgif_arg_dynamic_struct
*var_struct
)
93 struct tgif_rcu_read_state rcu_read_state
;
94 const struct tgif_callback
*tgif_cb
;
97 if (tgif_unlikely(finalized
))
99 if (tgif_unlikely(!initialized
))
101 if (tgif_unlikely(!(desc
->flags
& TGIF_EVENT_FLAG_VARIADIC
))) {
102 printf("ERROR: unexpected non-variadic event description\n");
105 enabled
= __atomic_load_n(desc
->enabled
, __ATOMIC_RELAXED
);
106 if (tgif_unlikely(enabled
& TGIF_EVENT_ENABLED_KERNEL_USER_EVENT_MASK
)) {
107 // TODO: call kernel write.
109 tgif_rcu_read_begin(&rcu_gp
, &rcu_read_state
);
110 for (tgif_cb
= tgif_rcu_dereference(desc
->callbacks
); tgif_cb
->u
.call_variadic
!= NULL
; tgif_cb
++)
111 tgif_cb
->u
.call_variadic(desc
, tgif_arg_vec
, var_struct
, tgif_cb
->priv
);
112 tgif_rcu_read_end(&rcu_gp
, &rcu_read_state
);
116 const struct tgif_callback
*tgif_tracer_callback_lookup(
117 const struct tgif_event_description
*desc
,
118 void *call
, void *priv
)
120 const struct tgif_callback
*cb
;
122 for (cb
= desc
->callbacks
; cb
->u
.call
!= NULL
; cb
++) {
123 if ((void *) cb
->u
.call
== call
&& cb
->priv
== priv
)
130 int _tgif_tracer_callback_register(struct tgif_event_description
*desc
,
131 void *call
, void *priv
)
133 struct tgif_callback
*old_cb
, *new_cb
;
134 int ret
= TGIF_ERROR_OK
;
138 return TGIF_ERROR_INVAL
;
140 return TGIF_ERROR_EXITING
;
143 pthread_mutex_lock(&tgif_lock
);
144 old_nr_cb
= desc
->nr_callbacks
;
145 if (old_nr_cb
== UINT32_MAX
) {
146 ret
= TGIF_ERROR_INVAL
;
149 /* Reject duplicate (call, priv) tuples. */
150 if (tgif_tracer_callback_lookup(desc
, call
, priv
)) {
151 ret
= TGIF_ERROR_EXIST
;
154 old_cb
= (struct tgif_callback
*) desc
->callbacks
;
155 /* old_nr_cb + 1 (new cb) + 1 (NULL) */
156 new_cb
= (struct tgif_callback
*) calloc(old_nr_cb
+ 2, sizeof(struct tgif_callback
));
158 ret
= TGIF_ERROR_NOMEM
;
161 memcpy(new_cb
, old_cb
, old_nr_cb
);
162 if (desc
->flags
& TGIF_EVENT_FLAG_VARIADIC
)
163 new_cb
[old_nr_cb
].u
.call_variadic
=
164 (tgif_tracer_callback_variadic_func
) call
;
166 new_cb
[old_nr_cb
].u
.call
=
167 (tgif_tracer_callback_func
) call
;
168 new_cb
[old_nr_cb
].priv
= priv
;
169 tgif_rcu_assign_pointer(desc
->callbacks
, new_cb
);
170 tgif_rcu_wait_grace_period(&rcu_gp
);
173 desc
->nr_callbacks
++;
174 /* Increment concurrently with kernel setting the top bits. */
176 (void) __atomic_add_fetch(desc
->enabled
, 1, __ATOMIC_RELAXED
);
178 pthread_mutex_unlock(&tgif_lock
);
182 int tgif_tracer_callback_register(struct tgif_event_description
*desc
,
183 tgif_tracer_callback_func call
,
186 if (desc
->flags
& TGIF_EVENT_FLAG_VARIADIC
)
187 return TGIF_ERROR_INVAL
;
188 return _tgif_tracer_callback_register(desc
, (void *) call
, priv
);
191 int tgif_tracer_callback_variadic_register(struct tgif_event_description
*desc
,
192 tgif_tracer_callback_variadic_func call_variadic
,
195 if (!(desc
->flags
& TGIF_EVENT_FLAG_VARIADIC
))
196 return TGIF_ERROR_INVAL
;
197 return _tgif_tracer_callback_register(desc
, (void *) call_variadic
, priv
);
200 static int _tgif_tracer_callback_unregister(struct tgif_event_description
*desc
,
201 void *call
, void *priv
)
203 struct tgif_callback
*old_cb
, *new_cb
;
204 const struct tgif_callback
*cb_pos
;
206 int ret
= TGIF_ERROR_OK
;
210 return TGIF_ERROR_INVAL
;
212 return TGIF_ERROR_EXITING
;
215 pthread_mutex_lock(&tgif_lock
);
216 cb_pos
= tgif_tracer_callback_lookup(desc
, call
, priv
);
218 ret
= TGIF_ERROR_NOENT
;
221 old_nr_cb
= desc
->nr_callbacks
;
222 old_cb
= (struct tgif_callback
*) desc
->callbacks
;
223 if (old_nr_cb
== 1) {
224 new_cb
= (struct tgif_callback
*) &tgif_empty_callback
;
226 pos_idx
= cb_pos
- desc
->callbacks
;
227 /* Remove entry at pos_idx. */
228 /* old_nr_cb - 1 (removed cb) + 1 (NULL) */
229 new_cb
= (struct tgif_callback
*) calloc(old_nr_cb
, sizeof(struct tgif_callback
));
231 ret
= TGIF_ERROR_NOMEM
;
234 memcpy(new_cb
, old_cb
, pos_idx
);
235 memcpy(&new_cb
[pos_idx
], &old_cb
[pos_idx
+ 1], old_nr_cb
- pos_idx
- 1);
237 tgif_rcu_assign_pointer(desc
->callbacks
, new_cb
);
238 tgif_rcu_wait_grace_period(&rcu_gp
);
240 desc
->nr_callbacks
--;
241 /* Decrement concurrently with kernel setting the top bits. */
243 (void) __atomic_add_fetch(desc
->enabled
, -1, __ATOMIC_RELAXED
);
245 pthread_mutex_unlock(&tgif_lock
);
249 int tgif_tracer_callback_unregister(struct tgif_event_description
*desc
,
250 tgif_tracer_callback_func call
,
253 if (desc
->flags
& TGIF_EVENT_FLAG_VARIADIC
)
254 return TGIF_ERROR_INVAL
;
255 return _tgif_tracer_callback_unregister(desc
, (void *) call
, priv
);
258 int tgif_tracer_callback_variadic_unregister(struct tgif_event_description
*desc
,
259 tgif_tracer_callback_variadic_func call_variadic
,
262 if (!(desc
->flags
& TGIF_EVENT_FLAG_VARIADIC
))
263 return TGIF_ERROR_INVAL
;
264 return _tgif_tracer_callback_unregister(desc
, (void *) call_variadic
, priv
);
267 struct tgif_events_register_handle
*tgif_events_register(struct tgif_event_description
**events
, uint32_t nr_events
)
269 struct tgif_events_register_handle
*events_handle
= NULL
;
270 struct tgif_tracer_handle
*tracer_handle
;
276 events_handle
= (struct tgif_events_register_handle
*)
277 calloc(1, sizeof(struct tgif_events_register_handle
));
280 events_handle
->events
= events
;
281 events_handle
->nr_events
= nr_events
;
283 pthread_mutex_lock(&tgif_lock
);
284 tgif_list_insert_node_tail(&tgif_events_list
, &events_handle
->node
);
285 tgif_list_for_each_entry(tracer_handle
, &tgif_tracer_list
, node
) {
286 tracer_handle
->cb(TGIF_TRACER_NOTIFICATION_INSERT_EVENTS
,
287 events
, nr_events
, tracer_handle
->priv
);
289 pthread_mutex_unlock(&tgif_lock
);
290 //TODO: call event batch register ioctl
291 return events_handle
;
295 void tgif_event_remove_callbacks(struct tgif_event_description
*desc
)
297 uint32_t nr_cb
= desc
->nr_callbacks
;
298 struct tgif_callback
*old_cb
;
302 old_cb
= (struct tgif_callback
*) desc
->callbacks
;
303 (void) __atomic_add_fetch(desc
->enabled
, -1, __ATOMIC_RELAXED
);
305 * Setting the state back to 0 cb and empty callbacks out of
306 * caution. This should not matter because instrumentation is
309 desc
->nr_callbacks
= 0;
310 tgif_rcu_assign_pointer(desc
->callbacks
, &tgif_empty_callback
);
312 * No need to wait for grace period because instrumentation is
319 * Unregister event handle. At this point, all tgif events in that
320 * handle should be unreachable.
322 void tgif_events_unregister(struct tgif_events_register_handle
*events_handle
)
324 struct tgif_tracer_handle
*tracer_handle
;
333 pthread_mutex_lock(&tgif_lock
);
334 tgif_list_remove_node(&events_handle
->node
);
335 tgif_list_for_each_entry(tracer_handle
, &tgif_tracer_list
, node
) {
336 tracer_handle
->cb(TGIF_TRACER_NOTIFICATION_REMOVE_EVENTS
,
337 events_handle
->events
, events_handle
->nr_events
,
338 tracer_handle
->priv
);
340 for (i
= 0; i
< events_handle
->nr_events
; i
++) {
341 struct tgif_event_description
*event
= events_handle
->events
[i
];
343 /* Skip NULL pointers */
346 tgif_event_remove_callbacks(event
);
348 pthread_mutex_unlock(&tgif_lock
);
349 //TODO: call event batch unregister ioctl
353 struct tgif_tracer_handle
*tgif_tracer_event_notification_register(
354 void (*cb
)(enum tgif_tracer_notification notif
,
355 struct tgif_event_description
**events
, uint32_t nr_events
, void *priv
),
358 struct tgif_tracer_handle
*tracer_handle
;
359 struct tgif_events_register_handle
*events_handle
;
365 tracer_handle
= (struct tgif_tracer_handle
*)
366 calloc(1, sizeof(struct tgif_tracer_handle
));
369 pthread_mutex_lock(&tgif_lock
);
370 tracer_handle
->cb
= cb
;
371 tracer_handle
->priv
= priv
;
372 tgif_list_insert_node_tail(&tgif_tracer_list
, &tracer_handle
->node
);
373 tgif_list_for_each_entry(events_handle
, &tgif_events_list
, node
) {
374 cb(TGIF_TRACER_NOTIFICATION_INSERT_EVENTS
,
375 events_handle
->events
, events_handle
->nr_events
, priv
);
377 pthread_mutex_unlock(&tgif_lock
);
378 return tracer_handle
;
381 void tgif_tracer_event_notification_unregister(struct tgif_tracer_handle
*tracer_handle
)
383 struct tgif_events_register_handle
*events_handle
;
389 pthread_mutex_lock(&tgif_lock
);
390 tgif_list_for_each_entry(events_handle
, &tgif_events_list
, node
) {
391 tracer_handle
->cb(TGIF_TRACER_NOTIFICATION_REMOVE_EVENTS
,
392 events_handle
->events
, events_handle
->nr_events
,
393 tracer_handle
->priv
);
395 tgif_list_remove_node(&tracer_handle
->node
);
396 pthread_mutex_unlock(&tgif_lock
);
403 tgif_rcu_gp_init(&rcu_gp
);
408 * tgif_exit() is executed from a library destructor. It can be called
409 * explicitly at application exit as well. Concurrent tgif API use is
410 * not expected at that point.
414 struct tgif_events_register_handle
*handle
, *tmp
;
418 tgif_list_for_each_entry_safe(handle
, tmp
, &tgif_events_list
, node
)
419 tgif_events_unregister(handle
);
420 tgif_rcu_gp_exit(&rcu_gp
);