SoW-2020-0002: Trace Hit Counters: Implement key-addressed counters in shared memory...
deliverable/lttng-ust.git: liblttng-ust/lttng-events.c
1 /*
2 * lttng-events.c
3 *
4 * Holds LTTng per-session event registry.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #define _GNU_SOURCE
24 #define _LGPL_SOURCE
25 #include <stdio.h>
26 #include <assert.h>
27 #include <errno.h>
28 #include <limits.h>
29 #include <pthread.h>
30 #include <sys/shm.h>
31 #include <sys/ipc.h>
32 #include <stdint.h>
33 #include <stddef.h>
34 #include <inttypes.h>
35 #include <time.h>
36 #include <stdbool.h>
37 #include <unistd.h>
38 #include <lttng/ust-endian.h>
39
40 #include <urcu-bp.h>
41 #include <urcu/arch.h>
42 #include <urcu/compiler.h>
43 #include <urcu/hlist.h>
44 #include <urcu/list.h>
45 #include <urcu/uatomic.h>
46
47 #include <lttng/tracepoint.h>
48 #include <lttng/ust-events.h>
49
50 #include <usterr-signal-safe.h>
51 #include <helper.h>
52 #include <lttng/ust-ctl.h>
53 #include <ust-comm.h>
54 #include <ust-fd.h>
55 #include <lttng/ust-dynamic-type.h>
56 #include <lttng/ust-context-provider.h>
57 #include "error.h"
58 #include "compat.h"
59 #include "lttng-ust-uuid.h"
60
61 #include "tracepoint-internal.h"
62 #include "string-utils.h"
63 #include "lttng-bytecode.h"
64 #include "lttng-tracer.h"
65 #include "lttng-tracer-core.h"
66 #include "lttng-ust-statedump.h"
67 #include "context-internal.h"
68 #include "ust-events-internal.h"
69 #include "wait.h"
70 #include "../libringbuffer/shm.h"
71 #include "../libcounter/counter.h"
72 #include "jhash.h"
73 #include <lttng/ust-abi.h>
74
75 /*
76 * All operations within this file are called by the communication
77 * thread, under ust_lock protection.
78 */
79
80 static CDS_LIST_HEAD(sessions);
81 static CDS_LIST_HEAD(trigger_groups);
82
83 struct cds_list_head *_lttng_get_sessions(void)
84 {
85 return &sessions;
86 }
87
88 static void _lttng_event_destroy(struct lttng_event *event);
89 static void _lttng_trigger_destroy(struct lttng_trigger *trigger);
90 static void _lttng_enum_destroy(struct lttng_enum *_enum);
91
92 static
93 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
94 static
95 void lttng_session_sync_event_enablers(struct lttng_session *session);
96 static
97 void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group);
98 static
99 void lttng_enabler_destroy(struct lttng_enabler *enabler);
100
101 /*
102 * Called with ust lock held.
103 */
104 int lttng_session_active(void)
105 {
106 struct lttng_session *iter;
107
108 cds_list_for_each_entry(iter, &sessions, node) {
109 if (iter->active)
110 return 1;
111 }
112 return 0;
113 }
114
115 static
116 int lttng_loglevel_match(int loglevel,
117 unsigned int has_loglevel,
118 enum lttng_ust_loglevel_type req_type,
119 int req_loglevel)
120 {
121 if (!has_loglevel)
122 loglevel = TRACE_DEFAULT;
123 switch (req_type) {
124 case LTTNG_UST_LOGLEVEL_RANGE:
125 if (loglevel <= req_loglevel
126 || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
127 return 1;
128 else
129 return 0;
130 case LTTNG_UST_LOGLEVEL_SINGLE:
131 if (loglevel == req_loglevel
132 || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
133 return 1;
134 else
135 return 0;
136 case LTTNG_UST_LOGLEVEL_ALL:
137 default:
138 if (loglevel <= TRACE_DEBUG)
139 return 1;
140 else
141 return 0;
142 }
143 }
144
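/*
 * Illustrative usage sketch for the matcher above, using only the
 * TRACE_DEFAULT and TRACE_DEBUG constants already referenced in this
 * file. The helper name below is hypothetical and not part of the
 * original file.
 */
static inline
int lttng_loglevel_match_example(void)
{
	/* A RANGE request at TRACE_DEBUG accepts TRACE_DEBUG and anything more severe. */
	int range_hit = lttng_loglevel_match(TRACE_DEFAULT, 1,
			LTTNG_UST_LOGLEVEL_RANGE, TRACE_DEBUG);
	/* A SINGLE request only accepts the exact requested loglevel. */
	int single_hit = lttng_loglevel_match(TRACE_DEBUG, 1,
			LTTNG_UST_LOGLEVEL_SINGLE, TRACE_DEBUG);

	/* Both calls above match. */
	return range_hit && single_hit;
}
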
145 void synchronize_trace(void)
146 {
147 synchronize_rcu();
148 }
149
150 struct lttng_session *lttng_session_create(void)
151 {
152 struct lttng_session *session;
153 int i;
154
155 session = zmalloc(sizeof(struct lttng_session));
156 if (!session)
157 return NULL;
158 if (lttng_context_init_all(&session->ctx)) {
159 free(session);
160 return NULL;
161 }
162 CDS_INIT_LIST_HEAD(&session->chan_head);
163 CDS_INIT_LIST_HEAD(&session->events_head);
164 CDS_INIT_LIST_HEAD(&session->enums_head);
165 CDS_INIT_LIST_HEAD(&session->enablers_head);
166 for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
167 CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
168 for (i = 0; i < LTTNG_UST_ENUM_HT_SIZE; i++)
169 CDS_INIT_HLIST_HEAD(&session->enums_ht.table[i]);
170 cds_list_add(&session->node, &sessions);
171 return session;
172 }
173
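/*
 * Minimal lifecycle sketch: pairing lttng_session_create() with
 * lttng_session_destroy() under the ust lock, following the
 * ust_lock()/ust_unlock() pattern used elsewhere in this file. In
 * practice the session daemon drives this through the UST ABI; the
 * helper name below is hypothetical and not part of the original file.
 */
static inline
void lttng_session_lifecycle_example(void)
{
	struct lttng_session *session;

	if (ust_lock())
		goto end;
	session = lttng_session_create();
	if (!session)
		goto end;
	/* Channels, enablers and contexts would normally be set up here. */
	lttng_session_destroy(session);
end:
	ust_unlock();
}
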
174 struct lttng_counter *lttng_ust_counter_create(
175 const char *counter_transport_name,
176 size_t number_dimensions, const struct lttng_counter_dimension *dimensions)
177 {
178 struct lttng_counter_transport *counter_transport = NULL;
179 struct lttng_counter *counter = NULL;
180
181 counter_transport = lttng_counter_transport_find(counter_transport_name);
182 if (!counter_transport)
183 goto notransport;
184 counter = zmalloc(sizeof(struct lttng_counter));
185 if (!counter)
186 goto nomem;
187
188 /* Create trigger error counter. */
189 counter->ops = &counter_transport->ops;
190 counter->transport = counter_transport;
191
192 counter->counter = counter->ops->counter_create(
193 number_dimensions, dimensions, 0,
194 -1, 0, NULL, false);
195 if (!counter->counter) {
196 goto create_error;
197 }
198
199 return counter;
200
201 create_error:
202 free(counter);
203 nomem:
204 notransport:
205 return NULL;
206 }
207
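/*
 * Usage sketch: creating a one-dimensional counter such as the trigger
 * error counter attached to a trigger group. The transport name
 * "counter-per-cpu-64-modular" and the ".size" dimension field are
 * assumptions about the libcounter API in ../libcounter/counter.h; the
 * helper name below is hypothetical and not part of the original file.
 */
static inline
struct lttng_counter *lttng_error_counter_example(void)
{
	struct lttng_counter_dimension dimensions[1] = {
		{ .size = 4096 },	/* Number of counter buckets (assumed field name). */
	};
	struct lttng_counter *counter;

	counter = lttng_ust_counter_create("counter-per-cpu-64-modular",
			1, dimensions);
	if (!counter)
		return NULL;
	/* Increments go through counter->ops; tear down with lttng_ust_counter_destroy(). */
	return counter;
}
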
208 static
209 void lttng_ust_counter_destroy(struct lttng_counter *counter)
210 {
211 counter->ops->counter_destroy(counter->counter);
212 free(counter);
213 }
214
215 struct lttng_trigger_group *lttng_trigger_group_create(void)
216 {
217 struct lttng_trigger_group *trigger_group;
218 int i;
219
220 trigger_group = zmalloc(sizeof(struct lttng_trigger_group));
221 if (!trigger_group)
222 return NULL;
223
224 /* Add all contexts. */
225 if (lttng_context_init_all(&trigger_group->ctx)) {
226 free(trigger_group);
227 return NULL;
228 }
229
230 CDS_INIT_LIST_HEAD(&trigger_group->enablers_head);
231 CDS_INIT_LIST_HEAD(&trigger_group->triggers_head);
232 for (i = 0; i < LTTNG_UST_TRIGGER_HT_SIZE; i++)
233 CDS_INIT_HLIST_HEAD(&trigger_group->triggers_ht.table[i]);
234
235 cds_list_add(&trigger_group->node, &trigger_groups);
236
237 return trigger_group;
238 }
239
240 /*
241 * Only used internally at session destruction.
242 */
243 static
244 void _lttng_channel_unmap(struct lttng_channel *lttng_chan)
245 {
246 struct channel *chan;
247 struct lttng_ust_shm_handle *handle;
248
249 cds_list_del(&lttng_chan->node);
250 lttng_destroy_context(lttng_chan->ctx);
251 chan = lttng_chan->chan;
252 handle = lttng_chan->handle;
253 /*
254 * note: lttng_chan is private data contained within handle. It
255 * will be freed along with the handle.
256 */
257 channel_destroy(chan, handle, 0);
258 }
259
260 static
261 void register_event(struct lttng_event *event)
262 {
263 int ret;
264 const struct lttng_event_desc *desc;
265
266 assert(event->registered == 0);
267 desc = event->desc;
268 ret = __tracepoint_probe_register_queue_release(desc->name,
269 desc->probe_callback,
270 event, desc->signature);
271 WARN_ON_ONCE(ret);
272 if (!ret)
273 event->registered = 1;
274 }
275
276 static
277 void register_trigger(struct lttng_trigger *trigger)
278 {
279 int ret;
280 const struct lttng_event_desc *desc;
281
282 assert(trigger->registered == 0);
283 desc = trigger->desc;
284 ret = __tracepoint_probe_register_queue_release(desc->name,
285 desc->u.ext.trigger_callback, trigger, desc->signature);
286 WARN_ON_ONCE(ret);
287 if (!ret)
288 trigger->registered = 1;
289 }
290
291 static
292 void unregister_event(struct lttng_event *event)
293 {
294 int ret;
295 const struct lttng_event_desc *desc;
296
297 assert(event->registered == 1);
298 desc = event->desc;
299 ret = __tracepoint_probe_unregister_queue_release(desc->name,
300 desc->probe_callback,
301 event);
302 WARN_ON_ONCE(ret);
303 if (!ret)
304 event->registered = 0;
305 }
306
307 static
308 void unregister_trigger(struct lttng_trigger *trigger)
309 {
310 int ret;
311 const struct lttng_event_desc *desc;
312
313 assert(trigger->registered == 1);
314 desc = trigger->desc;
315 ret = __tracepoint_probe_unregister_queue_release(desc->name,
316 desc->u.ext.trigger_callback, trigger);
317 WARN_ON_ONCE(ret);
318 if (!ret)
319 trigger->registered = 0;
320 }
321
322 /*
323 * Only used internally at session destruction.
324 */
325 static
326 void _lttng_event_unregister(struct lttng_event *event)
327 {
328 if (event->registered)
329 unregister_event(event);
330 }
331
332 /*
333 * Only used internally at session destruction.
334 */
335 static
336 void _lttng_trigger_unregister(struct lttng_trigger *trigger)
337 {
338 if (trigger->registered)
339 unregister_trigger(trigger);
340 }
341
342 void lttng_session_destroy(struct lttng_session *session)
343 {
344 struct lttng_channel *chan, *tmpchan;
345 struct lttng_event *event, *tmpevent;
346 struct lttng_enum *_enum, *tmp_enum;
347 struct lttng_event_enabler *event_enabler, *event_tmpenabler;
348
349 CMM_ACCESS_ONCE(session->active) = 0;
350 cds_list_for_each_entry(event, &session->events_head, node) {
351 _lttng_event_unregister(event);
352 }
353 synchronize_trace(); /* Wait for in-flight events to complete */
354 __tracepoint_probe_prune_release_queue();
355 cds_list_for_each_entry_safe(event_enabler, event_tmpenabler,
356 &session->enablers_head, node)
357 lttng_event_enabler_destroy(event_enabler);
358 cds_list_for_each_entry_safe(event, tmpevent,
359 &session->events_head, node)
360 _lttng_event_destroy(event);
361 cds_list_for_each_entry_safe(_enum, tmp_enum,
362 &session->enums_head, node)
363 _lttng_enum_destroy(_enum);
364 cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
365 _lttng_channel_unmap(chan);
366 cds_list_del(&session->node);
367 lttng_destroy_context(session->ctx);
368 free(session);
369 }
370
371 void lttng_trigger_group_destroy(
372 struct lttng_trigger_group *trigger_group)
373 {
374 int close_ret;
375 struct lttng_trigger_enabler *trigger_enabler, *tmptrigger_enabler;
376 struct lttng_trigger *trigger, *tmptrigger;
377
378 if (!trigger_group) {
379 return;
380 }
381
382 cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node)
383 _lttng_trigger_unregister(trigger);
384
385 synchronize_trace();
386
387 cds_list_for_each_entry_safe(trigger_enabler, tmptrigger_enabler,
388 &trigger_group->enablers_head, node)
389 lttng_trigger_enabler_destroy(trigger_enabler);
390
391 cds_list_for_each_entry_safe(trigger, tmptrigger,
392 &trigger_group->triggers_head, node)
393 _lttng_trigger_destroy(trigger);
394
395 if (trigger_group->error_counter)
396 lttng_ust_counter_destroy(trigger_group->error_counter);
397
398 /* Close the notification fd to the trigger listener. */
399
400 lttng_ust_lock_fd_tracker();
401 close_ret = close(trigger_group->notification_fd);
402 if (!close_ret) {
403 lttng_ust_delete_fd_from_tracker(trigger_group->notification_fd);
404 } else {
405 PERROR("close");
406 abort();
407 }
408 lttng_ust_unlock_fd_tracker();
409
410 cds_list_del(&trigger_group->node);
411
412 free(trigger_group);
413 }
414
415 static
416 void lttng_enabler_destroy(struct lttng_enabler *enabler)
417 {
418 struct lttng_ust_bytecode_node *filter_node, *tmp_filter_node;
419 struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
420
421 if (!enabler) {
422 return;
423 }
424
425 /* Destroy filter bytecode */
426 cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
427 &enabler->filter_bytecode_head, node) {
428 free(filter_node);
429 }
430
431 /* Destroy excluders */
432 cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
433 &enabler->excluder_head, node) {
434 free(excluder_node);
435 }
436 }
437
438 void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler)
439 {
440 if (!trigger_enabler) {
441 return;
442 }
443
444 cds_list_del(&trigger_enabler->node);
445
446 lttng_enabler_destroy(lttng_trigger_enabler_as_enabler(trigger_enabler));
447
448 free(trigger_enabler);
449 }
450
451 static
452 int lttng_enum_create(const struct lttng_enum_desc *desc,
453 struct lttng_session *session)
454 {
455 const char *enum_name = desc->name;
456 struct lttng_enum *_enum;
457 struct cds_hlist_head *head;
458 int ret = 0;
459 size_t name_len = strlen(enum_name);
460 uint32_t hash;
461 int notify_socket;
462
463 /* Check if this enum is already registered for this session. */
464 hash = jhash(enum_name, name_len, 0);
465 head = &session->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
466
467 _enum = lttng_ust_enum_get_from_desc(session, desc);
468 if (_enum) {
469 ret = -EEXIST;
470 goto exist;
471 }
472
473 notify_socket = lttng_get_notify_socket(session->owner);
474 if (notify_socket < 0) {
475 ret = notify_socket;
476 goto socket_error;
477 }
478
479 _enum = zmalloc(sizeof(*_enum));
480 if (!_enum) {
481 ret = -ENOMEM;
482 goto cache_error;
483 }
484 _enum->session = session;
485 _enum->desc = desc;
486
487 ret = ustcomm_register_enum(notify_socket,
488 session->objd,
489 enum_name,
490 desc->nr_entries,
491 desc->entries,
492 &_enum->id);
493 if (ret < 0) {
494 DBG("Error (%d) registering enumeration to sessiond", ret);
495 goto sessiond_register_error;
496 }
497 cds_list_add(&_enum->node, &session->enums_head);
498 cds_hlist_add_head(&_enum->hlist, head);
499 return 0;
500
501 sessiond_register_error:
502 free(_enum);
503 cache_error:
504 socket_error:
505 exist:
506 return ret;
507 }
508
509 static
510 int lttng_create_enum_check(const struct lttng_type *type,
511 struct lttng_session *session)
512 {
513 switch (type->atype) {
514 case atype_enum:
515 {
516 const struct lttng_enum_desc *enum_desc;
517 int ret;
518
519 enum_desc = type->u.legacy.basic.enumeration.desc;
520 ret = lttng_enum_create(enum_desc, session);
521 if (ret && ret != -EEXIST) {
522 DBG("Unable to create enum error: (%d)", ret);
523 return ret;
524 }
525 break;
526 }
527 case atype_enum_nestable:
528 {
529 const struct lttng_enum_desc *enum_desc;
530 int ret;
531
532 enum_desc = type->u.enum_nestable.desc;
533 ret = lttng_enum_create(enum_desc, session);
534 if (ret && ret != -EEXIST) {
535 DBG("Unable to create enum error: (%d)", ret);
536 return ret;
537 }
538 break;
539 }
540 case atype_dynamic:
541 {
542 const struct lttng_event_field *tag_field_generic;
543 const struct lttng_enum_desc *enum_desc;
544 int ret;
545
546 tag_field_generic = lttng_ust_dynamic_type_tag_field();
547 enum_desc = tag_field_generic->type.u.enum_nestable.desc;
548 ret = lttng_enum_create(enum_desc, session);
549 if (ret && ret != -EEXIST) {
550 DBG("Unable to create enum error: (%d)", ret);
551 return ret;
552 }
553 break;
554 }
555 default:
556 /* TODO: nested types when they become supported. */
557 break;
558 }
559 return 0;
560 }
561
562 static
563 int lttng_create_all_event_enums(size_t nr_fields,
564 const struct lttng_event_field *event_fields,
565 struct lttng_session *session)
566 {
567 size_t i;
568 int ret;
569
570 /* For each field, ensure enum is part of the session. */
571 for (i = 0; i < nr_fields; i++) {
572 const struct lttng_type *type = &event_fields[i].type;
573
574 ret = lttng_create_enum_check(type, session);
575 if (ret)
576 return ret;
577 }
578 return 0;
579 }
580
581 static
582 int lttng_create_all_ctx_enums(size_t nr_fields,
583 const struct lttng_ctx_field *ctx_fields,
584 struct lttng_session *session)
585 {
586 size_t i;
587 int ret;
588
589 /* For each field, ensure enum is part of the session. */
590 for (i = 0; i < nr_fields; i++) {
591 const struct lttng_type *type = &ctx_fields[i].event_field.type;
592
593 ret = lttng_create_enum_check(type, session);
594 if (ret)
595 return ret;
596 }
597 return 0;
598 }
599
600 /*
601 * Ensure that a state-dump will be performed for this session at the end
602 * of the current handle_message().
603 */
604 int lttng_session_statedump(struct lttng_session *session)
605 {
606 session->statedump_pending = 1;
607 lttng_ust_sockinfo_session_enabled(session->owner);
608 return 0;
609 }
610
611 int lttng_session_enable(struct lttng_session *session)
612 {
613 int ret = 0;
614 struct lttng_channel *chan;
615 int notify_socket;
616
617 if (session->active) {
618 ret = -EBUSY;
619 goto end;
620 }
621
622 notify_socket = lttng_get_notify_socket(session->owner);
623 if (notify_socket < 0)
624 return notify_socket;
625
626 /* Set transient enabler state to "enabled" */
627 session->tstate = 1;
628
629 /* We need to sync enablers with session before activation. */
630 lttng_session_sync_event_enablers(session);
631
632 /*
633 * Snapshot the number of events per channel to know the type of header
634 * we need to use.
635 */
636 cds_list_for_each_entry(chan, &session->chan_head, node) {
637 const struct lttng_ctx *ctx;
638 const struct lttng_ctx_field *fields = NULL;
639 size_t nr_fields = 0;
640 uint32_t chan_id;
641
642 /* Don't change the header type on session stop/restart. */
643 if (chan->header_type)
644 continue;
645 ctx = chan->ctx;
646 if (ctx) {
647 nr_fields = ctx->nr_fields;
648 fields = ctx->fields;
649 ret = lttng_create_all_ctx_enums(nr_fields, fields,
650 session);
651 if (ret < 0) {
652 DBG("Error (%d) adding enum to session", ret);
653 return ret;
654 }
655 }
656 ret = ustcomm_register_channel(notify_socket,
657 session,
658 session->objd,
659 chan->objd,
660 nr_fields,
661 fields,
662 &chan_id,
663 &chan->header_type);
664 if (ret) {
665 DBG("Error (%d) registering channel to sessiond", ret);
666 return ret;
667 }
668 if (chan_id != chan->id) {
669 DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
670 chan_id, chan->id);
671 return -EINVAL;
672 }
673 }
674
675 /* Set atomically the state to "active" */
676 CMM_ACCESS_ONCE(session->active) = 1;
677 CMM_ACCESS_ONCE(session->been_active) = 1;
678
679 ret = lttng_session_statedump(session);
680 if (ret)
681 return ret;
682 end:
683 return ret;
684 }
685
686 int lttng_session_disable(struct lttng_session *session)
687 {
688 int ret = 0;
689
690 if (!session->active) {
691 ret = -EBUSY;
692 goto end;
693 }
694 /* Set atomically the state to "inactive" */
695 CMM_ACCESS_ONCE(session->active) = 0;
696
697 /* Set transient enabler state to "disabled" */
698 session->tstate = 0;
699 lttng_session_sync_event_enablers(session);
700 end:
701 return ret;
702 }
703
704 int lttng_channel_enable(struct lttng_channel *channel)
705 {
706 int ret = 0;
707
708 if (channel->enabled) {
709 ret = -EBUSY;
710 goto end;
711 }
712 /* Set transient enabler state to "enabled" */
713 channel->tstate = 1;
714 lttng_session_sync_event_enablers(channel->session);
715 /* Set atomically the state to "enabled" */
716 CMM_ACCESS_ONCE(channel->enabled) = 1;
717 end:
718 return ret;
719 }
720
721 int lttng_channel_disable(struct lttng_channel *channel)
722 {
723 int ret = 0;
724
725 if (!channel->enabled) {
726 ret = -EBUSY;
727 goto end;
728 }
729 /* Set atomically the state to "disabled" */
730 CMM_ACCESS_ONCE(channel->enabled) = 0;
731 /* Set transient enabler state to "disabled" */
732 channel->tstate = 0;
733 lttng_session_sync_event_enablers(channel->session);
734 end:
735 return ret;
736 }
737
738 static inline
739 struct cds_hlist_head *borrow_hash_table_bucket(
740 struct cds_hlist_head *hash_table,
741 unsigned int hash_table_size,
742 const struct lttng_event_desc *desc)
743 {
744 const char *event_name;
745 size_t name_len;
746 uint32_t hash;
747
748 event_name = desc->name;
749 name_len = strlen(event_name);
750
751 hash = jhash(event_name, name_len, 0);
752 return &hash_table[hash & (hash_table_size - 1)];
753 }
754
755 /*
756 * Supports event creation while tracing session is active.
757 */
758 static
759 int lttng_event_create(const struct lttng_event_desc *desc,
760 struct lttng_channel *chan)
761 {
762 struct lttng_event *event;
763 struct lttng_session *session = chan->session;
764 struct cds_hlist_head *head;
765 int ret = 0;
766 int notify_socket, loglevel;
767 const char *uri;
768
769 head = borrow_hash_table_bucket(chan->session->events_ht.table,
770 LTTNG_UST_EVENT_HT_SIZE, desc);
771
772 notify_socket = lttng_get_notify_socket(session->owner);
773 if (notify_socket < 0) {
774 ret = notify_socket;
775 goto socket_error;
776 }
777
778 ret = lttng_create_all_event_enums(desc->nr_fields, desc->fields,
779 session);
780 if (ret < 0) {
781 DBG("Error (%d) adding enum to session", ret);
782 goto create_enum_error;
783 }
784
785 /*
786 * Check if the loglevel matches. Refuse to connect the event if not.
787 */
788 event = zmalloc(sizeof(struct lttng_event));
789 if (!event) {
790 ret = -ENOMEM;
791 goto cache_error;
792 }
793 event->chan = chan;
794
795 /* Event will be enabled by enabler sync. */
796 event->enabled = 0;
797 event->registered = 0;
798 CDS_INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
799 CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
800 event->desc = desc;
801
802 if (desc->loglevel)
803 loglevel = *(*event->desc->loglevel);
804 else
805 loglevel = TRACE_DEFAULT;
806 if (desc->u.ext.model_emf_uri)
807 uri = *(desc->u.ext.model_emf_uri);
808 else
809 uri = NULL;
810
811 /* Fetch event ID from sessiond */
812 ret = ustcomm_register_event(notify_socket,
813 session,
814 session->objd,
815 chan->objd,
816 desc->name,
817 loglevel,
818 desc->signature,
819 desc->nr_fields,
820 desc->fields,
821 uri,
822 &event->id);
823 if (ret < 0) {
824 DBG("Error (%d) registering event to sessiond", ret);
825 goto sessiond_register_error;
826 }
827
828 cds_list_add(&event->node, &chan->session->events_head);
829 cds_hlist_add_head(&event->hlist, head);
830 return 0;
831
832 sessiond_register_error:
833 free(event);
834 cache_error:
835 create_enum_error:
836 socket_error:
837 return ret;
838 }
839
840 static
841 int lttng_trigger_create(const struct lttng_event_desc *desc,
842 uint64_t id, uint64_t error_counter_index,
843 struct lttng_trigger_group *trigger_group)
844 {
845 struct lttng_trigger *trigger;
846 struct cds_hlist_head *head;
847 int ret = 0;
848
849 /*
850 * Get the hash table bucket where the created lttng_trigger object
851 * should be inserted.
852 */
853 head = borrow_hash_table_bucket(trigger_group->triggers_ht.table,
854 LTTNG_UST_TRIGGER_HT_SIZE, desc);
855
856 trigger = zmalloc(sizeof(struct lttng_trigger));
857 if (!trigger) {
858 ret = -ENOMEM;
859 goto error;
860 }
861
862 trigger->group = trigger_group;
863 trigger->id = id;
864 trigger->error_counter_index = error_counter_index;
865
866 /* Trigger will be enabled by enabler sync. */
867 trigger->enabled = 0;
868 trigger->registered = 0;
869
870 CDS_INIT_LIST_HEAD(&trigger->filter_bytecode_runtime_head);
871 CDS_INIT_LIST_HEAD(&trigger->capture_bytecode_runtime_head);
872 CDS_INIT_LIST_HEAD(&trigger->enablers_ref_head);
873 trigger->desc = desc;
874
875 cds_list_add(&trigger->node, &trigger_group->triggers_head);
876 cds_hlist_add_head(&trigger->hlist, head);
877
878 return 0;
879
880 error:
881 return ret;
882 }
883
884 static
885 void _lttng_trigger_destroy(struct lttng_trigger *trigger)
886 {
887 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
888
889 /* Remove from trigger list. */
890 cds_list_del(&trigger->node);
891 /* Remove from trigger hash table. */
892 cds_hlist_del(&trigger->hlist);
893
894 lttng_free_trigger_filter_runtime(trigger);
895
896 /* Free trigger enabler refs */
897 cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
898 &trigger->enablers_ref_head, node)
899 free(enabler_ref);
900 free(trigger);
901 }
902
903 static
904 int lttng_desc_match_star_glob_enabler(const struct lttng_event_desc *desc,
905 struct lttng_enabler *enabler)
906 {
907 int loglevel = 0;
908 unsigned int has_loglevel = 0;
909
910 assert(enabler->format_type == LTTNG_ENABLER_FORMAT_STAR_GLOB);
911 if (!strutils_star_glob_match(enabler->event_param.name, SIZE_MAX,
912 desc->name, SIZE_MAX))
913 return 0;
914 if (desc->loglevel) {
915 loglevel = *(*desc->loglevel);
916 has_loglevel = 1;
917 }
918 if (!lttng_loglevel_match(loglevel,
919 has_loglevel,
920 enabler->event_param.loglevel_type,
921 enabler->event_param.loglevel))
922 return 0;
923 return 1;
924 }
925
926 static
927 int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc,
928 struct lttng_enabler *enabler)
929 {
930 int loglevel = 0;
931 unsigned int has_loglevel = 0;
932
933 assert(enabler->format_type == LTTNG_ENABLER_FORMAT_EVENT);
934 if (strcmp(desc->name, enabler->event_param.name))
935 return 0;
936 if (desc->loglevel) {
937 loglevel = *(*desc->loglevel);
938 has_loglevel = 1;
939 }
940 if (!lttng_loglevel_match(loglevel,
941 has_loglevel,
942 enabler->event_param.loglevel_type,
943 enabler->event_param.loglevel))
944 return 0;
945 return 1;
946 }
947
948 static
949 int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
950 struct lttng_enabler *enabler)
951 {
952 switch (enabler->format_type) {
953 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
954 {
955 struct lttng_ust_excluder_node *excluder;
956
957 if (!lttng_desc_match_star_glob_enabler(desc, enabler)) {
958 return 0;
959 }
960
961 /*
962 * If the matched event name also matches an excluder,
963 * return 'does not match'.
964 */
965 cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
966 int count;
967
968 for (count = 0; count < excluder->excluder.count; count++) {
969 int len;
970 char *excluder_name;
971
972 excluder_name = (char *) (excluder->excluder.names)
973 + count * LTTNG_UST_SYM_NAME_LEN;
974 len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
975 if (len > 0 && strutils_star_glob_match(excluder_name, len, desc->name, SIZE_MAX))
976 return 0;
977 }
978 }
979 return 1;
980 }
981 case LTTNG_ENABLER_FORMAT_EVENT:
982 return lttng_desc_match_event_enabler(desc, enabler);
983 default:
984 return -EINVAL;
985 }
986 }
987
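/*
 * Matching sketch: a star-glob enabler with no excluders, matched
 * against an event description. Only fields already dereferenced in
 * this file (format_type, event_param, excluder_head) are touched; the
 * helper name and the "my_provider:*" pattern are hypothetical and not
 * part of the original file.
 */
static inline
int lttng_desc_match_example(const struct lttng_event_desc *desc)
{
	struct lttng_enabler enabler;

	memset(&enabler, 0, sizeof(enabler));
	enabler.format_type = LTTNG_ENABLER_FORMAT_STAR_GLOB;
	strncpy(enabler.event_param.name, "my_provider:*",
			LTTNG_UST_SYM_NAME_LEN - 1);
	enabler.event_param.loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
	CDS_INIT_LIST_HEAD(&enabler.excluder_head);

	/* Returns 1 for any event whose name starts with "my_provider:", 0 otherwise. */
	return lttng_desc_match_enabler(desc, &enabler);
}
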
988 static
989 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
990 struct lttng_event *event)
991 {
992 if (lttng_desc_match_enabler(event->desc,
993 lttng_event_enabler_as_enabler(event_enabler))
994 && event->chan == event_enabler->chan)
995 return 1;
996 else
997 return 0;
998 }
999
1000 static
1001 int lttng_trigger_enabler_match_trigger(
1002 struct lttng_trigger_enabler *trigger_enabler,
1003 struct lttng_trigger *trigger)
1004 {
1005 int desc_matches = lttng_desc_match_enabler(trigger->desc,
1006 lttng_trigger_enabler_as_enabler(trigger_enabler));
1007
1008 if (desc_matches && trigger->group == trigger_enabler->group &&
1009 trigger->id == trigger_enabler->id)
1010 return 1;
1011 else
1012 return 0;
1013 }
1014
1015 static
1016 struct lttng_enabler_ref *lttng_enabler_ref(
1017 struct cds_list_head *enabler_ref_list,
1018 struct lttng_enabler *enabler)
1019 {
1020 struct lttng_enabler_ref *enabler_ref;
1021
1022 cds_list_for_each_entry(enabler_ref, enabler_ref_list, node) {
1023 if (enabler_ref->ref == enabler)
1024 return enabler_ref;
1025 }
1026 return NULL;
1027 }
1028
1029 /*
1030 * Create a struct lttng_event if it is missing and its event description
1031 * is present in the list of registered tracepoint probes.
1032 */
1033 static
1034 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
1035 {
1036 struct lttng_session *session = event_enabler->chan->session;
1037 struct lttng_probe_desc *probe_desc;
1038 const struct lttng_event_desc *desc;
1039 struct lttng_event *event;
1040 int i;
1041 struct cds_list_head *probe_list;
1042
1043 probe_list = lttng_get_probe_list_head();
1044 /*
1045 * For each probe event, if we find that a probe event matches
1046 * our enabler, create an associated lttng_event if not
1047 * already present.
1048 */
1049 cds_list_for_each_entry(probe_desc, probe_list, head) {
1050 for (i = 0; i < probe_desc->nr_events; i++) {
1051 int ret;
1052 bool found = false;
1053 struct cds_hlist_head *head;
1054 struct cds_hlist_node *node;
1055
1056 desc = probe_desc->event_desc[i];
1057 if (!lttng_desc_match_enabler(desc,
1058 lttng_event_enabler_as_enabler(event_enabler)))
1059 continue;
1060
1061 head = borrow_hash_table_bucket(
1062 session->events_ht.table,
1063 LTTNG_UST_EVENT_HT_SIZE, desc);
1064
1065 cds_hlist_for_each_entry(event, node, head, hlist) {
1066 if (event->desc == desc
1067 && event->chan == event_enabler->chan) {
1068 found = true;
1069 break;
1070 }
1071 }
1072 if (found)
1073 continue;
1074
1075 /*
1076 * We need to create an event for this
1077 * event probe.
1078 */
1079 ret = lttng_event_create(probe_desc->event_desc[i],
1080 event_enabler->chan);
1081 if (ret) {
1082 DBG("Unable to create event %s, error %d\n",
1083 probe_desc->event_desc[i]->name, ret);
1084 }
1085 }
1086 }
1087 }
1088
1089 static
1090 void probe_provider_event_for_each(struct lttng_probe_desc *provider_desc,
1091 void (*event_func)(struct lttng_session *session,
1092 struct lttng_event *event),
1093 void (*trigger_func)(struct lttng_trigger *trigger))
1094 {
1095 struct cds_hlist_node *node, *tmp_node;
1096 struct cds_list_head *sessionsp;
1097 unsigned int i;
1098
1099 /* Get handle on list of sessions. */
1100 sessionsp = _lttng_get_sessions();
1101
1102 /*
1103 * For each event in the probe provider description, apply the given
1104 * callbacks to the matching events (per session) and triggers (per trigger group).
1105 */
1106 for (i = 0; i < provider_desc->nr_events; i++) {
1107 const struct lttng_event_desc *event_desc;
1108 struct lttng_trigger_group *trigger_group;
1109 struct lttng_trigger *trigger;
1110 struct lttng_session *session;
1111 struct cds_hlist_head *head;
1112 struct lttng_event *event;
1113
1114 event_desc = provider_desc->event_desc[i];
1115
1116 /*
1117 * Iterate over all sessions to find the current event
1118 * description.
1119 */
1120 cds_list_for_each_entry(session, sessionsp, node) {
1121 /*
1122 * Get the list of events in the hashtable bucket and
1123 * iterate to find the event matching this descriptor.
1124 */
1125 head = borrow_hash_table_bucket(
1126 session->events_ht.table,
1127 LTTNG_UST_EVENT_HT_SIZE, event_desc);
1128
1129 cds_hlist_for_each_entry_safe(event, node, tmp_node, head, hlist) {
1130 if (event_desc == event->desc) {
1131 event_func(session, event);
1132 break;
1133 }
1134 }
1135 }
1136
1137 /*
1138 * Iterate over all trigger groups to find the current event
1139 * description.
1140 */
1141 cds_list_for_each_entry(trigger_group, &trigger_groups, node) {
1142 /*
1143 * Get the list of triggers in the hashtable bucket and
1144 * iterate to find the trigger matching this
1145 * descriptor.
1146 */
1147 head = borrow_hash_table_bucket(
1148 trigger_group->triggers_ht.table,
1149 LTTNG_UST_TRIGGER_HT_SIZE, event_desc);
1150
1151 cds_hlist_for_each_entry_safe(trigger, node, tmp_node, head, hlist) {
1152 if (event_desc == trigger->desc) {
1153 trigger_func(trigger);
1154 break;
1155 }
1156 }
1157 }
1158 }
1159 }
1160
1161 static
1162 void _unregister_event(struct lttng_session *session,
1163 struct lttng_event *event)
1164 {
1165 _lttng_event_unregister(event);
1166 }
1167
1168 static
1169 void _event_enum_destroy(struct lttng_session *session,
1170 struct lttng_event *event)
1171 {
1172 unsigned int i;
1173
1174 /* Destroy enums of the current event. */
1175 for (i = 0; i < event->desc->nr_fields; i++) {
1176 const struct lttng_enum_desc *enum_desc;
1177 const struct lttng_event_field *field;
1178 struct lttng_enum *curr_enum;
1179
1180 field = &(event->desc->fields[i]);
1181 switch (field->type.atype) {
1182 case atype_enum:
1183 enum_desc = field->type.u.legacy.basic.enumeration.desc;
1184 break;
1185 case atype_enum_nestable:
1186 enum_desc = field->type.u.enum_nestable.desc;
1187 break;
1188 default:
1189 continue;
1190 }
1191
1192 curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc);
1193 if (curr_enum) {
1194 _lttng_enum_destroy(curr_enum);
1195 }
1196 }
1197
1198 /* Destroy event. */
1199 _lttng_event_destroy(event);
1200 }
1201
1202 /*
1203 * Iterate over all the UST sessions to unregister and destroy all events
1204 * created from the probe provider descriptor received as argument. Must be
1205 * called with the ust_lock held.
1206 */
1207 void lttng_probe_provider_unregister_events(
1208 struct lttng_probe_desc *provider_desc)
1209 {
1210 /*
1211 * Iterate over all events in the probe provider descriptions and sessions
1212 * to queue the unregistration of the events.
1213 */
1214 probe_provider_event_for_each(provider_desc, _unregister_event,
1215 _lttng_trigger_unregister);
1216
1217 /* Wait for grace period. */
1218 synchronize_trace();
1219 /* Prune the unregistration queue. */
1220 __tracepoint_probe_prune_release_queue();
1221
1222 /*
1223 * It is now safe to destroy the events and remove them from the event list
1224 * and hashtables.
1225 */
1226 probe_provider_event_for_each(provider_desc, _event_enum_destroy,
1227 _lttng_trigger_destroy);
1228 }
1229
1230 /*
1231 * Create events associated with an event enabler (if not already present),
1232 * and add backward reference from the event to the enabler.
1233 */
1234 static
1235 int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
1236 {
1237 struct lttng_session *session = event_enabler->chan->session;
1238 struct lttng_event *event;
1239
1240 if (!lttng_event_enabler_as_enabler(event_enabler)->enabled)
1241 goto end;
1242
1243 /* First ensure that probe events are created for this enabler. */
1244 lttng_create_event_if_missing(event_enabler);
1245
1246 /* For each event matching enabler in session event list. */
1247 cds_list_for_each_entry(event, &session->events_head, node) {
1248 struct lttng_enabler_ref *enabler_ref;
1249
1250 if (!lttng_event_enabler_match_event(event_enabler, event))
1251 continue;
1252
1253 enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
1254 lttng_event_enabler_as_enabler(event_enabler));
1255 if (!enabler_ref) {
1256 /*
1257 * If no backward ref, create it.
1258 * Add backward ref from event to enabler.
1259 */
1260 enabler_ref = zmalloc(sizeof(*enabler_ref));
1261 if (!enabler_ref)
1262 return -ENOMEM;
1263 enabler_ref->ref = lttng_event_enabler_as_enabler(
1264 event_enabler);
1265 cds_list_add(&enabler_ref->node,
1266 &event->enablers_ref_head);
1267 }
1268
1269 /*
1270 * Link filter bytecodes if not linked yet.
1271 */
1272 lttng_enabler_link_bytecode(event->desc,
1273 &session->ctx,
1274 &event->filter_bytecode_runtime_head,
1275 &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
1276
1277 /* TODO: merge event context. */
1278 }
1279 end:
1280 return 0;
1281 }
1282
1283 /*
1284 * Called at library load: connect newly loaded probes to all enablers
1285 * matching their events.
1286 * Called with session mutex held.
1287 */
1288 int lttng_fix_pending_events(void)
1289 {
1290 struct lttng_session *session;
1291
1292 cds_list_for_each_entry(session, &sessions, node) {
1293 lttng_session_lazy_sync_event_enablers(session);
1294 }
1295 return 0;
1296 }
1297
1298 int lttng_fix_pending_triggers(void)
1299 {
1300 struct lttng_trigger_group *trigger_group;
1301
1302 cds_list_for_each_entry(trigger_group, &trigger_groups, node) {
1303 lttng_trigger_group_sync_enablers(trigger_group);
1304 }
1305 return 0;
1306 }
1307
1308 /*
1309 * For each session of the owner thread, execute pending statedump.
1310 * Only dump state for the sessions owned by the caller thread, because
1311 * we don't keep ust_lock across the entire iteration.
1312 */
1313 void lttng_handle_pending_statedump(void *owner)
1314 {
1315 struct lttng_session *session;
1316
1317 /* Execute state dump */
1318 do_lttng_ust_statedump(owner);
1319
1320 /* Clear pending state dump */
1321 if (ust_lock()) {
1322 goto end;
1323 }
1324 cds_list_for_each_entry(session, &sessions, node) {
1325 if (session->owner != owner)
1326 continue;
1327 if (!session->statedump_pending)
1328 continue;
1329 session->statedump_pending = 0;
1330 }
1331 end:
1332 ust_unlock();
1333 return;
1334 }
1335
1336 /*
1337 * Only used internally at session destruction.
1338 */
1339 static
1340 void _lttng_event_destroy(struct lttng_event *event)
1341 {
1342 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1343
1344 /* Remove from event list. */
1345 cds_list_del(&event->node);
1346 /* Remove from event hash table. */
1347 cds_hlist_del(&event->hlist);
1348
1349 lttng_destroy_context(event->ctx);
1350 lttng_free_event_filter_runtime(event);
1351 /* Free event enabler refs */
1352 cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1353 &event->enablers_ref_head, node)
1354 free(enabler_ref);
1355 free(event);
1356 }
1357
1358 static
1359 void _lttng_enum_destroy(struct lttng_enum *_enum)
1360 {
1361 cds_list_del(&_enum->node);
1362 cds_hlist_del(&_enum->hlist);
1363 free(_enum);
1364 }
1365
1366 void lttng_ust_events_exit(void)
1367 {
1368 struct lttng_session *session, *tmpsession;
1369
1370 cds_list_for_each_entry_safe(session, tmpsession, &sessions, node)
1371 lttng_session_destroy(session);
1372 }
1373
1374 /*
1375 * Enabler management.
1376 */
1377 struct lttng_event_enabler *lttng_event_enabler_create(
1378 enum lttng_enabler_format_type format_type,
1379 struct lttng_ust_event *event_param,
1380 struct lttng_channel *chan)
1381 {
1382 struct lttng_event_enabler *event_enabler;
1383
1384 event_enabler = zmalloc(sizeof(*event_enabler));
1385 if (!event_enabler)
1386 return NULL;
1387 event_enabler->base.format_type = format_type;
1388 CDS_INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
1389 CDS_INIT_LIST_HEAD(&event_enabler->base.excluder_head);
1390 memcpy(&event_enabler->base.event_param, event_param,
1391 sizeof(event_enabler->base.event_param));
1392 event_enabler->chan = chan;
1393 /* ctx left NULL */
1394 event_enabler->base.enabled = 0;
1395 cds_list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
1396 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1397
1398 return event_enabler;
1399 }
1400
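/*
 * Creation sketch: an enabler matching every tracepoint of a channel,
 * as the session daemon would request for "lttng enable-event -u -a".
 * The LTTNG_UST_TRACEPOINT instrumentation enumerator is assumed from
 * lttng/ust-abi.h; the helper name is hypothetical and not part of the
 * original file. Caller holds ust_lock.
 */
static inline
struct lttng_event_enabler *lttng_enable_all_example(struct lttng_channel *chan)
{
	struct lttng_ust_event event_param;

	memset(&event_param, 0, sizeof(event_param));
	strncpy(event_param.name, "*", LTTNG_UST_SYM_NAME_LEN - 1);
	event_param.instrumentation = LTTNG_UST_TRACEPOINT;	/* Assumed enumerator. */
	event_param.loglevel_type = LTTNG_UST_LOGLEVEL_ALL;

	return lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_STAR_GLOB,
			&event_param, chan);
}
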
1401 struct lttng_trigger_enabler *lttng_trigger_enabler_create(
1402 struct lttng_trigger_group *trigger_group,
1403 enum lttng_enabler_format_type format_type,
1404 struct lttng_ust_trigger *trigger_param)
1405 {
1406 struct lttng_trigger_enabler *trigger_enabler;
1407
1408 trigger_enabler = zmalloc(sizeof(*trigger_enabler));
1409 if (!trigger_enabler)
1410 return NULL;
1411 trigger_enabler->base.format_type = format_type;
1412 CDS_INIT_LIST_HEAD(&trigger_enabler->base.filter_bytecode_head);
1413 CDS_INIT_LIST_HEAD(&trigger_enabler->capture_bytecode_head);
1414 CDS_INIT_LIST_HEAD(&trigger_enabler->base.excluder_head);
1415
1416 trigger_enabler->id = trigger_param->id;
1417 trigger_enabler->num_captures = 0;
1418
1419 memcpy(&trigger_enabler->base.event_param.name, trigger_param->name,
1420 sizeof(trigger_enabler->base.event_param.name));
1421 trigger_enabler->base.event_param.instrumentation = trigger_param->instrumentation;
1422 trigger_enabler->base.event_param.loglevel = trigger_param->loglevel;
1423 trigger_enabler->base.event_param.loglevel_type = trigger_param->loglevel_type;
1424
1425 trigger_enabler->base.enabled = 0;
1426 trigger_enabler->group = trigger_group;
1427
1428 cds_list_add(&trigger_enabler->node, &trigger_group->enablers_head);
1429
1430 lttng_trigger_group_sync_enablers(trigger_group);
1431
1432 return trigger_enabler;
1433 }
1434
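/*
 * Creation sketch: a trigger enabler for a single named tracepoint, as
 * the session daemon would send when registering a trigger with id 1.
 * Field names mirror the copies done in lttng_trigger_enabler_create()
 * above; the helper name, the "my_provider:my_event" tracepoint and the
 * LTTNG_UST_TRACEPOINT enumerator are assumptions, not part of the
 * original file.
 */
static inline
struct lttng_trigger_enabler *lttng_trigger_enabler_example(
		struct lttng_trigger_group *trigger_group)
{
	struct lttng_ust_trigger trigger_param;

	memset(&trigger_param, 0, sizeof(trigger_param));
	trigger_param.id = 1;
	strncpy(trigger_param.name, "my_provider:my_event",
			LTTNG_UST_SYM_NAME_LEN - 1);
	trigger_param.instrumentation = LTTNG_UST_TRACEPOINT;	/* Assumed enumerator. */
	trigger_param.loglevel_type = LTTNG_UST_LOGLEVEL_ALL;

	return lttng_trigger_enabler_create(trigger_group,
			LTTNG_ENABLER_FORMAT_EVENT, &trigger_param);
}
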
1435 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
1436 {
1437 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
1438 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1439
1440 return 0;
1441 }
1442
1443 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
1444 {
1445 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
1446 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1447
1448 return 0;
1449 }
1450
1451 static
1452 void _lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
1453 struct lttng_ust_bytecode_node *bytecode)
1454 {
1455 bytecode->enabler = enabler;
1456 cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
1457 }
1458
1459 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
1460 struct lttng_ust_bytecode_node *bytecode)
1461 {
1462 _lttng_enabler_attach_filter_bytecode(
1463 lttng_event_enabler_as_enabler(event_enabler), bytecode);
1464
1465 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1466 return 0;
1467 }
1468
1469 static
1470 void _lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
1471 struct lttng_ust_excluder_node *excluder)
1472 {
1473 excluder->enabler = enabler;
1474 cds_list_add_tail(&excluder->node, &enabler->excluder_head);
1475 }
1476
1477 int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *event_enabler,
1478 struct lttng_ust_excluder_node *excluder)
1479 {
1480 _lttng_enabler_attach_exclusion(
1481 lttng_event_enabler_as_enabler(event_enabler), excluder);
1482
1483 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1484 return 0;
1485 }
1486
1487 int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler)
1488 {
1489 lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 1;
1490 lttng_trigger_group_sync_enablers(trigger_enabler->group);
1491
1492 return 0;
1493 }
1494
1495 int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler)
1496 {
1497 lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 0;
1498 lttng_trigger_group_sync_enablers(trigger_enabler->group);
1499
1500 return 0;
1501 }
1502
1503 int lttng_trigger_enabler_attach_filter_bytecode(
1504 struct lttng_trigger_enabler *trigger_enabler,
1505 struct lttng_ust_bytecode_node *bytecode)
1506 {
1507 _lttng_enabler_attach_filter_bytecode(
1508 lttng_trigger_enabler_as_enabler(trigger_enabler), bytecode);
1509
1510 lttng_trigger_group_sync_enablers(trigger_enabler->group);
1511 return 0;
1512 }
1513
1514 int lttng_trigger_enabler_attach_capture_bytecode(
1515 struct lttng_trigger_enabler *trigger_enabler,
1516 struct lttng_ust_bytecode_node *bytecode)
1517 {
1518 bytecode->enabler = lttng_trigger_enabler_as_enabler(trigger_enabler);
1519 cds_list_add_tail(&bytecode->node, &trigger_enabler->capture_bytecode_head);
1520 trigger_enabler->num_captures++;
1521
1522 lttng_trigger_group_sync_enablers(trigger_enabler->group);
1523 return 0;
1524 }
1525
1526 int lttng_trigger_enabler_attach_exclusion(
1527 struct lttng_trigger_enabler *trigger_enabler,
1528 struct lttng_ust_excluder_node *excluder)
1529 {
1530 _lttng_enabler_attach_exclusion(
1531 lttng_trigger_enabler_as_enabler(trigger_enabler), excluder);
1532
1533 lttng_trigger_group_sync_enablers(trigger_enabler->group);
1534 return 0;
1535 }
1536
1537 int lttng_attach_context(struct lttng_ust_context *context_param,
1538 union ust_args *uargs,
1539 struct lttng_ctx **ctx, struct lttng_session *session)
1540 {
1541 /*
1542 * We cannot attach a context after trace has been started for a
1543 * session because the metadata does not allow expressing this
1544 * information outside of the original channel scope.
1545 */
1546 if (session->been_active)
1547 return -EPERM;
1548
1549 switch (context_param->ctx) {
1550 case LTTNG_UST_CONTEXT_PTHREAD_ID:
1551 return lttng_add_pthread_id_to_ctx(ctx);
1552 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
1553 {
1554 struct lttng_ust_perf_counter_ctx *perf_ctx_param;
1555
1556 perf_ctx_param = &context_param->u.perf_counter;
1557 return lttng_add_perf_counter_to_ctx(
1558 perf_ctx_param->type,
1559 perf_ctx_param->config,
1560 perf_ctx_param->name,
1561 ctx);
1562 }
1563 case LTTNG_UST_CONTEXT_VTID:
1564 return lttng_add_vtid_to_ctx(ctx);
1565 case LTTNG_UST_CONTEXT_VPID:
1566 return lttng_add_vpid_to_ctx(ctx);
1567 case LTTNG_UST_CONTEXT_PROCNAME:
1568 return lttng_add_procname_to_ctx(ctx);
1569 case LTTNG_UST_CONTEXT_IP:
1570 return lttng_add_ip_to_ctx(ctx);
1571 case LTTNG_UST_CONTEXT_CPU_ID:
1572 return lttng_add_cpu_id_to_ctx(ctx);
1573 case LTTNG_UST_CONTEXT_APP_CONTEXT:
1574 return lttng_ust_add_app_context_to_ctx_rcu(uargs->app_context.ctxname,
1575 ctx);
1576 case LTTNG_UST_CONTEXT_CGROUP_NS:
1577 return lttng_add_cgroup_ns_to_ctx(ctx);
1578 case LTTNG_UST_CONTEXT_IPC_NS:
1579 return lttng_add_ipc_ns_to_ctx(ctx);
1580 case LTTNG_UST_CONTEXT_MNT_NS:
1581 return lttng_add_mnt_ns_to_ctx(ctx);
1582 case LTTNG_UST_CONTEXT_NET_NS:
1583 return lttng_add_net_ns_to_ctx(ctx);
1584 case LTTNG_UST_CONTEXT_PID_NS:
1585 return lttng_add_pid_ns_to_ctx(ctx);
1586 case LTTNG_UST_CONTEXT_TIME_NS:
1587 return lttng_add_time_ns_to_ctx(ctx);
1588 case LTTNG_UST_CONTEXT_USER_NS:
1589 return lttng_add_user_ns_to_ctx(ctx);
1590 case LTTNG_UST_CONTEXT_UTS_NS:
1591 return lttng_add_uts_ns_to_ctx(ctx);
1592 case LTTNG_UST_CONTEXT_VUID:
1593 return lttng_add_vuid_to_ctx(ctx);
1594 case LTTNG_UST_CONTEXT_VEUID:
1595 return lttng_add_veuid_to_ctx(ctx);
1596 case LTTNG_UST_CONTEXT_VSUID:
1597 return lttng_add_vsuid_to_ctx(ctx);
1598 case LTTNG_UST_CONTEXT_VGID:
1599 return lttng_add_vgid_to_ctx(ctx);
1600 case LTTNG_UST_CONTEXT_VEGID:
1601 return lttng_add_vegid_to_ctx(ctx);
1602 case LTTNG_UST_CONTEXT_VSGID:
1603 return lttng_add_vsgid_to_ctx(ctx);
1604 default:
1605 return -EINVAL;
1606 }
1607 }
1608
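/*
 * Attach sketch: adding the vtid context to a channel before the
 * session has been started, mirroring how the UST ABI invokes
 * lttng_attach_context(). The uargs pointer is only dereferenced for
 * app contexts, so NULL is acceptable here; the helper name is
 * hypothetical and not part of the original file.
 */
static inline
int lttng_attach_vtid_example(struct lttng_channel *chan)
{
	struct lttng_ust_context context_param;

	memset(&context_param, 0, sizeof(context_param));
	context_param.ctx = LTTNG_UST_CONTEXT_VTID;

	return lttng_attach_context(&context_param, NULL, &chan->ctx,
			chan->session);
}
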
1609 int lttng_event_enabler_attach_context(struct lttng_event_enabler *enabler,
1610 struct lttng_ust_context *context_param)
1611 {
1612 return -ENOSYS;
1613 }
1614
1615 void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
1616 {
1617 if (!event_enabler) {
1618 return;
1619 }
1620 cds_list_del(&event_enabler->node);
1621
1622 lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
1623
1624 lttng_destroy_context(event_enabler->ctx);
1625 free(event_enabler);
1626 }
1627
1628 /*
1629 * lttng_session_sync_event_enablers should be called just before starting a
1630 * session.
1631 */
1632 static
1633 void lttng_session_sync_event_enablers(struct lttng_session *session)
1634 {
1635 struct lttng_event_enabler *event_enabler;
1636 struct lttng_event *event;
1637
1638 cds_list_for_each_entry(event_enabler, &session->enablers_head, node)
1639 lttng_event_enabler_ref_events(event_enabler);
1640 /*
1641 * For each event, if at least one of its enablers is enabled,
1642 * and its channel and session transient states are enabled, we
1643 * enable the event, else we disable it.
1644 */
1645 cds_list_for_each_entry(event, &session->events_head, node) {
1646 struct lttng_enabler_ref *enabler_ref;
1647 struct lttng_bytecode_runtime *runtime;
1648 int enabled = 0, has_enablers_without_bytecode = 0;
1649
1650 /* Enable events */
1651 cds_list_for_each_entry(enabler_ref,
1652 &event->enablers_ref_head, node) {
1653 if (enabler_ref->ref->enabled) {
1654 enabled = 1;
1655 break;
1656 }
1657 }
1658 /*
1659 * Enabled state is based on union of enablers, with
1660 * intersection of session and channel transient enable
1661 * states.
1662 */
1663 enabled = enabled && session->tstate && event->chan->tstate;
1664
1665 CMM_STORE_SHARED(event->enabled, enabled);
1666 /*
1667 * Sync tracepoint registration with event enabled
1668 * state.
1669 */
1670 if (enabled) {
1671 if (!event->registered)
1672 register_event(event);
1673 } else {
1674 if (event->registered)
1675 unregister_event(event);
1676 }
1677
1678 /* Check if the event has at least one enabled enabler without filter bytecode. */
1679 cds_list_for_each_entry(enabler_ref,
1680 &event->enablers_ref_head, node) {
1681 if (enabler_ref->ref->enabled
1682 && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
1683 has_enablers_without_bytecode = 1;
1684 break;
1685 }
1686 }
1687 event->has_enablers_without_bytecode =
1688 has_enablers_without_bytecode;
1689
1690 /* Enable filters */
1691 cds_list_for_each_entry(runtime,
1692 &event->filter_bytecode_runtime_head, node) {
1693 lttng_bytecode_filter_sync_state(runtime);
1694 }
1695 }
1696 __tracepoint_probe_prune_release_queue();
1697 }
1698
1699 static
1700 void lttng_create_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
1701 {
1702 struct lttng_trigger_group *trigger_group = trigger_enabler->group;
1703 struct lttng_probe_desc *probe_desc;
1704 struct cds_list_head *probe_list;
1705 int i;
1706
1707 probe_list = lttng_get_probe_list_head();
1708
1709 cds_list_for_each_entry(probe_desc, probe_list, head) {
1710 for (i = 0; i < probe_desc->nr_events; i++) {
1711 int ret;
1712 bool found = false;
1713 const struct lttng_event_desc *desc;
1714 struct lttng_trigger *trigger;
1715 struct cds_hlist_head *head;
1716 struct cds_hlist_node *node;
1717
1718 desc = probe_desc->event_desc[i];
1719
1720 if (!lttng_desc_match_enabler(desc,
1721 lttng_trigger_enabler_as_enabler(trigger_enabler)))
1722 continue;
1723
1724 /*
1725 * Given the current trigger group, get the bucket that
1726 * the target trigger would be if it was already
1727 * created.
1728 */
1729 head = borrow_hash_table_bucket(
1730 trigger_group->triggers_ht.table,
1731 LTTNG_UST_TRIGGER_HT_SIZE, desc);
1732
1733 cds_hlist_for_each_entry(trigger, node, head, hlist) {
1734 /*
1735 * Check if trigger already exists by checking
1736 * if the trigger and enabler share the same
1737 * description and id.
1738 */
1739 if (trigger->desc == desc &&
1740 trigger->id == trigger_enabler->id) {
1741 found = true;
1742 break;
1743 }
1744 }
1745
1746 if (found)
1747 continue;
1748
1749 /*
1750 * We need to create a trigger for this event probe.
1751 */
1752 ret = lttng_trigger_create(desc, trigger_enabler->id,
1753 trigger_enabler->error_counter_index,
1754 trigger_group);
1755 if (ret) {
1756 DBG("Unable to create trigger %s, error %d\n",
1757 probe_desc->event_desc[i]->name, ret);
1758 }
1759 }
1760 }
1761 }
1762
1763 /*
1764 * Create triggers associated with a trigger enabler (if not already present).
1765 */
1766 static
1767 int lttng_trigger_enabler_ref_triggers(struct lttng_trigger_enabler *trigger_enabler)
1768 {
1769 struct lttng_trigger_group *trigger_group = trigger_enabler->group;
1770 struct lttng_trigger *trigger;
1771
1772 /*
1773 * Only try to create triggers for enablers that are enabled; the user
1774 * might still be attaching filters or exclusions to the
1775 * trigger_enabler.
1776 */
1777 if (!lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled)
1778 goto end;
1779
1780 /* First, ensure that probe triggers are created for this enabler. */
1781 lttng_create_trigger_if_missing(trigger_enabler);
1782
1783 /* Link each matching trigger back to this enabler. */
1784 cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node) {
1785 struct lttng_enabler_ref *enabler_ref;
1786
1787 if (!lttng_trigger_enabler_match_trigger(trigger_enabler, trigger))
1788 continue;
1789
1790 enabler_ref = lttng_enabler_ref(&trigger->enablers_ref_head,
1791 lttng_trigger_enabler_as_enabler(trigger_enabler));
1792 if (!enabler_ref) {
1793 /*
1794 * If no backward ref, create it.
1795 * Add backward ref from trigger to enabler.
1796 */
1797 enabler_ref = zmalloc(sizeof(*enabler_ref));
1798 if (!enabler_ref)
1799 return -ENOMEM;
1800
1801 enabler_ref->ref = lttng_trigger_enabler_as_enabler(
1802 trigger_enabler);
1803 cds_list_add(&enabler_ref->node,
1804 &trigger->enablers_ref_head);
1805 }
1806
1807 /*
1808 * Link filter bytecodes if not linked yet.
1809 */
1810 lttng_enabler_link_bytecode(trigger->desc,
1811 &trigger_group->ctx, &trigger->filter_bytecode_runtime_head,
1812 &lttng_trigger_enabler_as_enabler(trigger_enabler)->filter_bytecode_head);
1813
1814 /*
1815 * Link capture bytecodes if not linked yet.
1816 */
1817 lttng_enabler_link_bytecode(trigger->desc,
1818 &trigger_group->ctx, &trigger->capture_bytecode_runtime_head,
1819 &trigger_enabler->capture_bytecode_head);
1820 trigger->num_captures = trigger_enabler->num_captures;
1821 }
1822 end:
1823 return 0;
1824 }
1825
1826 static
1827 void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group)
1828 {
1829 struct lttng_trigger_enabler *trigger_enabler;
1830 struct lttng_trigger *trigger;
1831
1832 cds_list_for_each_entry(trigger_enabler, &trigger_group->enablers_head, node)
1833 lttng_trigger_enabler_ref_triggers(trigger_enabler);
1834
1835 /*
1836 * For each trigger, if at least one of its enablers is enabled,
1837 * we enable the trigger, else we disable it.
1838 */
1839 cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node) {
1840 struct lttng_enabler_ref *enabler_ref;
1841 struct lttng_bytecode_runtime *runtime;
1842 int enabled = 0, has_enablers_without_bytecode = 0;
1843
1844 /* Enable triggers */
1845 cds_list_for_each_entry(enabler_ref,
1846 &trigger->enablers_ref_head, node) {
1847 if (enabler_ref->ref->enabled) {
1848 enabled = 1;
1849 break;
1850 }
1851 }
1852
1853 CMM_STORE_SHARED(trigger->enabled, enabled);
1854 /*
1855 * Sync tracepoint registration with trigger enabled
1856 * state.
1857 */
1858 if (enabled) {
1859 if (!trigger->registered)
1860 register_trigger(trigger);
1861 } else {
1862 if (trigger->registered)
1863 unregister_trigger(trigger);
1864 }
1865
1866 /* Check if the trigger has at least one enabled enabler without filter bytecode. */
1867 cds_list_for_each_entry(enabler_ref,
1868 &trigger->enablers_ref_head, node) {
1869 if (enabler_ref->ref->enabled
1870 && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
1871 has_enablers_without_bytecode = 1;
1872 break;
1873 }
1874 }
1875 trigger->has_enablers_without_bytecode =
1876 has_enablers_without_bytecode;
1877
1878 /* Enable filters */
1879 cds_list_for_each_entry(runtime,
1880 &trigger->filter_bytecode_runtime_head, node) {
1881 lttng_bytecode_filter_sync_state(runtime);
1882 }
1883
1884 /* Enable captures. */
1885 cds_list_for_each_entry(runtime,
1886 &trigger->capture_bytecode_runtime_head, node) {
1887 lttng_bytecode_capture_sync_state(runtime);
1888 }
1889 }
1890 __tracepoint_probe_prune_release_queue();
1891 }
1892
1893 /*
1894 * Apply enablers to session events, adding events to session if need
1895 * be. It is required after each modification applied to an active
1896 * session, and right before session "start".
1897 * "lazy" sync means we only sync if required.
1898 */
1899 static
1900 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
1901 {
1902 /* We can skip if session is not active */
1903 if (!session->active)
1904 return;
1905 lttng_session_sync_event_enablers(session);
1906 }
1907
1908 /*
1909 * Update all sessions with the given app context.
1910 * Called with ust lock held.
1911 * This is invoked when an application context gets loaded/unloaded. It
1912 * ensures the context callbacks are in sync with the application
1913 * context (either app context callbacks, or dummy callbacks).
1914 */
1915 void lttng_ust_context_set_session_provider(const char *name,
1916 size_t (*get_size)(struct lttng_ctx_field *field, size_t offset),
1917 void (*record)(struct lttng_ctx_field *field,
1918 struct lttng_ust_lib_ring_buffer_ctx *ctx,
1919 struct lttng_channel *chan),
1920 void (*get_value)(struct lttng_ctx_field *field,
1921 struct lttng_ctx_value *value))
1922 {
1923 struct lttng_session *session;
1924
1925 cds_list_for_each_entry(session, &sessions, node) {
1926 struct lttng_channel *chan;
1927 struct lttng_event *event;
1928 int ret;
1929
1930 ret = lttng_ust_context_set_provider_rcu(&session->ctx,
1931 name, get_size, record, get_value);
1932 if (ret)
1933 abort();
1934 cds_list_for_each_entry(chan, &session->chan_head, node) {
1935 ret = lttng_ust_context_set_provider_rcu(&chan->ctx,
1936 name, get_size, record, get_value);
1937 if (ret)
1938 abort();
1939 }
1940 cds_list_for_each_entry(event, &session->events_head, node) {
1941 ret = lttng_ust_context_set_provider_rcu(&event->ctx,
1942 name, get_size, record, get_value);
1943 if (ret)
1944 abort();
1945 }
1946 }
1947 }
1948
1949 /*
1950 * Update all trigger groups with the given app context.
1951 * Called with ust lock held.
1952 * This is invoked when an application context gets loaded/unloaded. It
1953 * ensures the context callbacks are in sync with the application
1954 * context (either app context callbacks, or dummy callbacks).
1955 */
1956 void lttng_ust_context_set_trigger_group_provider(const char *name,
1957 size_t (*get_size)(struct lttng_ctx_field *field, size_t offset),
1958 void (*record)(struct lttng_ctx_field *field,
1959 struct lttng_ust_lib_ring_buffer_ctx *ctx,
1960 struct lttng_channel *chan),
1961 void (*get_value)(struct lttng_ctx_field *field,
1962 struct lttng_ctx_value *value))
1963 {
1964 struct lttng_trigger_group *trigger_group;
1965
1966 cds_list_for_each_entry(trigger_group, &trigger_groups, node) {
1967 int ret;
1968
1969 ret = lttng_ust_context_set_provider_rcu(&trigger_group->ctx,
1970 name, get_size, record, get_value);
1971 if (ret)
1972 abort();
1973 }
1974 }