SoW-2019-0002: Dynamic Snapshot
[deliverable/lttng-ust.git] / liblttng-ust / lttng-events.c
1 /*
2 * lttng-events.c
3 *
4 * Holds LTTng per-session event registry.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #define _GNU_SOURCE
24 #define _LGPL_SOURCE
25 #include <stdio.h>
26 #include <assert.h>
27 #include <errno.h>
28 #include <limits.h>
29 #include <pthread.h>
30 #include <sys/shm.h>
31 #include <sys/ipc.h>
32 #include <stdint.h>
33 #include <stddef.h>
34 #include <inttypes.h>
35 #include <time.h>
36 #include <stdbool.h>
37 #include <unistd.h>
38 #include <lttng/ust-endian.h>
39
40 #include <urcu-bp.h>
41 #include <urcu/arch.h>
42 #include <urcu/compiler.h>
43 #include <urcu/hlist.h>
44 #include <urcu/list.h>
45 #include <urcu/uatomic.h>
46
47 #include <lttng/tracepoint.h>
48 #include <lttng/ust-events.h>
49
50 #include <usterr-signal-safe.h>
51 #include <helper.h>
52 #include <lttng/ust-ctl.h>
53 #include <ust-comm.h>
54 #include <ust-fd.h>
55 #include <lttng/ust-dynamic-type.h>
56 #include <lttng/ust-context-provider.h>
57 #include "error.h"
58 #include "compat.h"
59 #include "lttng-ust-uuid.h"
60
61 #include "tracepoint-internal.h"
62 #include "string-utils.h"
63 #include "lttng-tracer.h"
64 #include "lttng-tracer-core.h"
65 #include "lttng-ust-statedump.h"
66 #include "share.h"
67 #include "ust-events-internal.h"
68 #include "wait.h"
69 #include "../libringbuffer/shm.h"
70 #include "jhash.h"
71 #include "ust-abi.h"
72
73 /*
74 * All operations within this file are called by the communication
75 * thread, under ust_lock protection.
76 */
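/*
 * A minimal calling sketch, mirroring the ust_lock() pattern used by
 * lttng_handle_pending_statedump() further down in this file: a
 * non-zero return from ust_lock() means the operation should be
 * skipped, but the lock is still released with ust_unlock().
 *
 *	if (ust_lock())
 *		goto end;
 *	...operate on the event registry...
 * end:
 *	ust_unlock();
 */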
77
78 static CDS_LIST_HEAD(sessions);
79 static CDS_LIST_HEAD(trigger_groups);
80
81 struct cds_list_head *_lttng_get_sessions(void)
82 {
83 return &sessions;
84 }
85
86 static void _lttng_event_destroy(struct lttng_event *event);
87 static void _lttng_trigger_destroy(struct lttng_trigger *trigger);
88 static void _lttng_enum_destroy(struct lttng_enum *_enum);
89
90 static
91 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
92 static
93 void lttng_session_sync_event_enablers(struct lttng_session *session);
94 static
95 void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group);
96 static
97 void lttng_enabler_destroy(struct lttng_enabler *enabler);
98
99 /*
100 * Called with ust lock held.
101 */
102 int lttng_session_active(void)
103 {
104 struct lttng_session *iter;
105
106 cds_list_for_each_entry(iter, &sessions, node) {
107 if (iter->active)
108 return 1;
109 }
110 return 0;
111 }
112
113 static
114 int lttng_loglevel_match(int loglevel,
115 unsigned int has_loglevel,
116 enum lttng_ust_loglevel_type req_type,
117 int req_loglevel)
118 {
119 if (!has_loglevel)
120 loglevel = TRACE_DEFAULT;
121 switch (req_type) {
122 case LTTNG_UST_LOGLEVEL_RANGE:
123 if (loglevel <= req_loglevel
124 || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
125 return 1;
126 else
127 return 0;
128 case LTTNG_UST_LOGLEVEL_SINGLE:
129 if (loglevel == req_loglevel
130 || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
131 return 1;
132 else
133 return 0;
134 case LTTNG_UST_LOGLEVEL_ALL:
135 default:
136 if (loglevel <= TRACE_DEBUG)
137 return 1;
138 else
139 return 0;
140 }
141 }
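/*
 * Illustrative examples: LTTng-UST loglevels use lower numerical values
 * for higher severity, so "loglevel <= req_loglevel" means "at least as
 * severe as requested".
 *
 * - LTTNG_UST_LOGLEVEL_RANGE with req_loglevel = TRACE_WARNING matches
 *   TRACE_ERR and TRACE_WARNING events, but not TRACE_INFO events.
 * - LTTNG_UST_LOGLEVEL_SINGLE with req_loglevel = TRACE_INFO matches
 *   only events whose loglevel is exactly TRACE_INFO.
 * - A req_loglevel of -1 matches any valid loglevel for both types.
 */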
142
143 void synchronize_trace(void)
144 {
145 synchronize_rcu();
146 }
147
148 struct lttng_session *lttng_session_create(void)
149 {
150 struct lttng_session *session;
151 int i;
152
153 session = zmalloc(sizeof(struct lttng_session));
154 if (!session)
155 return NULL;
156 if (lttng_session_context_init(&session->ctx)) {
157 free(session);
158 return NULL;
159 }
160 CDS_INIT_LIST_HEAD(&session->chan_head);
161 CDS_INIT_LIST_HEAD(&session->events_head);
162 CDS_INIT_LIST_HEAD(&session->enums_head);
163 CDS_INIT_LIST_HEAD(&session->enablers_head);
164 for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
165 CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
166 for (i = 0; i < LTTNG_UST_ENUM_HT_SIZE; i++)
167 CDS_INIT_HLIST_HEAD(&session->enums_ht.table[i]);
168 cds_list_add(&session->node, &sessions);
169 return session;
170 }
171
172 struct lttng_trigger_group *lttng_trigger_group_create(void)
173 {
174 struct lttng_trigger_group *trigger_group;
175 int i;
176
177 trigger_group = zmalloc(sizeof(struct lttng_trigger_group));
178 if (!trigger_group)
179 return NULL;
180
181 CDS_INIT_LIST_HEAD(&trigger_group->enablers_head);
182 CDS_INIT_LIST_HEAD(&trigger_group->triggers_head);
183 for (i = 0; i < LTTNG_UST_TRIGGER_HT_SIZE; i++)
184 CDS_INIT_HLIST_HEAD(&trigger_group->triggers_ht.table[i]);
185
186 cds_list_add(&trigger_group->node, &trigger_groups);
187
188 return trigger_group;
189 }
190
191 /*
192 * Only used internally at session destruction.
193 */
194 static
195 void _lttng_channel_unmap(struct lttng_channel *lttng_chan)
196 {
197 struct channel *chan;
198 struct lttng_ust_shm_handle *handle;
199
200 cds_list_del(&lttng_chan->node);
201 lttng_destroy_context(lttng_chan->ctx);
202 chan = lttng_chan->chan;
203 handle = lttng_chan->handle;
204 /*
205 * note: lttng_chan is private data contained within handle. It
206 * will be freed along with the handle.
207 */
208 channel_destroy(chan, handle, 0);
209 }
210
211 static
212 void register_event(struct lttng_event *event)
213 {
214 int ret;
215 const struct lttng_event_desc *desc;
216
217 assert(event->registered == 0);
218 desc = event->desc;
219 ret = __tracepoint_probe_register_queue_release(desc->name,
220 desc->probe_callback,
221 event, desc->signature);
222 WARN_ON_ONCE(ret);
223 if (!ret)
224 event->registered = 1;
225 }
226
227 static
228 void register_trigger(struct lttng_trigger *trigger)
229 {
230 int ret;
231 const struct lttng_event_desc *desc;
232
233 assert(trigger->registered == 0);
234 desc = trigger->desc;
235 ret = __tracepoint_probe_register_queue_release(desc->name,
236 desc->u.ext.trigger_callback, trigger, desc->signature);
237 WARN_ON_ONCE(ret);
238 if (!ret)
239 trigger->registered = 1;
240 }
241
242 static
243 void unregister_event(struct lttng_event *event)
244 {
245 int ret;
246 const struct lttng_event_desc *desc;
247
248 assert(event->registered == 1);
249 desc = event->desc;
250 ret = __tracepoint_probe_unregister_queue_release(desc->name,
251 desc->probe_callback,
252 event);
253 WARN_ON_ONCE(ret);
254 if (!ret)
255 event->registered = 0;
256 }
257
258 static
259 void unregister_trigger(struct lttng_trigger *trigger)
260 {
261 int ret;
262 const struct lttng_event_desc *desc;
263
264 assert(trigger->registered == 1);
265 desc = trigger->desc;
266 ret = __tracepoint_probe_unregister_queue_release(desc->name,
267 desc->u.ext.trigger_callback, trigger);
268 WARN_ON_ONCE(ret);
269 if (!ret)
270 trigger->registered = 0;
271 }
272
273 /*
274 * Only used internally at session destruction.
275 */
276 static
277 void _lttng_event_unregister(struct lttng_event *event)
278 {
279 if (event->registered)
280 unregister_event(event);
281 }
282
283 /*
284 * Only used internally at session destruction.
285 */
286 static
287 void _lttng_trigger_unregister(struct lttng_trigger *trigger)
288 {
289 if (trigger->registered)
290 unregister_trigger(trigger);
291 }
292
293 void lttng_session_destroy(struct lttng_session *session)
294 {
295 struct lttng_channel *chan, *tmpchan;
296 struct lttng_event *event, *tmpevent;
297 struct lttng_enum *_enum, *tmp_enum;
298 struct lttng_event_enabler *event_enabler, *event_tmpenabler;
299
300 CMM_ACCESS_ONCE(session->active) = 0;
301 cds_list_for_each_entry(event, &session->events_head, node) {
302 _lttng_event_unregister(event);
303 }
304 synchronize_trace(); /* Wait for in-flight events to complete */
305 __tracepoint_probe_prune_release_queue();
306 cds_list_for_each_entry_safe(event_enabler, event_tmpenabler,
307 &session->enablers_head, node)
308 lttng_event_enabler_destroy(event_enabler);
309 cds_list_for_each_entry_safe(event, tmpevent,
310 &session->events_head, node)
311 _lttng_event_destroy(event);
312 cds_list_for_each_entry_safe(_enum, tmp_enum,
313 &session->enums_head, node)
314 _lttng_enum_destroy(_enum);
315 cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
316 _lttng_channel_unmap(chan);
317 cds_list_del(&session->node);
318 lttng_destroy_context(session->ctx);
319 free(session);
320 }
321
322 void lttng_trigger_group_destroy(
323 struct lttng_trigger_group *trigger_group)
324 {
325 int close_ret;
326 struct lttng_trigger_enabler *trigger_enabler, *tmptrigger_enabler;
327 struct lttng_trigger *trigger, *tmptrigger;
328
329 if (!trigger_group) {
330 return;
331 }
332
333 cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node)
334 _lttng_trigger_unregister(trigger);
335
336 synchronize_trace();
337
338 cds_list_for_each_entry_safe(trigger_enabler, tmptrigger_enabler,
339 &trigger_group->enablers_head, node)
340 lttng_trigger_enabler_destroy(trigger_enabler);
341
342 cds_list_for_each_entry_safe(trigger, tmptrigger,
343 &trigger_group->triggers_head, node)
344 _lttng_trigger_destroy(trigger);
345
346 /* Close the notification fd to the listener of triggers. */
347
348 lttng_ust_lock_fd_tracker();
349 close_ret = close(trigger_group->notification_fd);
350 if (!close_ret) {
351 lttng_ust_delete_fd_from_tracker(trigger_group->notification_fd);
352 } else {
353 PERROR("close");
354 abort();
355 }
356 lttng_ust_unlock_fd_tracker();
357
358 cds_list_del(&trigger_group->node);
359
360 free(trigger_group);
361 }
362
363 static
364 void lttng_enabler_destroy(struct lttng_enabler *enabler)
365 {
366 struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
367 struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
368
369 if (!enabler) {
370 return;
371 }
372
373 /* Destroy filter bytecode */
374 cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
375 &enabler->filter_bytecode_head, node) {
376 free(filter_node);
377 }
378
379 /* Destroy excluders */
380 cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
381 &enabler->excluder_head, node) {
382 free(excluder_node);
383 }
384 }
385
386 void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler)
387 {
388 if (!trigger_enabler) {
389 return;
390 }
391
392 cds_list_del(&trigger_enabler->node);
393
394 lttng_enabler_destroy(lttng_trigger_enabler_as_enabler(trigger_enabler));
395
396 free(trigger_enabler);
397 }
398
399 static
400 int lttng_enum_create(const struct lttng_enum_desc *desc,
401 struct lttng_session *session)
402 {
403 const char *enum_name = desc->name;
404 struct lttng_enum *_enum;
405 struct cds_hlist_head *head;
406 int ret = 0;
407 size_t name_len = strlen(enum_name);
408 uint32_t hash;
409 int notify_socket;
410
411 /* Check if this enum is already registered for this session. */
412 hash = jhash(enum_name, name_len, 0);
413 head = &session->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
414
415 _enum = lttng_ust_enum_get_from_desc(session, desc);
416 if (_enum) {
417 ret = -EEXIST;
418 goto exist;
419 }
420
421 notify_socket = lttng_get_notify_socket(session->owner);
422 if (notify_socket < 0) {
423 ret = notify_socket;
424 goto socket_error;
425 }
426
427 _enum = zmalloc(sizeof(*_enum));
428 if (!_enum) {
429 ret = -ENOMEM;
430 goto cache_error;
431 }
432 _enum->session = session;
433 _enum->desc = desc;
434
435 ret = ustcomm_register_enum(notify_socket,
436 session->objd,
437 enum_name,
438 desc->nr_entries,
439 desc->entries,
440 &_enum->id);
441 if (ret < 0) {
442 DBG("Error (%d) registering enumeration to sessiond", ret);
443 goto sessiond_register_error;
444 }
445 cds_list_add(&_enum->node, &session->enums_head);
446 cds_hlist_add_head(&_enum->hlist, head);
447 return 0;
448
449 sessiond_register_error:
450 free(_enum);
451 cache_error:
452 socket_error:
453 exist:
454 return ret;
455 }
456
457 static
458 int lttng_create_enum_check(const struct lttng_type *type,
459 struct lttng_session *session)
460 {
461 switch (type->atype) {
462 case atype_enum:
463 {
464 const struct lttng_enum_desc *enum_desc;
465 int ret;
466
467 enum_desc = type->u.basic.enumeration.desc;
468 ret = lttng_enum_create(enum_desc, session);
469 if (ret && ret != -EEXIST) {
470 DBG("Unable to create enum error: (%d)", ret);
471 return ret;
472 }
473 break;
474 }
475 case atype_dynamic:
476 {
477 const struct lttng_event_field *tag_field_generic;
478 const struct lttng_enum_desc *enum_desc;
479 int ret;
480
481 tag_field_generic = lttng_ust_dynamic_type_tag_field();
482 enum_desc = tag_field_generic->type.u.basic.enumeration.desc;
483 ret = lttng_enum_create(enum_desc, session);
484 if (ret && ret != -EEXIST) {
485 DBG("Unable to create enum error: (%d)", ret);
486 return ret;
487 }
488 break;
489 }
490 default:
491 /* TODO: nested types when they become supported. */
492 break;
493 }
494 return 0;
495 }
496
497 static
498 int lttng_create_all_event_enums(size_t nr_fields,
499 const struct lttng_event_field *event_fields,
500 struct lttng_session *session)
501 {
502 size_t i;
503 int ret;
504
505 /* For each field, ensure enum is part of the session. */
506 for (i = 0; i < nr_fields; i++) {
507 const struct lttng_type *type = &event_fields[i].type;
508
509 ret = lttng_create_enum_check(type, session);
510 if (ret)
511 return ret;
512 }
513 return 0;
514 }
515
516 static
517 int lttng_create_all_ctx_enums(size_t nr_fields,
518 const struct lttng_ctx_field *ctx_fields,
519 struct lttng_session *session)
520 {
521 size_t i;
522 int ret;
523
524 /* For each field, ensure enum is part of the session. */
525 for (i = 0; i < nr_fields; i++) {
526 const struct lttng_type *type = &ctx_fields[i].event_field.type;
527
528 ret = lttng_create_enum_check(type, session);
529 if (ret)
530 return ret;
531 }
532 return 0;
533 }
534
535 /*
536 * Ensure that a state-dump will be performed for this session at the end
537 * of the current handle_message().
538 */
539 int lttng_session_statedump(struct lttng_session *session)
540 {
541 session->statedump_pending = 1;
542 lttng_ust_sockinfo_session_enabled(session->owner);
543 return 0;
544 }
545
546 int lttng_session_enable(struct lttng_session *session)
547 {
548 int ret = 0;
549 struct lttng_channel *chan;
550 int notify_socket;
551
552 if (session->active) {
553 ret = -EBUSY;
554 goto end;
555 }
556
557 notify_socket = lttng_get_notify_socket(session->owner);
558 if (notify_socket < 0)
559 return notify_socket;
560
561 /* Set transient enabler state to "enabled" */
562 session->tstate = 1;
563
564 /* We need to sync enablers with session before activation. */
565 lttng_session_sync_event_enablers(session);
566
567 /*
568 * Snapshot the number of events per channel to know the type of header
569 * we need to use.
570 */
571 cds_list_for_each_entry(chan, &session->chan_head, node) {
572 const struct lttng_ctx *ctx;
573 const struct lttng_ctx_field *fields = NULL;
574 size_t nr_fields = 0;
575 uint32_t chan_id;
576
577 /* Don't change the header type across session stop/restart. */
578 if (chan->header_type)
579 continue;
580 ctx = chan->ctx;
581 if (ctx) {
582 nr_fields = ctx->nr_fields;
583 fields = ctx->fields;
584 ret = lttng_create_all_ctx_enums(nr_fields, fields,
585 session);
586 if (ret < 0) {
587 DBG("Error (%d) adding enum to session", ret);
588 return ret;
589 }
590 }
591 ret = ustcomm_register_channel(notify_socket,
592 session,
593 session->objd,
594 chan->objd,
595 nr_fields,
596 fields,
597 &chan_id,
598 &chan->header_type);
599 if (ret) {
600 DBG("Error (%d) registering channel to sessiond", ret);
601 return ret;
602 }
603 if (chan_id != chan->id) {
604 DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
605 chan_id, chan->id);
606 return -EINVAL;
607 }
608 }
609
610 /* Set atomically the state to "active" */
611 CMM_ACCESS_ONCE(session->active) = 1;
612 CMM_ACCESS_ONCE(session->been_active) = 1;
613
614 ret = lttng_session_statedump(session);
615 if (ret)
616 return ret;
617 end:
618 return ret;
619 }
620
621 int lttng_session_disable(struct lttng_session *session)
622 {
623 int ret = 0;
624
625 if (!session->active) {
626 ret = -EBUSY;
627 goto end;
628 }
629 /* Set atomically the state to "inactive" */
630 CMM_ACCESS_ONCE(session->active) = 0;
631
632 /* Set transient enabler state to "disabled" */
633 session->tstate = 0;
634 lttng_session_sync_event_enablers(session);
635 end:
636 return ret;
637 }
638
639 int lttng_channel_enable(struct lttng_channel *channel)
640 {
641 int ret = 0;
642
643 if (channel->enabled) {
644 ret = -EBUSY;
645 goto end;
646 }
647 /* Set transient enabler state to "enabled" */
648 channel->tstate = 1;
649 lttng_session_sync_event_enablers(channel->session);
650 /* Set atomically the state to "enabled" */
651 CMM_ACCESS_ONCE(channel->enabled) = 1;
652 end:
653 return ret;
654 }
655
656 int lttng_channel_disable(struct lttng_channel *channel)
657 {
658 int ret = 0;
659
660 if (!channel->enabled) {
661 ret = -EBUSY;
662 goto end;
663 }
664 /* Set atomically the state to "disabled" */
665 CMM_ACCESS_ONCE(channel->enabled) = 0;
666 /* Set transient enabler state to "disabled" */
667 channel->tstate = 0;
668 lttng_session_sync_event_enablers(channel->session);
669 end:
670 return ret;
671 }
672
673 static inline
674 struct cds_hlist_head *borrow_hash_table_bucket(
675 struct cds_hlist_head *hash_table,
676 unsigned int hash_table_size,
677 const struct lttng_event_desc *desc)
678 {
679 const char *event_name;
680 size_t name_len;
681 uint32_t hash;
682
683 event_name = desc->name;
684 name_len = strlen(event_name);
685
686 hash = jhash(event_name, name_len, 0);
687 return &hash_table[hash & (hash_table_size - 1)];
688 }
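/*
 * The bucket is selected by masking the Jenkins hash with
 * (hash_table_size - 1), which assumes the hash table sizes
 * (LTTNG_UST_EVENT_HT_SIZE, LTTNG_UST_TRIGGER_HT_SIZE) are powers
 * of two.
 */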
689
690 /*
691 * Supports event creation while tracing session is active.
692 */
693 static
694 int lttng_event_create(const struct lttng_event_desc *desc,
695 struct lttng_channel *chan)
696 {
697 struct lttng_event *event;
698 struct lttng_session *session = chan->session;
699 struct cds_hlist_head *head;
700 int ret = 0;
701 int notify_socket, loglevel;
702 const char *uri;
703
704 head = borrow_hash_table_bucket(chan->session->events_ht.table,
705 LTTNG_UST_EVENT_HT_SIZE, desc);
706
707 notify_socket = lttng_get_notify_socket(session->owner);
708 if (notify_socket < 0) {
709 ret = notify_socket;
710 goto socket_error;
711 }
712
713 ret = lttng_create_all_event_enums(desc->nr_fields, desc->fields,
714 session);
715 if (ret < 0) {
716 DBG("Error (%d) adding enum to session", ret);
717 goto create_enum_error;
718 }
719
720 /*
721 * Check if the loglevel matches. Refuse to connect the event if not.
722 */
723 event = zmalloc(sizeof(struct lttng_event));
724 if (!event) {
725 ret = -ENOMEM;
726 goto cache_error;
727 }
728 event->chan = chan;
729
730 /* Event will be enabled by enabler sync. */
731 event->enabled = 0;
732 event->registered = 0;
733 CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
734 CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
735 event->desc = desc;
736
737 if (desc->loglevel)
738 loglevel = *(*event->desc->loglevel);
739 else
740 loglevel = TRACE_DEFAULT;
741 if (desc->u.ext.model_emf_uri)
742 uri = *(desc->u.ext.model_emf_uri);
743 else
744 uri = NULL;
745
746 /* Fetch event ID from sessiond */
747 ret = ustcomm_register_event(notify_socket,
748 session,
749 session->objd,
750 chan->objd,
751 desc->name,
752 loglevel,
753 desc->signature,
754 desc->nr_fields,
755 desc->fields,
756 uri,
757 &event->id);
758 if (ret < 0) {
759 DBG("Error (%d) registering event to sessiond", ret);
760 goto sessiond_register_error;
761 }
762
763 cds_list_add(&event->node, &chan->session->events_head);
764 cds_hlist_add_head(&event->hlist, head);
765 return 0;
766
767 sessiond_register_error:
768 free(event);
769 cache_error:
770 create_enum_error:
771 socket_error:
772 return ret;
773 }
774
775 static
776 int lttng_trigger_create(const struct lttng_event_desc *desc,
777 uint64_t id, struct lttng_trigger_group *trigger_group)
778 {
779 struct lttng_trigger *trigger;
780 struct cds_hlist_head *head;
781 int ret = 0;
782
783 /*
784 * Get the hashtable bucket into which the created lttng_trigger
785 * object should be inserted.
786 */
787 head = borrow_hash_table_bucket(trigger_group->triggers_ht.table,
788 LTTNG_UST_TRIGGER_HT_SIZE, desc);
789
790 trigger = zmalloc(sizeof(struct lttng_trigger));
791 if (!trigger) {
792 ret = -ENOMEM;
793 goto error;
794 }
795
796 trigger->group = trigger_group;
797 trigger->id = id;
798
799 /* Trigger will be enabled by enabler sync. */
800 trigger->enabled = 0;
801 trigger->registered = 0;
802
803 CDS_INIT_LIST_HEAD(&trigger->bytecode_runtime_head);
804 CDS_INIT_LIST_HEAD(&trigger->enablers_ref_head);
805 trigger->desc = desc;
806
807 cds_list_add(&trigger->node, &trigger_group->triggers_head);
808 cds_hlist_add_head(&trigger->hlist, head);
809
810 return 0;
811
812 error:
813 return ret;
814 }
815
816 static
817 void _lttng_trigger_destroy(struct lttng_trigger *trigger)
818 {
819 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
820
821 /* Remove from trigger list. */
822 cds_list_del(&trigger->node);
823 /* Remove from trigger hash table. */
824 cds_hlist_del(&trigger->hlist);
825
826 lttng_free_trigger_filter_runtime(trigger);
827
828 /* Free trigger enabler refs */
829 cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
830 &trigger->enablers_ref_head, node)
831 free(enabler_ref);
832 free(trigger);
833 }
834
835 static
836 int lttng_desc_match_star_glob_enabler(const struct lttng_event_desc *desc,
837 struct lttng_enabler *enabler)
838 {
839 int loglevel = 0;
840 unsigned int has_loglevel = 0;
841
842 assert(enabler->format_type == LTTNG_ENABLER_FORMAT_STAR_GLOB);
843 if (!strutils_star_glob_match(enabler->event_param.name, SIZE_MAX,
844 desc->name, SIZE_MAX))
845 return 0;
846 if (desc->loglevel) {
847 loglevel = *(*desc->loglevel);
848 has_loglevel = 1;
849 }
850 if (!lttng_loglevel_match(loglevel,
851 has_loglevel,
852 enabler->event_param.loglevel_type,
853 enabler->event_param.loglevel))
854 return 0;
855 return 1;
856 }
857
858 static
859 int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc,
860 struct lttng_enabler *enabler)
861 {
862 int loglevel = 0;
863 unsigned int has_loglevel = 0;
864
865 assert(enabler->format_type == LTTNG_ENABLER_FORMAT_EVENT);
866 if (strcmp(desc->name, enabler->event_param.name))
867 return 0;
868 if (desc->loglevel) {
869 loglevel = *(*desc->loglevel);
870 has_loglevel = 1;
871 }
872 if (!lttng_loglevel_match(loglevel,
873 has_loglevel,
874 enabler->event_param.loglevel_type,
875 enabler->event_param.loglevel))
876 return 0;
877 return 1;
878 }
879
880 static
881 int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
882 struct lttng_enabler *enabler)
883 {
884 switch (enabler->format_type) {
885 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
886 {
887 struct lttng_ust_excluder_node *excluder;
888
889 if (!lttng_desc_match_star_glob_enabler(desc, enabler)) {
890 return 0;
891 }
892
893 /*
894 * If the matched event name also matches an excluder,
895 * return 'does not match'.
896 */
897 cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
898 int count;
899
900 for (count = 0; count < excluder->excluder.count; count++) {
901 int len;
902 char *excluder_name;
903
904 excluder_name = (char *) (excluder->excluder.names)
905 + count * LTTNG_UST_SYM_NAME_LEN;
906 len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
907 if (len > 0 && strutils_star_glob_match(excluder_name, len, desc->name, SIZE_MAX))
908 return 0;
909 }
910 }
911 return 1;
912 }
913 case LTTNG_ENABLER_FORMAT_EVENT:
914 return lttng_desc_match_event_enabler(desc, enabler);
915 default:
916 return -EINVAL;
917 }
918 }
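/*
 * For example (hypothetical names), a star-glob enabler on
 * "provider:*" with an excluder entry "provider:debug_*" matches
 * "provider:request_done" but rejects "provider:debug_alloc": the glob
 * matches both names, but the excluder turns the second one into
 * "does not match".
 */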
919
920 static
921 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
922 struct lttng_event *event)
923 {
924 if (lttng_desc_match_enabler(event->desc,
925 lttng_event_enabler_as_enabler(event_enabler))
926 && event->chan == event_enabler->chan)
927 return 1;
928 else
929 return 0;
930 }
931
932 static
933 int lttng_trigger_enabler_match_trigger(
934 struct lttng_trigger_enabler *trigger_enabler,
935 struct lttng_trigger *trigger)
936 {
937 int desc_matches = lttng_desc_match_enabler(trigger->desc,
938 lttng_trigger_enabler_as_enabler(trigger_enabler));
939
940 if (desc_matches && trigger->group == trigger_enabler->group &&
941 trigger->id == trigger_enabler->id)
942 return 1;
943 else
944 return 0;
945 }
946
947 static
948 struct lttng_enabler_ref *lttng_enabler_ref(
949 struct cds_list_head *enabler_ref_list,
950 struct lttng_enabler *enabler)
951 {
952 struct lttng_enabler_ref *enabler_ref;
953
954 cds_list_for_each_entry(enabler_ref, enabler_ref_list, node) {
955 if (enabler_ref->ref == enabler)
956 return enabler_ref;
957 }
958 return NULL;
959 }
960
961 /*
962 * Create a struct lttng_event if it is missing and the corresponding event
963 * is present in the list of tracepoint probes.
964 */
965 static
966 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
967 {
968 struct lttng_session *session = event_enabler->chan->session;
969 struct lttng_probe_desc *probe_desc;
970 const struct lttng_event_desc *desc;
971 struct lttng_event *event;
972 int i;
973 struct cds_list_head *probe_list;
974
975 probe_list = lttng_get_probe_list_head();
976 /*
977 * For each probe event, if we find that a probe event matches
978 * our enabler, create an associated lttng_event if not
979 * already present.
980 */
981 cds_list_for_each_entry(probe_desc, probe_list, head) {
982 for (i = 0; i < probe_desc->nr_events; i++) {
983 int ret;
984 bool found = false;
985 struct cds_hlist_head *head;
986 struct cds_hlist_node *node;
987
988 desc = probe_desc->event_desc[i];
989 if (!lttng_desc_match_enabler(desc,
990 lttng_event_enabler_as_enabler(event_enabler)))
991 continue;
992
993 head = borrow_hash_table_bucket(
994 session->events_ht.table,
995 LTTNG_UST_EVENT_HT_SIZE, desc);
996
997 cds_hlist_for_each_entry(event, node, head, hlist) {
998 if (event->desc == desc
999 && event->chan == event_enabler->chan) {
1000 found = true;
1001 break;
1002 }
1003 }
1004 if (found)
1005 continue;
1006
1007 /*
1008 * We need to create an event for this
1009 * event probe.
1010 */
1011 ret = lttng_event_create(probe_desc->event_desc[i],
1012 event_enabler->chan);
1013 if (ret) {
1014 DBG("Unable to create event %s, error %d\n",
1015 probe_desc->event_desc[i]->name, ret);
1016 }
1017 }
1018 }
1019 }
1020
1021 static
1022 void probe_provider_event_for_each(struct lttng_probe_desc *provider_desc,
1023 void (*event_func)(struct lttng_session *session,
1024 struct lttng_event *event),
1025 void (*trigger_func)(struct lttng_trigger *trigger))
1026 {
1027 struct cds_hlist_node *node, *tmp_node;
1028 struct cds_list_head *sessionsp;
1029 unsigned int i;
1030
1031 /* Get handle on list of sessions. */
1032 sessionsp = _lttng_get_sessions();
1033
1034 /*
1035 * Iterate over all events in the probe provider description, applying the
1036 * given callbacks to each matching event or trigger in every session and trigger group.
1037 */
1038 for (i = 0; i < provider_desc->nr_events; i++) {
1039 const struct lttng_event_desc *event_desc;
1040 struct lttng_trigger_group *trigger_group;
1041 struct lttng_trigger *trigger;
1042 struct lttng_session *session;
1043 struct cds_hlist_head *head;
1044 struct lttng_event *event;
1045
1046 event_desc = provider_desc->event_desc[i];
1047
1048 /*
1049 * Iterate over all sessions to find the current event
1050 * description.
1051 */
1052 cds_list_for_each_entry(session, sessionsp, node) {
1053 /*
1054 * Get the list of events in the hashtable bucket and
1055 * iterate to find the event matching this descriptor.
1056 */
1057 head = borrow_hash_table_bucket(
1058 session->events_ht.table,
1059 LTTNG_UST_EVENT_HT_SIZE, event_desc);
1060
1061 cds_hlist_for_each_entry_safe(event, node, tmp_node, head, hlist) {
1062 if (event_desc == event->desc) {
1063 event_func(session, event);
1064 break;
1065 }
1066 }
1067 }
1068
1069 /*
1070 * Iterate over all trigger groups to find the current event
1071 * description.
1072 */
1073 cds_list_for_each_entry(trigger_group, &trigger_groups, node) {
1074 /*
1075 * Get the list of triggers in the hashtable bucket and
1076 * iterate to find the trigger matching this
1077 * descriptor.
1078 */
1079 head = borrow_hash_table_bucket(
1080 trigger_group->triggers_ht.table,
1081 LTTNG_UST_TRIGGER_HT_SIZE, event_desc);
1082
1083 cds_hlist_for_each_entry_safe(trigger, node, tmp_node, head, hlist) {
1084 if (event_desc == trigger->desc) {
1085 trigger_func(trigger);
1086 break;
1087 }
1088 }
1089 }
1090 }
1091 }
1092
1093 static
1094 void _unregister_event(struct lttng_session *session,
1095 struct lttng_event *event)
1096 {
1097 _lttng_event_unregister(event);
1098 }
1099
1100 static
1101 void _event_enum_destroy(struct lttng_session *session,
1102 struct lttng_event *event)
1103 {
1104 unsigned int i;
1105
1106 /* Destroy enums of the current event. */
1107 for (i = 0; i < event->desc->nr_fields; i++) {
1108 const struct lttng_enum_desc *enum_desc;
1109 const struct lttng_event_field *field;
1110 struct lttng_enum *curr_enum;
1111
1112 field = &(event->desc->fields[i]);
1113 if (field->type.atype != atype_enum) {
1114 continue;
1115 }
1116
1117 enum_desc = field->type.u.basic.enumeration.desc;
1118 curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc);
1119 if (curr_enum) {
1120 _lttng_enum_destroy(curr_enum);
1121 }
1122 }
1123
1124 /* Destroy event. */
1125 _lttng_event_destroy(event);
1126 }
1127
1128 /*
1129 * Iterate over all the UST sessions to unregister and destroy all probes from
1130 * the probe provider descriptor received as argument. Must be called with the
1131 * ust_lock held.
1132 */
1133 void lttng_probe_provider_unregister_events(
1134 struct lttng_probe_desc *provider_desc)
1135 {
1136 /*
1137 * Iterate over all events in the probe provider descriptions and sessions
1138 * to queue the unregistration of the events.
1139 */
1140 probe_provider_event_for_each(provider_desc, _unregister_event,
1141 _lttng_trigger_unregister);
1142
1143 /* Wait for grace period. */
1144 synchronize_trace();
1145 /* Prune the unregistration queue. */
1146 __tracepoint_probe_prune_release_queue();
1147
1148 /*
1149 * It is now safe to destroy the events and remove them from the event list
1150 * and hashtables.
1151 */
1152 probe_provider_event_for_each(provider_desc, _event_enum_destroy,
1153 _lttng_trigger_destroy);
1154 }
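/*
 * The two passes above are needed because tracer call sites may still
 * be referencing these events until the synchronize_trace() grace
 * period has elapsed: the first pass only unregisters the probes, and
 * only after the grace period is it safe to destroy the events, their
 * enums, and the matching triggers.
 */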
1155
1156 /*
1157 * Create events associated with an event enabler (if not already present),
1158 * and add backward reference from the event to the enabler.
1159 */
1160 static
1161 int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
1162 {
1163 struct lttng_session *session = event_enabler->chan->session;
1164 struct lttng_event *event;
1165
1166 /* First ensure that probe events are created for this enabler. */
1167 lttng_create_event_if_missing(event_enabler);
1168
1169 /* For each event matching enabler in session event list. */
1170 cds_list_for_each_entry(event, &session->events_head, node) {
1171 struct lttng_enabler_ref *enabler_ref;
1172
1173 if (!lttng_event_enabler_match_event(event_enabler, event))
1174 continue;
1175
1176 enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
1177 lttng_event_enabler_as_enabler(event_enabler));
1178 if (!enabler_ref) {
1179 /*
1180 * If no backward ref, create it.
1181 * Add backward ref from event to enabler.
1182 */
1183 enabler_ref = zmalloc(sizeof(*enabler_ref));
1184 if (!enabler_ref)
1185 return -ENOMEM;
1186 enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
1187 cds_list_add(&enabler_ref->node,
1188 &event->enablers_ref_head);
1189 }
1190
1191 /*
1192 * Link filter bytecodes if not linked yet.
1193 */
1194 lttng_enabler_link_bytecode(event->desc,
1195 &session->ctx,
1196 &event->bytecode_runtime_head,
1197 lttng_event_enabler_as_enabler(event_enabler));
1198
1199 /* TODO: merge event context. */
1200 }
1201 return 0;
1202 }
1203
1204 /*
1205 * Called at library load: connect the probe on all enablers matching
1206 * this event.
1207 * Called with session mutex held.
1208 */
1209 int lttng_fix_pending_events(void)
1210 {
1211 struct lttng_session *session;
1212
1213 cds_list_for_each_entry(session, &sessions, node) {
1214 lttng_session_lazy_sync_event_enablers(session);
1215 }
1216 return 0;
1217 }
1218
1219 int lttng_fix_pending_triggers(void)
1220 {
1221 struct lttng_trigger_group *trigger_group;
1222
1223 cds_list_for_each_entry(trigger_group, &trigger_groups, node) {
1224 lttng_trigger_group_sync_enablers(trigger_group);
1225 }
1226 return 0;
1227 }
1228
1229 /*
1230 * For each session of the owner thread, execute pending statedump.
1231 * Only dump state for the sessions owned by the caller thread, because
1232 * we don't keep ust_lock across the entire iteration.
1233 */
1234 void lttng_handle_pending_statedump(void *owner)
1235 {
1236 struct lttng_session *session;
1237
1238 /* Execute state dump */
1239 do_lttng_ust_statedump(owner);
1240
1241 /* Clear pending state dump */
1242 if (ust_lock()) {
1243 goto end;
1244 }
1245 cds_list_for_each_entry(session, &sessions, node) {
1246 if (session->owner != owner)
1247 continue;
1248 if (!session->statedump_pending)
1249 continue;
1250 session->statedump_pending = 0;
1251 }
1252 end:
1253 ust_unlock();
1254 return;
1255 }
1256
1257 /*
1258 * Only used internally at session destruction.
1259 */
1260 static
1261 void _lttng_event_destroy(struct lttng_event *event)
1262 {
1263 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1264
1265 /* Remove from event list. */
1266 cds_list_del(&event->node);
1267 /* Remove from event hash table. */
1268 cds_hlist_del(&event->hlist);
1269
1270 lttng_destroy_context(event->ctx);
1271 lttng_free_event_filter_runtime(event);
1272 /* Free event enabler refs */
1273 cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1274 &event->enablers_ref_head, node)
1275 free(enabler_ref);
1276 free(event);
1277 }
1278
1279 static
1280 void _lttng_enum_destroy(struct lttng_enum *_enum)
1281 {
1282 cds_list_del(&_enum->node);
1283 cds_hlist_del(&_enum->hlist);
1284 free(_enum);
1285 }
1286
1287 void lttng_ust_events_exit(void)
1288 {
1289 struct lttng_session *session, *tmpsession;
1290
1291 cds_list_for_each_entry_safe(session, tmpsession, &sessions, node)
1292 lttng_session_destroy(session);
1293 }
1294
1295 /*
1296 * Enabler management.
1297 */
1298 struct lttng_event_enabler *lttng_event_enabler_create(
1299 enum lttng_enabler_format_type format_type,
1300 struct lttng_ust_event *event_param,
1301 struct lttng_channel *chan)
1302 {
1303 struct lttng_event_enabler *event_enabler;
1304
1305 event_enabler = zmalloc(sizeof(*event_enabler));
1306 if (!event_enabler)
1307 return NULL;
1308 event_enabler->base.format_type = format_type;
1309 CDS_INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
1310 CDS_INIT_LIST_HEAD(&event_enabler->base.excluder_head);
1311 memcpy(&event_enabler->base.event_param, event_param,
1312 sizeof(event_enabler->base.event_param));
1313 event_enabler->chan = chan;
1314 /* ctx left NULL */
1315 event_enabler->base.enabled = 0;
1316 cds_list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
1317 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1318
1319 return event_enabler;
1320 }
1321
1322 struct lttng_trigger_enabler *lttng_trigger_enabler_create(
1323 struct lttng_trigger_group *trigger_group,
1324 enum lttng_enabler_format_type format_type,
1325 struct lttng_ust_trigger *trigger_param)
1326 {
1327 struct lttng_trigger_enabler *trigger_enabler;
1328
1329 trigger_enabler = zmalloc(sizeof(*trigger_enabler));
1330 if (!trigger_enabler)
1331 return NULL;
1332 trigger_enabler->base.format_type = format_type;
1333 CDS_INIT_LIST_HEAD(&trigger_enabler->base.filter_bytecode_head);
1334 CDS_INIT_LIST_HEAD(&trigger_enabler->base.excluder_head);
1335
1336 trigger_enabler->id = trigger_param->id;
1337
1338 memcpy(&trigger_enabler->base.event_param.name, trigger_param->name,
1339 sizeof(trigger_enabler->base.event_param.name));
1340 trigger_enabler->base.event_param.instrumentation = trigger_param->instrumentation;
1341 trigger_enabler->base.event_param.loglevel = trigger_param->loglevel;
1342 trigger_enabler->base.event_param.loglevel_type = trigger_param->loglevel_type;
1343
1344 trigger_enabler->base.enabled = 0;
1345 trigger_enabler->group = trigger_group;
1346
1347 cds_list_add(&trigger_enabler->node, &trigger_group->enablers_head);
1348
1349 lttng_trigger_group_sync_enablers(trigger_group);
1350
1351 return trigger_enabler;
1352 }
1353
1354 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
1355 {
1356 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
1357 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1358
1359 return 0;
1360 }
1361
1362 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
1363 {
1364 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
1365 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1366
1367 return 0;
1368 }
1369
1370 static
1371 void _lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1372 struct lttng_ust_filter_bytecode_node *bytecode)
1373 {
1374 bytecode->enabler = enabler;
1375 cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
1376 }
1377
1378 int lttng_event_enabler_attach_bytecode(struct lttng_event_enabler *event_enabler,
1379 struct lttng_ust_filter_bytecode_node *bytecode)
1380 {
1381 _lttng_enabler_attach_bytecode(
1382 lttng_event_enabler_as_enabler(event_enabler), bytecode);
1383
1384 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1385 return 0;
1386 }
1387
1388 static
1389 void _lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
1390 struct lttng_ust_excluder_node *excluder)
1391 {
1392 excluder->enabler = enabler;
1393 cds_list_add_tail(&excluder->node, &enabler->excluder_head);
1394 }
1395
1396 int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *event_enabler,
1397 struct lttng_ust_excluder_node *excluder)
1398 {
1399 _lttng_enabler_attach_exclusion(
1400 lttng_event_enabler_as_enabler(event_enabler), excluder);
1401
1402 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1403 return 0;
1404 }
1405
1406 int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler)
1407 {
1408 lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 1;
1409 lttng_trigger_group_sync_enablers(trigger_enabler->group);
1410
1411 return 0;
1412 }
1413
1414 int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler)
1415 {
1416 lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 0;
1417 lttng_trigger_group_sync_enablers(trigger_enabler->group);
1418
1419 return 0;
1420 }
1421
1422 int lttng_trigger_enabler_attach_bytecode(struct lttng_trigger_enabler *trigger_enabler,
1423 struct lttng_ust_filter_bytecode_node *bytecode)
1424 {
1425 _lttng_enabler_attach_bytecode(
1426 lttng_trigger_enabler_as_enabler(trigger_enabler), bytecode);
1427
1428 lttng_trigger_group_sync_enablers(trigger_enabler->group);
1429 return 0;
1430 }
1431
1432 int lttng_trigger_enabler_attach_exclusion(
1433 struct lttng_trigger_enabler *trigger_enabler,
1434 struct lttng_ust_excluder_node *excluder)
1435 {
1436 _lttng_enabler_attach_exclusion(
1437 lttng_trigger_enabler_as_enabler(trigger_enabler), excluder);
1438
1439 lttng_trigger_group_sync_enablers(trigger_enabler->group);
1440 return 0;
1441 }
1442
1443 int lttng_attach_context(struct lttng_ust_context *context_param,
1444 union ust_args *uargs,
1445 struct lttng_ctx **ctx, struct lttng_session *session)
1446 {
1447 /*
1448 * We cannot attach a context after trace has been started for a
1449 * session because the metadata does not allow expressing this
1450 * information outside of the original channel scope.
1451 */
1452 if (session->been_active)
1453 return -EPERM;
1454
1455 switch (context_param->ctx) {
1456 case LTTNG_UST_CONTEXT_PTHREAD_ID:
1457 return lttng_add_pthread_id_to_ctx(ctx);
1458 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
1459 {
1460 struct lttng_ust_perf_counter_ctx *perf_ctx_param;
1461
1462 perf_ctx_param = &context_param->u.perf_counter;
1463 return lttng_add_perf_counter_to_ctx(
1464 perf_ctx_param->type,
1465 perf_ctx_param->config,
1466 perf_ctx_param->name,
1467 ctx);
1468 }
1469 case LTTNG_UST_CONTEXT_VTID:
1470 return lttng_add_vtid_to_ctx(ctx);
1471 case LTTNG_UST_CONTEXT_VPID:
1472 return lttng_add_vpid_to_ctx(ctx);
1473 case LTTNG_UST_CONTEXT_PROCNAME:
1474 return lttng_add_procname_to_ctx(ctx);
1475 case LTTNG_UST_CONTEXT_IP:
1476 return lttng_add_ip_to_ctx(ctx);
1477 case LTTNG_UST_CONTEXT_CPU_ID:
1478 return lttng_add_cpu_id_to_ctx(ctx);
1479 case LTTNG_UST_CONTEXT_APP_CONTEXT:
1480 return lttng_ust_add_app_context_to_ctx_rcu(uargs->app_context.ctxname,
1481 ctx);
1482 case LTTNG_UST_CONTEXT_CGROUP_NS:
1483 return lttng_add_cgroup_ns_to_ctx(ctx);
1484 case LTTNG_UST_CONTEXT_IPC_NS:
1485 return lttng_add_ipc_ns_to_ctx(ctx);
1486 case LTTNG_UST_CONTEXT_MNT_NS:
1487 return lttng_add_mnt_ns_to_ctx(ctx);
1488 case LTTNG_UST_CONTEXT_NET_NS:
1489 return lttng_add_net_ns_to_ctx(ctx);
1490 case LTTNG_UST_CONTEXT_PID_NS:
1491 return lttng_add_pid_ns_to_ctx(ctx);
1492 case LTTNG_UST_CONTEXT_USER_NS:
1493 return lttng_add_user_ns_to_ctx(ctx);
1494 case LTTNG_UST_CONTEXT_UTS_NS:
1495 return lttng_add_uts_ns_to_ctx(ctx);
1496 case LTTNG_UST_CONTEXT_VUID:
1497 return lttng_add_vuid_to_ctx(ctx);
1498 case LTTNG_UST_CONTEXT_VEUID:
1499 return lttng_add_veuid_to_ctx(ctx);
1500 case LTTNG_UST_CONTEXT_VSUID:
1501 return lttng_add_vsuid_to_ctx(ctx);
1502 case LTTNG_UST_CONTEXT_VGID:
1503 return lttng_add_vgid_to_ctx(ctx);
1504 case LTTNG_UST_CONTEXT_VEGID:
1505 return lttng_add_vegid_to_ctx(ctx);
1506 case LTTNG_UST_CONTEXT_VSGID:
1507 return lttng_add_vsgid_to_ctx(ctx);
1508 default:
1509 return -EINVAL;
1510 }
1511 }
1512
1513 int lttng_event_enabler_attach_context(struct lttng_event_enabler *enabler,
1514 struct lttng_ust_context *context_param)
1515 {
1516 return -ENOSYS;
1517 }
1518
1519 void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
1520 {
1521 if (!event_enabler) {
1522 return;
1523 }
1524 cds_list_del(&event_enabler->node);
1525
1526 lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
1527
1528 lttng_destroy_context(event_enabler->ctx);
1529 free(event_enabler);
1530 }
1531
1532 /*
1533 * lttng_session_sync_event_enablers should be called just before starting a
1534 * session.
1535 */
1536 static
1537 void lttng_session_sync_event_enablers(struct lttng_session *session)
1538 {
1539 struct lttng_event_enabler *event_enabler;
1540 struct lttng_event *event;
1541
1542 cds_list_for_each_entry(event_enabler, &session->enablers_head, node)
1543 lttng_event_enabler_ref_events(event_enabler);
1544 /*
1545 * For each event, if at least one of its enablers is enabled,
1546 * and its channel and session transient states are enabled, we
1547 * enable the event, else we disable it.
1548 */
1549 cds_list_for_each_entry(event, &session->events_head, node) {
1550 struct lttng_enabler_ref *enabler_ref;
1551 struct lttng_bytecode_runtime *runtime;
1552 int enabled = 0, has_enablers_without_bytecode = 0;
1553
1554 /* Enable events */
1555 cds_list_for_each_entry(enabler_ref,
1556 &event->enablers_ref_head, node) {
1557 if (enabler_ref->ref->enabled) {
1558 enabled = 1;
1559 break;
1560 }
1561 }
1562 /*
1563 * Enabled state is based on the union of enablers, with the
1564 * intersection of session and channel transient enable
1565 * states.
1566 */
1567 enabled = enabled && session->tstate && event->chan->tstate;
1568
1569 CMM_STORE_SHARED(event->enabled, enabled);
1570 /*
1571 * Sync tracepoint registration with event enabled
1572 * state.
1573 */
1574 if (enabled) {
1575 if (!event->registered)
1576 register_event(event);
1577 } else {
1578 if (event->registered)
1579 unregister_event(event);
1580 }
1581
1582 /* Check if has enablers without bytecode enabled */
1583 cds_list_for_each_entry(enabler_ref,
1584 &event->enablers_ref_head, node) {
1585 if (enabler_ref->ref->enabled
1586 && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
1587 has_enablers_without_bytecode = 1;
1588 break;
1589 }
1590 }
1591 event->has_enablers_without_bytecode =
1592 has_enablers_without_bytecode;
1593
1594 /* Enable filters */
1595 cds_list_for_each_entry(runtime,
1596 &event->bytecode_runtime_head, node) {
1597 lttng_filter_sync_state(runtime);
1598 }
1599 }
1600 __tracepoint_probe_prune_release_queue();
1601 }
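/*
 * In short, after a sync each event ends up with:
 *
 *	event->enabled = (at least one referencing enabler is enabled)
 *			 && session->tstate && event->chan->tstate;
 *
 * and its tracepoint probe registration is kept consistent with that
 * state.
 */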
1602
1603 static
1604 void lttng_create_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
1605 {
1606 struct lttng_trigger_group *trigger_group = trigger_enabler->group;
1607 struct lttng_probe_desc *probe_desc;
1608 struct cds_list_head *probe_list;
1609 int i;
1610
1611 probe_list = lttng_get_probe_list_head();
1612
1613 cds_list_for_each_entry(probe_desc, probe_list, head) {
1614 for (i = 0; i < probe_desc->nr_events; i++) {
1615 int ret;
1616 bool found = false;
1617 const struct lttng_event_desc *desc;
1618 struct lttng_trigger *trigger;
1619 struct cds_hlist_head *head;
1620 struct cds_hlist_node *node;
1621
1622 desc = probe_desc->event_desc[i];
1623 if (!lttng_desc_match_enabler(desc,
1624 lttng_trigger_enabler_as_enabler(trigger_enabler)))
1625 continue;
1626
1627 /*
1628 * Given the current trigger group, get the bucket that
1629 * the target trigger would be if it was already
1630 * created.
1631 */
1632 head = borrow_hash_table_bucket(
1633 trigger_group->triggers_ht.table,
1634 LTTNG_UST_TRIGGER_HT_SIZE, desc);
1635
1636 cds_hlist_for_each_entry(trigger, node, head, hlist) {
1637 /*
1638 * Check if trigger already exists by checking
1639 * if the trigger and enabler share the same
1640 * description and id.
1641 */
1642 if (trigger->desc == desc &&
1643 trigger->id == trigger_enabler->id) {
1644 found = true;
1645 break;
1646 }
1647 }
1648
1649 if (found)
1650 continue;
1651
1652 /*
1653 * We need to create a trigger for this event probe.
1654 */
1655 ret = lttng_trigger_create(desc, trigger_enabler->id,
1656 trigger_group);
1657 if (ret) {
1658 DBG("Unable to create trigger %s, error %d\n",
1659 probe_desc->event_desc[i]->name, ret);
1660 }
1661 }
1662 }
1663 }
1664
1665 void lttng_trigger_send_notification(struct lttng_trigger *trigger)
1666 {
1667 /*
1668 * We want this write to be atomic AND non-blocking, meaning that we
1669 * want to write either everything OR nothing.
1670 * According to `pipe(7)`, writes of at most `PIPE_BUF` bytes are
1671 * guaranteed to be atomic, so we assert that the message we send is no
1672 * larger than PIPE_BUF.
1673 */
1674 struct lttng_ust_trigger_notification notif;
1675 ssize_t ret;
1676
1677 assert(trigger);
1678 assert(trigger->group);
1679 assert(sizeof(notif) <= PIPE_BUF);
1680
1681 notif.id = trigger->id;
1682
1683 ret = patient_write(trigger->group->notification_fd, &notif,
1684 sizeof(notif));
1685 if (ret == -1) {
1686 if (errno == EAGAIN) {
1687 DBG("Cannot send trigger notification without blocking: %s",
1688 strerror(errno));
1689 } else {
1690 DBG("Error to sending trigger notification: %s",
1691 strerror(errno));
1692 abort();
1693 }
1694 }
1695 }
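/*
 * If the notification pipe is full (the notification_fd is presumably
 * set up as non-blocking), the notification is dropped with a debug
 * message rather than blocking the traced application; any other
 * write error is treated as fatal.
 */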
1696
1697 /*
1698 * Create triggers associated with a trigger enabler (if not already present).
1699 */
1700 static
1701 int lttng_trigger_enabler_ref_triggers(struct lttng_trigger_enabler *trigger_enabler)
1702 {
1703 struct lttng_trigger_group *trigger_group = trigger_enabler->group;
1704 struct lttng_trigger *trigger;
1705
1706 /* First, ensure that probe triggers are created for this enabler. */
1707 lttng_create_trigger_if_missing(trigger_enabler);
1708
1709 /* Link the created trigger with its associated enabler. */
1710 cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node) {
1711 struct lttng_enabler_ref *enabler_ref;
1712
1713 if (!lttng_trigger_enabler_match_trigger(trigger_enabler, trigger))
1714 continue;
1715
1716 enabler_ref = lttng_enabler_ref(&trigger->enablers_ref_head,
1717 lttng_trigger_enabler_as_enabler(trigger_enabler));
1718 if (!enabler_ref) {
1719 /*
1720 * If no backward ref, create it.
1721 * Add backward ref from trigger to enabler.
1722 */
1723 enabler_ref = zmalloc(sizeof(*enabler_ref));
1724 if (!enabler_ref)
1725 return -ENOMEM;
1726
1727 enabler_ref->ref = lttng_trigger_enabler_as_enabler(
1728 trigger_enabler);
1729 cds_list_add(&enabler_ref->node,
1730 &trigger->enablers_ref_head);
1731 }
1732
1733 /*
1734 * Link filter bytecodes if not linked yet.
1735 */
1736 lttng_enabler_link_bytecode(trigger->desc,
1737 &trigger_group->ctx, &trigger->bytecode_runtime_head,
1738 lttng_trigger_enabler_as_enabler(trigger_enabler));
1739 }
1740 return 0;
1741 }
1742
1743 static
1744 void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group)
1745 {
1746 struct lttng_trigger_enabler *trigger_enabler;
1747 struct lttng_trigger *trigger;
1748
1749 cds_list_for_each_entry(trigger_enabler, &trigger_group->enablers_head, node) {
1750 /*
1751 * Only link enabled enablers to triggers; the user might
1752 * still be attaching a filter or an exclusion to the
1753 * trigger_enabler.
1754 */
1755 if (!lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled)
1756 continue;
1757
1758 lttng_trigger_enabler_ref_triggers(trigger_enabler);
1759 }
1760
1761 /*
1762 * For each trigger, if at least one of its enablers is enabled,
1763 * we enable the trigger, else we disable it.
1764 */
1765 cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node) {
1766 struct lttng_enabler_ref *enabler_ref;
1767 struct lttng_bytecode_runtime *runtime;
1768 int enabled = 0, has_enablers_without_bytecode = 0;
1769
1770 /* Enable triggers */
1771 cds_list_for_each_entry(enabler_ref,
1772 &trigger->enablers_ref_head, node) {
1773 if (enabler_ref->ref->enabled) {
1774 enabled = 1;
1775 break;
1776 }
1777 }
1778
1779 CMM_STORE_SHARED(trigger->enabled, enabled);
1780 /*
1781 * Sync tracepoint registration with trigger enabled
1782 * state.
1783 */
1784 if (enabled) {
1785 if (!trigger->registered)
1786 register_trigger(trigger);
1787 } else {
1788 if (trigger->registered)
1789 unregister_trigger(trigger);
1790 }
1791
1792 /* Check if has enablers without bytecode enabled */
1793 cds_list_for_each_entry(enabler_ref,
1794 &trigger->enablers_ref_head, node) {
1795 if (enabler_ref->ref->enabled
1796 && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
1797 has_enablers_without_bytecode = 1;
1798 break;
1799 }
1800 }
1801 trigger->has_enablers_without_bytecode =
1802 has_enablers_without_bytecode;
1803
1804 /* Enable filters */
1805 cds_list_for_each_entry(runtime,
1806 &trigger->bytecode_runtime_head, node) {
1807 lttng_filter_sync_state(runtime);
1808 }
1809 }
1810 __tracepoint_probe_prune_release_queue();
1811 }
1812
1813 /*
1814 * Apply enablers to session events, adding events to session if need
1815 * be. It is required after each modification applied to an active
1816 * session, and right before session "start".
1817 * "lazy" sync means we only sync if required.
1818 */
1819 static
1820 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
1821 {
1822 /* We can skip if session is not active */
1823 if (!session->active)
1824 return;
1825 lttng_session_sync_event_enablers(session);
1826 }
1827
1828 /*
1829 * Update all sessions with the given app context.
1830 * Called with ust lock held.
1831 * This is invoked when an application context gets loaded/unloaded. It
1832 * ensures the context callbacks are in sync with the application
1833 * context (either app context callbacks, or dummy callbacks).
1834 */
1835 void lttng_ust_context_set_session_provider(const char *name,
1836 size_t (*get_size)(struct lttng_ctx_field *field, size_t offset),
1837 void (*record)(struct lttng_ctx_field *field,
1838 struct lttng_ust_lib_ring_buffer_ctx *ctx,
1839 struct lttng_channel *chan),
1840 void (*get_value)(struct lttng_ctx_field *field,
1841 struct lttng_ctx_value *value))
1842 {
1843 struct lttng_session *session;
1844
1845 cds_list_for_each_entry(session, &sessions, node) {
1846 struct lttng_channel *chan;
1847 struct lttng_event *event;
1848 int ret;
1849
1850 ret = lttng_ust_context_set_provider_rcu(&session->ctx,
1851 name, get_size, record, get_value);
1852 if (ret)
1853 abort();
1854 cds_list_for_each_entry(chan, &session->chan_head, node) {
1855 ret = lttng_ust_context_set_provider_rcu(&chan->ctx,
1856 name, get_size, record, get_value);
1857 if (ret)
1858 abort();
1859 }
1860 cds_list_for_each_entry(event, &session->events_head, node) {
1861 ret = lttng_ust_context_set_provider_rcu(&event->ctx,
1862 name, get_size, record, get_value);
1863 if (ret)
1864 abort();
1865 }
1866 }
1867 }
1868
1869 /*
1870 * Update all trigger groups with the given app context.
1871 * Called with ust lock held.
1872 * This is invoked when an application context gets loaded/unloaded. It
1873 * ensures the context callbacks are in sync with the application
1874 * context (either app context callbacks, or dummy callbacks).
1875 */
1876 void lttng_ust_context_set_trigger_group_provider(const char *name,
1877 size_t (*get_size)(struct lttng_ctx_field *field, size_t offset),
1878 void (*record)(struct lttng_ctx_field *field,
1879 struct lttng_ust_lib_ring_buffer_ctx *ctx,
1880 struct lttng_channel *chan),
1881 void (*get_value)(struct lttng_ctx_field *field,
1882 struct lttng_ctx_value *value))
1883 {
1884 struct lttng_trigger_group *trigger_group;
1885
1886 cds_list_for_each_entry(trigger_group, &trigger_groups, node) {
1887 int ret;
1888
1889 ret = lttng_ust_context_set_provider_rcu(&trigger_group->ctx,
1890 name, get_size, record, get_value);
1891 if (ret)
1892 abort();
1893 }
1894 }