2 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
3 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 * SPDX-License-Identifier: GPL-2.0-only
19 #include <sys/types.h>
21 #include <urcu/compiler.h>
24 #include <common/bytecode/bytecode.h>
25 #include <common/common.h>
26 #include <common/hashtable/utils.h>
27 #include <lttng/event-rule/event-rule.h>
28 #include <lttng/event-rule/event-rule-internal.h>
29 #include <lttng/event-rule/tracepoint.h>
30 #include <lttng/condition/condition.h>
31 #include <lttng/condition/event-rule-internal.h>
32 #include <lttng/condition/event-rule.h>
33 #include <lttng/trigger/trigger-internal.h>
34 #include <common/sessiond-comm/sessiond-comm.h>
36 #include "buffer-registry.h"
37 #include "condition-internal.h"
39 #include "health-sessiond.h"
41 #include "ust-consumer.h"
42 #include "lttng-ust-ctl.h"
43 #include "lttng-ust-error.h"
46 #include "lttng-sessiond.h"
47 #include "notification-thread-commands.h"
50 #include "trigger-error-accounting.h"
53 struct lttng_ht
*ust_app_ht
;
54 struct lttng_ht
*ust_app_ht_by_sock
;
55 struct lttng_ht
*ust_app_ht_by_notify_sock
;
58 int ust_app_flush_app_session(struct ust_app
*app
, struct ust_app_session
*ua_sess
);
60 /* Next available channel key. Access under next_channel_key_lock. */
61 static uint64_t _next_channel_key
;
62 static pthread_mutex_t next_channel_key_lock
= PTHREAD_MUTEX_INITIALIZER
;
64 /* Next available session ID. Access under next_session_id_lock. */
65 static uint64_t _next_session_id
;
66 static pthread_mutex_t next_session_id_lock
= PTHREAD_MUTEX_INITIALIZER
;
69 * Return the incremented value of next_channel_key.
71 static uint64_t get_next_channel_key(void)
75 pthread_mutex_lock(&next_channel_key_lock
);
76 ret
= ++_next_channel_key
;
77 pthread_mutex_unlock(&next_channel_key_lock
);
82 * Return the atomically incremented value of next_session_id.
84 static uint64_t get_next_session_id(void)
88 pthread_mutex_lock(&next_session_id_lock
);
89 ret
= ++_next_session_id
;
90 pthread_mutex_unlock(&next_session_id_lock
);
94 static void copy_channel_attr_to_ustctl(
95 struct ustctl_consumer_channel_attr
*attr
,
96 struct lttng_ust_channel_attr
*uattr
)
98 /* Copy event attributes since the layout is different. */
99 attr
->subbuf_size
= uattr
->subbuf_size
;
100 attr
->num_subbuf
= uattr
->num_subbuf
;
101 attr
->overwrite
= uattr
->overwrite
;
102 attr
->switch_timer_interval
= uattr
->switch_timer_interval
;
103 attr
->read_timer_interval
= uattr
->read_timer_interval
;
104 attr
->output
= uattr
->output
;
105 attr
->blocking_timeout
= uattr
->u
.s
.blocking_timeout
;
109 * Match function for the hash table lookup.
111 * It matches an ust app event based on three attributes which are the event
112 * name, the filter bytecode and the loglevel.
114 static int ht_match_ust_app_event(struct cds_lfht_node
*node
, const void *_key
)
116 struct ust_app_event
*event
;
117 const struct ust_app_ht_key
*key
;
118 int ev_loglevel_value
;
123 event
= caa_container_of(node
, struct ust_app_event
, node
.node
);
125 ev_loglevel_value
= event
->attr
.loglevel
;
127 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
130 if (strncmp(event
->attr
.name
, key
->name
, sizeof(event
->attr
.name
)) != 0) {
134 /* Event loglevel. */
135 if (ev_loglevel_value
!= key
->loglevel_type
) {
136 if (event
->attr
.loglevel_type
== LTTNG_UST_LOGLEVEL_ALL
137 && key
->loglevel_type
== 0 &&
138 ev_loglevel_value
== -1) {
140 * Match is accepted. This is because on event creation, the
141 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
142 * -1 are accepted for this loglevel type since 0 is the one set by
143 * the API when receiving an enable event.
150 /* One of the filters is NULL, fail. */
151 if ((key
->filter
&& !event
->filter
) || (!key
->filter
&& event
->filter
)) {
155 if (key
->filter
&& event
->filter
) {
156 /* Both filters exists, check length followed by the bytecode. */
157 if (event
->filter
->len
!= key
->filter
->len
||
158 memcmp(event
->filter
->data
, key
->filter
->data
,
159 event
->filter
->len
) != 0) {
164 /* One of the exclusions is NULL, fail. */
165 if ((key
->exclusion
&& !event
->exclusion
) || (!key
->exclusion
&& event
->exclusion
)) {
169 if (key
->exclusion
&& event
->exclusion
) {
170 /* Both exclusions exists, check count followed by the names. */
171 if (event
->exclusion
->count
!= key
->exclusion
->count
||
172 memcmp(event
->exclusion
->names
, key
->exclusion
->names
,
173 event
->exclusion
->count
* LTTNG_UST_SYM_NAME_LEN
) != 0) {
187 * Unique add of an ust app event in the given ht. This uses the custom
188 * ht_match_ust_app_event match function and the event name as hash.
190 static void add_unique_ust_app_event(struct ust_app_channel
*ua_chan
,
191 struct ust_app_event
*event
)
193 struct cds_lfht_node
*node_ptr
;
194 struct ust_app_ht_key key
;
198 assert(ua_chan
->events
);
201 ht
= ua_chan
->events
;
202 key
.name
= event
->attr
.name
;
203 key
.filter
= event
->filter
;
204 key
.loglevel_type
= event
->attr
.loglevel
;
205 key
.exclusion
= event
->exclusion
;
207 node_ptr
= cds_lfht_add_unique(ht
->ht
,
208 ht
->hash_fct(event
->node
.key
, lttng_ht_seed
),
209 ht_match_ust_app_event
, &key
, &event
->node
.node
);
210 assert(node_ptr
== &event
->node
.node
);
214 * Close the notify socket from the given RCU head object. This MUST be called
215 * through a call_rcu().
217 static void close_notify_sock_rcu(struct rcu_head
*head
)
220 struct ust_app_notify_sock_obj
*obj
=
221 caa_container_of(head
, struct ust_app_notify_sock_obj
, head
);
223 /* Must have a valid fd here. */
224 assert(obj
->fd
>= 0);
226 ret
= close(obj
->fd
);
228 ERR("close notify sock %d RCU", obj
->fd
);
230 lttng_fd_put(LTTNG_FD_APPS
, 1);
236 * Return the session registry according to the buffer type of the given
239 * A registry per UID object MUST exists before calling this function or else
240 * it assert() if not found. RCU read side lock must be acquired.
242 static struct ust_registry_session
*get_session_registry(
243 struct ust_app_session
*ua_sess
)
245 struct ust_registry_session
*registry
= NULL
;
249 switch (ua_sess
->buffer_type
) {
250 case LTTNG_BUFFER_PER_PID
:
252 struct buffer_reg_pid
*reg_pid
= buffer_reg_pid_find(ua_sess
->id
);
256 registry
= reg_pid
->registry
->reg
.ust
;
259 case LTTNG_BUFFER_PER_UID
:
261 struct buffer_reg_uid
*reg_uid
= buffer_reg_uid_find(
262 ua_sess
->tracing_id
, ua_sess
->bits_per_long
,
263 lttng_credentials_get_uid(&ua_sess
->real_credentials
));
267 registry
= reg_uid
->registry
->reg
.ust
;
279 * Delete ust context safely. RCU read lock must be held before calling
283 void delete_ust_app_ctx(int sock
, struct ust_app_ctx
*ua_ctx
,
291 pthread_mutex_lock(&app
->sock_lock
);
292 ret
= ustctl_release_object(sock
, ua_ctx
->obj
);
293 pthread_mutex_unlock(&app
->sock_lock
);
294 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
295 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
296 sock
, ua_ctx
->obj
->handle
, ret
);
304 * Delete ust app event safely. RCU read lock must be held before calling
308 void delete_ust_app_event(int sock
, struct ust_app_event
*ua_event
,
315 free(ua_event
->filter
);
316 if (ua_event
->exclusion
!= NULL
)
317 free(ua_event
->exclusion
);
318 if (ua_event
->obj
!= NULL
) {
319 pthread_mutex_lock(&app
->sock_lock
);
320 ret
= ustctl_release_object(sock
, ua_event
->obj
);
321 pthread_mutex_unlock(&app
->sock_lock
);
322 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
323 ERR("UST app sock %d release event obj failed with ret %d",
332 * Delete ust app token event_rule safely. RCU read lock must be held before calling
333 * this function. TODO: or does it????
336 void delete_ust_app_token_event_rule(int sock
, struct ust_app_token_event_rule
*ua_token
,
343 if (ua_token
->exclusion
!= NULL
)
344 free(ua_token
->exclusion
);
345 if (ua_token
->obj
!= NULL
) {
346 pthread_mutex_lock(&app
->sock_lock
);
347 ret
= ustctl_release_object(sock
, ua_token
->obj
);
348 pthread_mutex_unlock(&app
->sock_lock
);
349 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
350 ERR("UST app sock %d release event obj failed with ret %d",
355 lttng_trigger_put(ua_token
->trigger
);
360 * Release ust data object of the given stream.
362 * Return 0 on success or else a negative value.
364 static int release_ust_app_stream(int sock
, struct ust_app_stream
*stream
,
372 pthread_mutex_lock(&app
->sock_lock
);
373 ret
= ustctl_release_object(sock
, stream
->obj
);
374 pthread_mutex_unlock(&app
->sock_lock
);
375 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
376 ERR("UST app sock %d release stream obj failed with ret %d",
379 lttng_fd_put(LTTNG_FD_APPS
, 2);
387 * Delete ust app stream safely. RCU read lock must be held before calling
391 void delete_ust_app_stream(int sock
, struct ust_app_stream
*stream
,
396 (void) release_ust_app_stream(sock
, stream
, app
);
401 * We need to execute ht_destroy outside of RCU read-side critical
402 * section and outside of call_rcu thread, so we postpone its execution
403 * using ht_cleanup_push. It is simpler than to change the semantic of
404 * the many callers of delete_ust_app_session().
407 void delete_ust_app_channel_rcu(struct rcu_head
*head
)
409 struct ust_app_channel
*ua_chan
=
410 caa_container_of(head
, struct ust_app_channel
, rcu_head
);
412 ht_cleanup_push(ua_chan
->ctx
);
413 ht_cleanup_push(ua_chan
->events
);
418 * Extract the lost packet or discarded events counter when the channel is
419 * being deleted and store the value in the parent channel so we can
420 * access it from lttng list and at stop/destroy.
422 * The session list lock must be held by the caller.
425 void save_per_pid_lost_discarded_counters(struct ust_app_channel
*ua_chan
)
427 uint64_t discarded
= 0, lost
= 0;
428 struct ltt_session
*session
;
429 struct ltt_ust_channel
*uchan
;
431 if (ua_chan
->attr
.type
!= LTTNG_UST_CHAN_PER_CPU
) {
436 session
= session_find_by_id(ua_chan
->session
->tracing_id
);
437 if (!session
|| !session
->ust_session
) {
439 * Not finding the session is not an error because there are
440 * multiple ways the channels can be torn down.
442 * 1) The session daemon can initiate the destruction of the
443 * ust app session after receiving a destroy command or
444 * during its shutdown/teardown.
445 * 2) The application, since we are in per-pid tracing, is
446 * unregistering and tearing down its ust app session.
448 * Both paths are protected by the session list lock which
449 * ensures that the accounting of lost packets and discarded
450 * events is done exactly once. The session is then unpublished
451 * from the session list, resulting in this condition.
456 if (ua_chan
->attr
.overwrite
) {
457 consumer_get_lost_packets(ua_chan
->session
->tracing_id
,
458 ua_chan
->key
, session
->ust_session
->consumer
,
461 consumer_get_discarded_events(ua_chan
->session
->tracing_id
,
462 ua_chan
->key
, session
->ust_session
->consumer
,
465 uchan
= trace_ust_find_channel_by_name(
466 session
->ust_session
->domain_global
.channels
,
469 ERR("Missing UST channel to store discarded counters");
473 uchan
->per_pid_closed_app_discarded
+= discarded
;
474 uchan
->per_pid_closed_app_lost
+= lost
;
479 session_put(session
);
484 * Delete ust app channel safely. RCU read lock must be held before calling
487 * The session list lock must be held by the caller.
490 void delete_ust_app_channel(int sock
, struct ust_app_channel
*ua_chan
,
494 struct lttng_ht_iter iter
;
495 struct ust_app_event
*ua_event
;
496 struct ust_app_ctx
*ua_ctx
;
497 struct ust_app_stream
*stream
, *stmp
;
498 struct ust_registry_session
*registry
;
502 DBG3("UST app deleting channel %s", ua_chan
->name
);
505 cds_list_for_each_entry_safe(stream
, stmp
, &ua_chan
->streams
.head
, list
) {
506 cds_list_del(&stream
->list
);
507 delete_ust_app_stream(sock
, stream
, app
);
511 cds_lfht_for_each_entry(ua_chan
->ctx
->ht
, &iter
.iter
, ua_ctx
, node
.node
) {
512 cds_list_del(&ua_ctx
->list
);
513 ret
= lttng_ht_del(ua_chan
->ctx
, &iter
);
515 delete_ust_app_ctx(sock
, ua_ctx
, app
);
519 cds_lfht_for_each_entry(ua_chan
->events
->ht
, &iter
.iter
, ua_event
,
521 ret
= lttng_ht_del(ua_chan
->events
, &iter
);
523 delete_ust_app_event(sock
, ua_event
, app
);
526 if (ua_chan
->session
->buffer_type
== LTTNG_BUFFER_PER_PID
) {
527 /* Wipe and free registry from session registry. */
528 registry
= get_session_registry(ua_chan
->session
);
530 ust_registry_channel_del_free(registry
, ua_chan
->key
,
534 * A negative socket can be used by the caller when
535 * cleaning-up a ua_chan in an error path. Skip the
536 * accounting in this case.
539 save_per_pid_lost_discarded_counters(ua_chan
);
543 if (ua_chan
->obj
!= NULL
) {
544 /* Remove channel from application UST object descriptor. */
545 iter
.iter
.node
= &ua_chan
->ust_objd_node
.node
;
546 ret
= lttng_ht_del(app
->ust_objd
, &iter
);
548 pthread_mutex_lock(&app
->sock_lock
);
549 ret
= ustctl_release_object(sock
, ua_chan
->obj
);
550 pthread_mutex_unlock(&app
->sock_lock
);
551 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
552 ERR("UST app sock %d release channel obj failed with ret %d",
555 lttng_fd_put(LTTNG_FD_APPS
, 1);
558 call_rcu(&ua_chan
->rcu_head
, delete_ust_app_channel_rcu
);
561 int ust_app_register_done(struct ust_app
*app
)
565 pthread_mutex_lock(&app
->sock_lock
);
566 ret
= ustctl_register_done(app
->sock
);
567 pthread_mutex_unlock(&app
->sock_lock
);
571 int ust_app_release_object(struct ust_app
*app
, struct lttng_ust_object_data
*data
)
576 pthread_mutex_lock(&app
->sock_lock
);
581 ret
= ustctl_release_object(sock
, data
);
583 pthread_mutex_unlock(&app
->sock_lock
);
589 * Push metadata to consumer socket.
591 * RCU read-side lock must be held to guarantee existance of socket.
592 * Must be called with the ust app session lock held.
593 * Must be called with the registry lock held.
595 * On success, return the len of metadata pushed or else a negative value.
596 * Returning a -EPIPE return value means we could not send the metadata,
597 * but it can be caused by recoverable errors (e.g. the application has
598 * terminated concurrently).
600 ssize_t
ust_app_push_metadata(struct ust_registry_session
*registry
,
601 struct consumer_socket
*socket
, int send_zero_data
)
604 char *metadata_str
= NULL
;
605 size_t len
, offset
, new_metadata_len_sent
;
607 uint64_t metadata_key
, metadata_version
;
612 metadata_key
= registry
->metadata_key
;
615 * Means that no metadata was assigned to the session. This can
616 * happens if no start has been done previously.
622 offset
= registry
->metadata_len_sent
;
623 len
= registry
->metadata_len
- registry
->metadata_len_sent
;
624 new_metadata_len_sent
= registry
->metadata_len
;
625 metadata_version
= registry
->metadata_version
;
627 DBG3("No metadata to push for metadata key %" PRIu64
,
628 registry
->metadata_key
);
630 if (send_zero_data
) {
631 DBG("No metadata to push");
637 /* Allocate only what we have to send. */
638 metadata_str
= zmalloc(len
);
640 PERROR("zmalloc ust app metadata string");
644 /* Copy what we haven't sent out. */
645 memcpy(metadata_str
, registry
->metadata
+ offset
, len
);
648 pthread_mutex_unlock(®istry
->lock
);
650 * We need to unlock the registry while we push metadata to
651 * break a circular dependency between the consumerd metadata
652 * lock and the sessiond registry lock. Indeed, pushing metadata
653 * to the consumerd awaits that it gets pushed all the way to
654 * relayd, but doing so requires grabbing the metadata lock. If
655 * a concurrent metadata request is being performed by
656 * consumerd, this can try to grab the registry lock on the
657 * sessiond while holding the metadata lock on the consumer
658 * daemon. Those push and pull schemes are performed on two
659 * different bidirectionnal communication sockets.
661 ret
= consumer_push_metadata(socket
, metadata_key
,
662 metadata_str
, len
, offset
, metadata_version
);
663 pthread_mutex_lock(®istry
->lock
);
666 * There is an acceptable race here between the registry
667 * metadata key assignment and the creation on the
668 * consumer. The session daemon can concurrently push
669 * metadata for this registry while being created on the
670 * consumer since the metadata key of the registry is
671 * assigned *before* it is setup to avoid the consumer
672 * to ask for metadata that could possibly be not found
673 * in the session daemon.
675 * The metadata will get pushed either by the session
676 * being stopped or the consumer requesting metadata if
677 * that race is triggered.
679 if (ret
== -LTTCOMM_CONSUMERD_CHANNEL_FAIL
) {
682 ERR("Error pushing metadata to consumer");
688 * Metadata may have been concurrently pushed, since
689 * we're not holding the registry lock while pushing to
690 * consumer. This is handled by the fact that we send
691 * the metadata content, size, and the offset at which
692 * that metadata belongs. This may arrive out of order
693 * on the consumer side, and the consumer is able to
694 * deal with overlapping fragments. The consumer
695 * supports overlapping fragments, which must be
696 * contiguous starting from offset 0. We keep the
697 * largest metadata_len_sent value of the concurrent
700 registry
->metadata_len_sent
=
701 max_t(size_t, registry
->metadata_len_sent
,
702 new_metadata_len_sent
);
711 * On error, flag the registry that the metadata is
712 * closed. We were unable to push anything and this
713 * means that either the consumer is not responding or
714 * the metadata cache has been destroyed on the
717 registry
->metadata_closed
= 1;
725 * For a given application and session, push metadata to consumer.
726 * Either sock or consumer is required : if sock is NULL, the default
727 * socket to send the metadata is retrieved from consumer, if sock
728 * is not NULL we use it to send the metadata.
729 * RCU read-side lock must be held while calling this function,
730 * therefore ensuring existance of registry. It also ensures existance
731 * of socket throughout this function.
733 * Return 0 on success else a negative error.
734 * Returning a -EPIPE return value means we could not send the metadata,
735 * but it can be caused by recoverable errors (e.g. the application has
736 * terminated concurrently).
738 static int push_metadata(struct ust_registry_session
*registry
,
739 struct consumer_output
*consumer
)
743 struct consumer_socket
*socket
;
748 pthread_mutex_lock(®istry
->lock
);
749 if (registry
->metadata_closed
) {
754 /* Get consumer socket to use to push the metadata.*/
755 socket
= consumer_find_socket_by_bitness(registry
->bits_per_long
,
762 ret
= ust_app_push_metadata(registry
, socket
, 0);
767 pthread_mutex_unlock(®istry
->lock
);
771 pthread_mutex_unlock(®istry
->lock
);
776 * Send to the consumer a close metadata command for the given session. Once
777 * done, the metadata channel is deleted and the session metadata pointer is
778 * nullified. The session lock MUST be held unless the application is
779 * in the destroy path.
781 * Do not hold the registry lock while communicating with the consumerd, because
782 * doing so causes inter-process deadlocks between consumerd and sessiond with
783 * the metadata request notification.
785 * Return 0 on success else a negative value.
787 static int close_metadata(struct ust_registry_session
*registry
,
788 struct consumer_output
*consumer
)
791 struct consumer_socket
*socket
;
792 uint64_t metadata_key
;
793 bool registry_was_already_closed
;
800 pthread_mutex_lock(®istry
->lock
);
801 metadata_key
= registry
->metadata_key
;
802 registry_was_already_closed
= registry
->metadata_closed
;
803 if (metadata_key
!= 0) {
805 * Metadata closed. Even on error this means that the consumer
806 * is not responding or not found so either way a second close
807 * should NOT be emit for this registry.
809 registry
->metadata_closed
= 1;
811 pthread_mutex_unlock(®istry
->lock
);
813 if (metadata_key
== 0 || registry_was_already_closed
) {
818 /* Get consumer socket to use to push the metadata.*/
819 socket
= consumer_find_socket_by_bitness(registry
->bits_per_long
,
826 ret
= consumer_close_metadata(socket
, metadata_key
);
837 * We need to execute ht_destroy outside of RCU read-side critical
838 * section and outside of call_rcu thread, so we postpone its execution
839 * using ht_cleanup_push. It is simpler than to change the semantic of
840 * the many callers of delete_ust_app_session().
843 void delete_ust_app_session_rcu(struct rcu_head
*head
)
845 struct ust_app_session
*ua_sess
=
846 caa_container_of(head
, struct ust_app_session
, rcu_head
);
848 ht_cleanup_push(ua_sess
->channels
);
853 * Delete ust app session safely. RCU read lock must be held before calling
856 * The session list lock must be held by the caller.
859 void delete_ust_app_session(int sock
, struct ust_app_session
*ua_sess
,
863 struct lttng_ht_iter iter
;
864 struct ust_app_channel
*ua_chan
;
865 struct ust_registry_session
*registry
;
869 pthread_mutex_lock(&ua_sess
->lock
);
871 assert(!ua_sess
->deleted
);
872 ua_sess
->deleted
= true;
874 registry
= get_session_registry(ua_sess
);
875 /* Registry can be null on error path during initialization. */
877 /* Push metadata for application before freeing the application. */
878 (void) push_metadata(registry
, ua_sess
->consumer
);
881 * Don't ask to close metadata for global per UID buffers. Close
882 * metadata only on destroy trace session in this case. Also, the
883 * previous push metadata could have flag the metadata registry to
884 * close so don't send a close command if closed.
886 if (ua_sess
->buffer_type
!= LTTNG_BUFFER_PER_UID
) {
887 /* And ask to close it for this session registry. */
888 (void) close_metadata(registry
, ua_sess
->consumer
);
892 cds_lfht_for_each_entry(ua_sess
->channels
->ht
, &iter
.iter
, ua_chan
,
894 ret
= lttng_ht_del(ua_sess
->channels
, &iter
);
896 delete_ust_app_channel(sock
, ua_chan
, app
);
899 /* In case of per PID, the registry is kept in the session. */
900 if (ua_sess
->buffer_type
== LTTNG_BUFFER_PER_PID
) {
901 struct buffer_reg_pid
*reg_pid
= buffer_reg_pid_find(ua_sess
->id
);
904 * Registry can be null on error path during
907 buffer_reg_pid_remove(reg_pid
);
908 buffer_reg_pid_destroy(reg_pid
);
912 if (ua_sess
->handle
!= -1) {
913 pthread_mutex_lock(&app
->sock_lock
);
914 ret
= ustctl_release_handle(sock
, ua_sess
->handle
);
915 pthread_mutex_unlock(&app
->sock_lock
);
916 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
917 ERR("UST app sock %d release session handle failed with ret %d",
920 /* Remove session from application UST object descriptor. */
921 iter
.iter
.node
= &ua_sess
->ust_objd_node
.node
;
922 ret
= lttng_ht_del(app
->ust_sessions_objd
, &iter
);
926 pthread_mutex_unlock(&ua_sess
->lock
);
928 consumer_output_put(ua_sess
->consumer
);
930 call_rcu(&ua_sess
->rcu_head
, delete_ust_app_session_rcu
);
934 * Delete a traceable application structure from the global list. Never call
935 * this function outside of a call_rcu call.
937 * RCU read side lock should _NOT_ be held when calling this function.
940 void delete_ust_app(struct ust_app
*app
)
943 struct ust_app_session
*ua_sess
, *tmp_ua_sess
;
944 struct lttng_ht_iter iter
;
945 struct ust_app_token_event_rule
*token
;
948 * The session list lock must be held during this function to guarantee
949 * the existence of ua_sess.
952 /* Delete ust app sessions info */
957 cds_list_for_each_entry_safe(ua_sess
, tmp_ua_sess
, &app
->teardown_head
,
959 /* Free every object in the session and the session. */
961 delete_ust_app_session(sock
, ua_sess
, app
);
965 /* Wipe token associated with the app */
966 cds_lfht_for_each_entry(app
->tokens_ht
->ht
, &iter
.iter
, token
,
968 ret
= lttng_ht_del(app
->tokens_ht
, &iter
);
970 delete_ust_app_token_event_rule(app
->sock
, token
, app
);
973 ht_cleanup_push(app
->sessions
);
974 ht_cleanup_push(app
->ust_sessions_objd
);
975 ht_cleanup_push(app
->ust_objd
);
976 ht_cleanup_push(app
->tokens_ht
);
978 /* This can happen if trigger setup failed. e.g killed app */
979 if (app
->token_communication
.handle
) {
980 ustctl_release_object(sock
, app
->token_communication
.handle
);
981 free(app
->token_communication
.handle
);
984 lttng_pipe_destroy(app
->token_communication
.trigger_event_pipe
);
987 * Wait until we have deleted the application from the sock hash table
988 * before closing this socket, otherwise an application could re-use the
989 * socket ID and race with the teardown, using the same hash table entry.
991 * It's OK to leave the close in call_rcu. We want it to stay unique for
992 * all RCU readers that could run concurrently with unregister app,
993 * therefore we _need_ to only close that socket after a grace period. So
994 * it should stay in this RCU callback.
996 * This close() is a very important step of the synchronization model so
997 * every modification to this function must be carefully reviewed.
1003 lttng_fd_put(LTTNG_FD_APPS
, 1);
1005 DBG2("UST app pid %d deleted", app
->pid
);
1007 session_unlock_list();
1011 * URCU intermediate call to delete an UST app.
1014 void delete_ust_app_rcu(struct rcu_head
*head
)
1016 struct lttng_ht_node_ulong
*node
=
1017 caa_container_of(head
, struct lttng_ht_node_ulong
, head
);
1018 struct ust_app
*app
=
1019 caa_container_of(node
, struct ust_app
, pid_n
);
1021 DBG3("Call RCU deleting app PID %d", app
->pid
);
1022 delete_ust_app(app
);
1026 * Delete the session from the application ht and delete the data structure by
1027 * freeing every object inside and releasing them.
1029 * The session list lock must be held by the caller.
1031 static void destroy_app_session(struct ust_app
*app
,
1032 struct ust_app_session
*ua_sess
)
1035 struct lttng_ht_iter iter
;
1040 iter
.iter
.node
= &ua_sess
->node
.node
;
1041 ret
= lttng_ht_del(app
->sessions
, &iter
);
1043 /* Already scheduled for teardown. */
1047 /* Once deleted, free the data structure. */
1048 delete_ust_app_session(app
->sock
, ua_sess
, app
);
1055 * Alloc new UST app session.
1058 struct ust_app_session
*alloc_ust_app_session(void)
1060 struct ust_app_session
*ua_sess
;
1062 /* Init most of the default value by allocating and zeroing */
1063 ua_sess
= zmalloc(sizeof(struct ust_app_session
));
1064 if (ua_sess
== NULL
) {
1069 ua_sess
->handle
= -1;
1070 ua_sess
->channels
= lttng_ht_new(0, LTTNG_HT_TYPE_STRING
);
1071 ua_sess
->metadata_attr
.type
= LTTNG_UST_CHAN_METADATA
;
1072 pthread_mutex_init(&ua_sess
->lock
, NULL
);
1081 * Alloc new UST app channel.
1084 struct ust_app_channel
*alloc_ust_app_channel(const char *name
,
1085 struct ust_app_session
*ua_sess
,
1086 struct lttng_ust_channel_attr
*attr
)
1088 struct ust_app_channel
*ua_chan
;
1090 /* Init most of the default value by allocating and zeroing */
1091 ua_chan
= zmalloc(sizeof(struct ust_app_channel
));
1092 if (ua_chan
== NULL
) {
1097 /* Setup channel name */
1098 strncpy(ua_chan
->name
, name
, sizeof(ua_chan
->name
));
1099 ua_chan
->name
[sizeof(ua_chan
->name
) - 1] = '\0';
1101 ua_chan
->enabled
= 1;
1102 ua_chan
->handle
= -1;
1103 ua_chan
->session
= ua_sess
;
1104 ua_chan
->key
= get_next_channel_key();
1105 ua_chan
->ctx
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);
1106 ua_chan
->events
= lttng_ht_new(0, LTTNG_HT_TYPE_STRING
);
1107 lttng_ht_node_init_str(&ua_chan
->node
, ua_chan
->name
);
1109 CDS_INIT_LIST_HEAD(&ua_chan
->streams
.head
);
1110 CDS_INIT_LIST_HEAD(&ua_chan
->ctx_list
);
1112 /* Copy attributes */
1114 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
1115 ua_chan
->attr
.subbuf_size
= attr
->subbuf_size
;
1116 ua_chan
->attr
.num_subbuf
= attr
->num_subbuf
;
1117 ua_chan
->attr
.overwrite
= attr
->overwrite
;
1118 ua_chan
->attr
.switch_timer_interval
= attr
->switch_timer_interval
;
1119 ua_chan
->attr
.read_timer_interval
= attr
->read_timer_interval
;
1120 ua_chan
->attr
.output
= attr
->output
;
1121 ua_chan
->attr
.blocking_timeout
= attr
->u
.s
.blocking_timeout
;
1123 /* By default, the channel is a per cpu channel. */
1124 ua_chan
->attr
.type
= LTTNG_UST_CHAN_PER_CPU
;
1126 DBG3("UST app channel %s allocated", ua_chan
->name
);
1135 * Allocate and initialize a UST app stream.
1137 * Return newly allocated stream pointer or NULL on error.
1139 struct ust_app_stream
*ust_app_alloc_stream(void)
1141 struct ust_app_stream
*stream
= NULL
;
1143 stream
= zmalloc(sizeof(*stream
));
1144 if (stream
== NULL
) {
1145 PERROR("zmalloc ust app stream");
1149 /* Zero could be a valid value for a handle so flag it to -1. */
1150 stream
->handle
= -1;
1157 * Alloc new UST app event.
1160 struct ust_app_event
*alloc_ust_app_event(char *name
,
1161 struct lttng_ust_event
*attr
)
1163 struct ust_app_event
*ua_event
;
1165 /* Init most of the default value by allocating and zeroing */
1166 ua_event
= zmalloc(sizeof(struct ust_app_event
));
1167 if (ua_event
== NULL
) {
1168 PERROR("Failed to allocate ust_app_event structure");
1172 ua_event
->enabled
= 1;
1173 strncpy(ua_event
->name
, name
, sizeof(ua_event
->name
));
1174 ua_event
->name
[sizeof(ua_event
->name
) - 1] = '\0';
1175 lttng_ht_node_init_str(&ua_event
->node
, ua_event
->name
);
1177 /* Copy attributes */
1179 memcpy(&ua_event
->attr
, attr
, sizeof(ua_event
->attr
));
1182 DBG3("UST app event %s allocated", ua_event
->name
);
1191 * Alloc new UST app token event rule.
1193 static struct ust_app_token_event_rule
*alloc_ust_app_token_event_rule(
1194 struct lttng_trigger
*trigger
)
1196 struct ust_app_token_event_rule
*ua_token
;
1197 struct lttng_condition
*condition
= NULL
;
1198 struct lttng_event_rule
*event_rule
= NULL
;
1200 ua_token
= zmalloc(sizeof(struct ust_app_token_event_rule
));
1201 if (ua_token
== NULL
) {
1202 PERROR("Failed to allocate ust_app_token_event_rule structure");
1206 /* Get reference of the trigger */
1207 /* TODO should this be like lttng_event_rule_get with a returned bool? */
1208 lttng_trigger_get(trigger
);
1210 ua_token
->enabled
= 1;
1211 ua_token
->token
= lttng_trigger_get_tracer_token(trigger
);
1212 lttng_ht_node_init_u64(&ua_token
->node
, ua_token
->token
);
1214 condition
= lttng_trigger_get_condition(trigger
);
1216 assert(lttng_condition_get_type(condition
) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT
);
1218 assert(LTTNG_CONDITION_STATUS_OK
== lttng_condition_event_rule_get_rule_mutable(condition
, &event_rule
));
1221 ua_token
->trigger
= trigger
;
1222 ua_token
->filter
= lttng_event_rule_get_filter_bytecode(event_rule
);
1223 ua_token
->exclusion
= lttng_event_rule_generate_exclusions(event_rule
);
1224 ua_token
->error_counter_index
= lttng_trigger_get_error_counter_index(trigger
);
1226 /* TODO put capture here? or later*/
1228 DBG3("UST app token event rule %" PRIu64
" allocated", ua_token
->token
);
1237 * Alloc new UST app context.
1240 struct ust_app_ctx
*alloc_ust_app_ctx(struct lttng_ust_context_attr
*uctx
)
1242 struct ust_app_ctx
*ua_ctx
;
1244 ua_ctx
= zmalloc(sizeof(struct ust_app_ctx
));
1245 if (ua_ctx
== NULL
) {
1249 CDS_INIT_LIST_HEAD(&ua_ctx
->list
);
1252 memcpy(&ua_ctx
->ctx
, uctx
, sizeof(ua_ctx
->ctx
));
1253 if (uctx
->ctx
== LTTNG_UST_CONTEXT_APP_CONTEXT
) {
1254 char *provider_name
= NULL
, *ctx_name
= NULL
;
1256 provider_name
= strdup(uctx
->u
.app_ctx
.provider_name
);
1257 ctx_name
= strdup(uctx
->u
.app_ctx
.ctx_name
);
1258 if (!provider_name
|| !ctx_name
) {
1259 free(provider_name
);
1264 ua_ctx
->ctx
.u
.app_ctx
.provider_name
= provider_name
;
1265 ua_ctx
->ctx
.u
.app_ctx
.ctx_name
= ctx_name
;
1269 DBG3("UST app context %d allocated", ua_ctx
->ctx
.ctx
);
1277 * Create a liblttng-ust filter bytecode from given bytecode.
1279 * Return allocated filter or NULL on error.
1281 static struct lttng_ust_filter_bytecode
*
1282 create_ust_filter_bytecode_from_bytecode(const struct lttng_bytecode
*orig_f
)
1284 struct lttng_ust_filter_bytecode
*filter
= NULL
;
1286 /* Copy filter bytecode */
1287 filter
= zmalloc(sizeof(*filter
) + orig_f
->len
);
1289 PERROR("zmalloc alloc ust filter bytecode");
1293 assert(sizeof(struct lttng_bytecode
) ==
1294 sizeof(struct lttng_ust_filter_bytecode
));
1295 memcpy(filter
, orig_f
, sizeof(*filter
) + orig_f
->len
);
1301 * Create a liblttng-ust capture bytecode from given bytecode.
1303 * Return allocated filter or NULL on error.
1305 static struct lttng_ust_capture_bytecode
*
1306 create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode
*orig_f
)
1308 struct lttng_ust_capture_bytecode
*capture
= NULL
;
1310 /* Copy capture bytecode */
1311 capture
= zmalloc(sizeof(*capture
) + orig_f
->len
);
1313 PERROR("zmalloc alloc ust capture bytecode");
1317 assert(sizeof(struct lttng_bytecode
) ==
1318 sizeof(struct lttng_ust_capture_bytecode
));
1319 memcpy(capture
, orig_f
, sizeof(*capture
) + orig_f
->len
);
1325 * Find an ust_app using the sock and return it. RCU read side lock must be
1326 * held before calling this helper function.
1328 struct ust_app
*ust_app_find_by_sock(int sock
)
1330 struct lttng_ht_node_ulong
*node
;
1331 struct lttng_ht_iter iter
;
1333 lttng_ht_lookup(ust_app_ht_by_sock
, (void *)((unsigned long) sock
), &iter
);
1334 node
= lttng_ht_iter_get_node_ulong(&iter
);
1336 DBG2("UST app find by sock %d not found", sock
);
1340 return caa_container_of(node
, struct ust_app
, sock_n
);
1347 * Find an ust_app using the notify sock and return it. RCU read side lock must
1348 * be held before calling this helper function.
1350 static struct ust_app
*find_app_by_notify_sock(int sock
)
1352 struct lttng_ht_node_ulong
*node
;
1353 struct lttng_ht_iter iter
;
1355 lttng_ht_lookup(ust_app_ht_by_notify_sock
, (void *)((unsigned long) sock
),
1357 node
= lttng_ht_iter_get_node_ulong(&iter
);
1359 DBG2("UST app find by notify sock %d not found", sock
);
1363 return caa_container_of(node
, struct ust_app
, notify_sock_n
);
1370 * Lookup for an ust app event based on event name, filter bytecode and the
1373 * Return an ust_app_event object or NULL on error.
1375 static struct ust_app_event
*find_ust_app_event(struct lttng_ht
*ht
,
1376 const char *name
, const struct lttng_bytecode
*filter
,
1378 const struct lttng_event_exclusion
*exclusion
)
1380 struct lttng_ht_iter iter
;
1381 struct lttng_ht_node_str
*node
;
1382 struct ust_app_event
*event
= NULL
;
1383 struct ust_app_ht_key key
;
1388 /* Setup key for event lookup. */
1390 key
.filter
= filter
;
1391 key
.loglevel_type
= loglevel_value
;
1392 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1393 key
.exclusion
= exclusion
;
1395 /* Lookup using the event name as hash and a custom match fct. */
1396 cds_lfht_lookup(ht
->ht
, ht
->hash_fct((void *) name
, lttng_ht_seed
),
1397 ht_match_ust_app_event
, &key
, &iter
.iter
);
1398 node
= lttng_ht_iter_get_node_str(&iter
);
1403 event
= caa_container_of(node
, struct ust_app_event
, node
);
1410 * Lookup for an ust app tokens based on a token id.
1412 * Return an ust_app_token_event_rule object or NULL on error.
1414 static struct ust_app_token_event_rule
*find_ust_app_token_event_rule(struct lttng_ht
*ht
,
1417 struct lttng_ht_iter iter
;
1418 struct lttng_ht_node_u64
*node
;
1419 struct ust_app_token_event_rule
*token_event_rule
= NULL
;
1423 lttng_ht_lookup(ht
, &token
, &iter
);
1424 node
= lttng_ht_iter_get_node_u64(&iter
);
1426 DBG2("UST app token %" PRIu64
" not found", token
);
1430 token_event_rule
= caa_container_of(node
, struct ust_app_token_event_rule
, node
);
1432 return token_event_rule
;
1436 * Create the channel context on the tracer.
1438 * Called with UST app session lock held.
1441 int create_ust_channel_context(struct ust_app_channel
*ua_chan
,
1442 struct ust_app_ctx
*ua_ctx
, struct ust_app
*app
)
1446 health_code_update();
1448 pthread_mutex_lock(&app
->sock_lock
);
1449 ret
= ustctl_add_context(app
->sock
, &ua_ctx
->ctx
,
1450 ua_chan
->obj
, &ua_ctx
->obj
);
1451 pthread_mutex_unlock(&app
->sock_lock
);
1453 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1454 ERR("UST app create channel context failed for app (pid: %d) "
1455 "with ret %d", app
->pid
, ret
);
1458 * This is normal behavior, an application can die during the
1459 * creation process. Don't report an error so the execution can
1460 * continue normally.
1463 DBG3("UST app add context failed. Application is dead.");
1468 ua_ctx
->handle
= ua_ctx
->obj
->handle
;
1470 DBG2("UST app context handle %d created successfully for channel %s",
1471 ua_ctx
->handle
, ua_chan
->name
);
1474 health_code_update();
1479 * Set the filter on the tracer.
1481 static int set_ust_filter(struct ust_app
*app
,
1482 const struct lttng_bytecode
*bytecode
,
1483 struct lttng_ust_object_data
*ust_object
)
1486 struct lttng_ust_filter_bytecode
*ust_bytecode
= NULL
;
1488 health_code_update();
1490 ust_bytecode
= create_ust_filter_bytecode_from_bytecode(bytecode
);
1491 if (!ust_bytecode
) {
1492 ret
= -LTTNG_ERR_NOMEM
;
1495 pthread_mutex_lock(&app
->sock_lock
);
1496 ret
= ustctl_set_filter(app
->sock
, ust_bytecode
,
1498 pthread_mutex_unlock(&app
->sock_lock
);
1500 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1501 ERR("UST app set filter failed for object %p of app (pid: %d) "
1502 "with ret %d", ust_object
, app
->pid
, ret
);
1505 * This is normal behavior, an application can die during the
1506 * creation process. Don't report an error so the execution can
1507 * continue normally.
1510 DBG3("UST app set filter. Application is dead.");
1515 DBG2("UST filter set for object %p successfully", ust_object
);
1518 health_code_update();
1524 * Set a capture bytecode for the passed object.
1525 * The seqnum enforce the ordering at runtime and on reception.
1527 static int set_ust_capture(struct ust_app
*app
,
1528 const struct lttng_bytecode
*bytecode
,
1529 unsigned int seqnum
,
1530 struct lttng_ust_object_data
*ust_object
)
1533 struct lttng_ust_capture_bytecode
*ust_bytecode
= NULL
;
1535 health_code_update();
1537 ust_bytecode
= create_ust_capture_bytecode_from_bytecode(bytecode
);
1538 if (!ust_bytecode
) {
1539 ret
= -LTTNG_ERR_NOMEM
;
1543 /* Set the seqnum */
1544 ust_bytecode
->seqnum
= seqnum
;
1546 pthread_mutex_lock(&app
->sock_lock
);
1547 ret
= ustctl_set_capture(app
->sock
, ust_bytecode
,
1549 pthread_mutex_unlock(&app
->sock_lock
);
1551 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1552 ERR("UST app set capture failed for object %p of app (pid: %d) "
1553 "with ret %d", ust_object
, app
->pid
, ret
);
1556 * This is normal behavior, an application can die during the
1557 * creation process. Don't report an error so the execution can
1558 * continue normally.
1561 DBG3("UST app set capture. Application is dead.");
1566 DBG2("UST capture set for object %p successfully", ust_object
);
1569 health_code_update();
1575 struct lttng_ust_event_exclusion
*create_ust_exclusion_from_exclusion(
1576 struct lttng_event_exclusion
*exclusion
)
1578 struct lttng_ust_event_exclusion
*ust_exclusion
= NULL
;
1579 size_t exclusion_alloc_size
= sizeof(struct lttng_ust_event_exclusion
) +
1580 LTTNG_UST_SYM_NAME_LEN
* exclusion
->count
;
1582 ust_exclusion
= zmalloc(exclusion_alloc_size
);
1583 if (!ust_exclusion
) {
1588 assert(sizeof(struct lttng_event_exclusion
) ==
1589 sizeof(struct lttng_ust_event_exclusion
));
1590 memcpy(ust_exclusion
, exclusion
, exclusion_alloc_size
);
1592 return ust_exclusion
;
1596 * Set event exclusions on the tracer.
1598 static int set_ust_exclusions(struct ust_app
*app
,
1599 struct lttng_event_exclusion
*exclusions
,
1600 struct lttng_ust_object_data
*ust_object
)
1603 struct lttng_ust_event_exclusion
*ust_exclusions
= NULL
;
1605 assert(exclusions
&& exclusions
->count
> 0);
1607 health_code_update();
1609 ust_exclusions
= create_ust_exclusion_from_exclusion(
1611 if (!ust_exclusions
) {
1612 ret
= -LTTNG_ERR_NOMEM
;
1615 pthread_mutex_lock(&app
->sock_lock
);
1616 ret
= ustctl_set_exclusion(app
->sock
, ust_exclusions
, ust_object
);
1617 pthread_mutex_unlock(&app
->sock_lock
);
1619 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1620 ERR("UST app exclusions failed for object %p of app (pid: %d) "
1621 "with ret %d", ust_object
, app
->pid
, ret
);
1624 * This is normal behavior, an application can die during the
1625 * creation process. Don't report an error so the execution can
1626 * continue normally.
1629 DBG3("UST app set exclusions failed. Application is dead.");
1634 DBG2("UST exclusions set successfully for object %p", ust_object
);
1637 health_code_update();
1638 free(ust_exclusions
);
1643 * Disable the specified event on to UST tracer for the UST session.
1645 static int disable_ust_object(struct ust_app
*app
,
1646 struct lttng_ust_object_data
*object
)
1650 health_code_update();
1652 pthread_mutex_lock(&app
->sock_lock
);
1653 ret
= ustctl_disable(app
->sock
, object
);
1654 pthread_mutex_unlock(&app
->sock_lock
);
1656 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1657 ERR("UST app disable failed for object %p app (pid: %d) with ret %d",
1658 object
, app
->pid
, ret
);
1661 * This is normal behavior, an application can die during the
1662 * creation process. Don't report an error so the execution can
1663 * continue normally.
1666 DBG3("UST app disable event failed. Application is dead.");
1671 DBG2("UST app object %p disabled successfully for app (pid: %d)",
1675 health_code_update();
1680 * Disable the specified channel on to UST tracer for the UST session.
1682 static int disable_ust_channel(struct ust_app
*app
,
1683 struct ust_app_session
*ua_sess
, struct ust_app_channel
*ua_chan
)
1687 health_code_update();
1689 pthread_mutex_lock(&app
->sock_lock
);
1690 ret
= ustctl_disable(app
->sock
, ua_chan
->obj
);
1691 pthread_mutex_unlock(&app
->sock_lock
);
1693 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1694 ERR("UST app channel %s disable failed for app (pid: %d) "
1695 "and session handle %d with ret %d",
1696 ua_chan
->name
, app
->pid
, ua_sess
->handle
, ret
);
1699 * This is normal behavior, an application can die during the
1700 * creation process. Don't report an error so the execution can
1701 * continue normally.
1704 DBG3("UST app disable channel failed. Application is dead.");
1709 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1710 ua_chan
->name
, app
->pid
);
1713 health_code_update();
1718 * Enable the specified channel on to UST tracer for the UST session.
1720 static int enable_ust_channel(struct ust_app
*app
,
1721 struct ust_app_session
*ua_sess
, struct ust_app_channel
*ua_chan
)
1725 health_code_update();
1727 pthread_mutex_lock(&app
->sock_lock
);
1728 ret
= ustctl_enable(app
->sock
, ua_chan
->obj
);
1729 pthread_mutex_unlock(&app
->sock_lock
);
1731 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1732 ERR("UST app channel %s enable failed for app (pid: %d) "
1733 "and session handle %d with ret %d",
1734 ua_chan
->name
, app
->pid
, ua_sess
->handle
, ret
);
1737 * This is normal behavior, an application can die during the
1738 * creation process. Don't report an error so the execution can
1739 * continue normally.
1742 DBG3("UST app enable channel failed. Application is dead.");
1747 ua_chan
->enabled
= 1;
1749 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1750 ua_chan
->name
, app
->pid
);
1753 health_code_update();
1758 * Enable the specified event on to UST tracer for the UST session.
1760 static int enable_ust_object(struct ust_app
*app
, struct lttng_ust_object_data
*ust_object
)
1764 health_code_update();
1766 pthread_mutex_lock(&app
->sock_lock
);
1767 ret
= ustctl_enable(app
->sock
, ust_object
);
1768 pthread_mutex_unlock(&app
->sock_lock
);
1770 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1771 ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
1772 ust_object
, app
->pid
, ret
);
1775 * This is normal behavior, an application can die during the
1776 * creation process. Don't report an error so the execution can
1777 * continue normally.
1780 DBG3("UST app enable failed. Application is dead.");
1785 DBG2("UST app object %p enabled successfully for app (pid: %d)",
1786 ust_object
, app
->pid
);
1789 health_code_update();
1794 * Send channel and stream buffer to application.
1796 * Return 0 on success. On error, a negative value is returned.
1798 static int send_channel_pid_to_ust(struct ust_app
*app
,
1799 struct ust_app_session
*ua_sess
, struct ust_app_channel
*ua_chan
)
1802 struct ust_app_stream
*stream
, *stmp
;
1808 health_code_update();
1810 DBG("UST app sending channel %s to UST app sock %d", ua_chan
->name
,
1813 /* Send channel to the application. */
1814 ret
= ust_consumer_send_channel_to_ust(app
, ua_sess
, ua_chan
);
1815 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
1816 ret
= -ENOTCONN
; /* Caused by app exiting. */
1818 } else if (ret
< 0) {
1822 health_code_update();
1824 /* Send all streams to application. */
1825 cds_list_for_each_entry_safe(stream
, stmp
, &ua_chan
->streams
.head
, list
) {
1826 ret
= ust_consumer_send_stream_to_ust(app
, ua_chan
, stream
);
1827 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
1828 ret
= -ENOTCONN
; /* Caused by app exiting. */
1830 } else if (ret
< 0) {
1833 /* We don't need the stream anymore once sent to the tracer. */
1834 cds_list_del(&stream
->list
);
1835 delete_ust_app_stream(-1, stream
, app
);
1837 /* Flag the channel that it is sent to the application. */
1838 ua_chan
->is_sent
= 1;
1841 health_code_update();
1846 * Create the specified event onto the UST tracer for a UST session.
1848 * Should be called with session mutex held.
1851 int create_ust_event(struct ust_app
*app
, struct ust_app_session
*ua_sess
,
1852 struct ust_app_channel
*ua_chan
, struct ust_app_event
*ua_event
)
1856 health_code_update();
1858 /* Create UST event on tracer */
1859 pthread_mutex_lock(&app
->sock_lock
);
1860 ret
= ustctl_create_event(app
->sock
, &ua_event
->attr
, ua_chan
->obj
,
1862 pthread_mutex_unlock(&app
->sock_lock
);
1864 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1866 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1867 ua_event
->attr
.name
, app
->pid
, ret
);
1870 * This is normal behavior, an application can die during the
1871 * creation process. Don't report an error so the execution can
1872 * continue normally.
1875 DBG3("UST app create event failed. Application is dead.");
1880 ua_event
->handle
= ua_event
->obj
->handle
;
1882 DBG2("UST app event %s created successfully for pid:%d object: %p",
1883 ua_event
->attr
.name
, app
->pid
, ua_event
->obj
);
1885 health_code_update();
1887 /* Set filter if one is present. */
1888 if (ua_event
->filter
) {
1889 ret
= set_ust_filter(app
, ua_event
->filter
, ua_event
->obj
);
1895 /* Set exclusions for the event */
1896 if (ua_event
->exclusion
) {
1897 ret
= set_ust_exclusions(app
, ua_event
->exclusion
, ua_event
->obj
);
1903 /* If event not enabled, disable it on the tracer */
1904 if (ua_event
->enabled
) {
1906 * We now need to explicitly enable the event, since it
1907 * is now disabled at creation.
1909 ret
= enable_ust_object(app
, ua_event
->obj
);
1912 * If we hit an EPERM, something is wrong with our enable call. If
1913 * we get an EEXIST, there is a problem on the tracer side since we
1917 case -LTTNG_UST_ERR_PERM
:
1918 /* Code flow problem */
1920 case -LTTNG_UST_ERR_EXIST
:
1921 /* It's OK for our use case. */
1932 health_code_update();
1937 void init_ust_trigger_from_event_rule(const struct lttng_event_rule
*rule
, struct lttng_ust_trigger
*trigger
)
1939 enum lttng_event_rule_status status
;
1940 enum lttng_loglevel_type loglevel_type
;
1941 enum lttng_ust_loglevel_type ust_loglevel_type
= LTTNG_UST_LOGLEVEL_ALL
;
1943 const char *pattern
;
1945 /* For now only LTTNG_EVENT_RULE_TYPE_TRACEPOINT are supported */
1946 assert(lttng_event_rule_get_type(rule
) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT
);
1948 memset(trigger
, 0, sizeof(*trigger
));
1950 if (lttng_event_rule_is_agent(rule
)) {
1952 * Special event for agents
1953 * The actual meat of the event is in the filter that will be
1954 * attached later on.
1955 * Set the default values for the agent event.
1957 pattern
= event_get_default_agent_ust_name(lttng_event_rule_get_domain_type(rule
));
1959 ust_loglevel_type
= LTTNG_UST_LOGLEVEL_ALL
;
1961 status
= lttng_event_rule_tracepoint_get_pattern(rule
, &pattern
);
1962 if (status
!= LTTNG_EVENT_RULE_STATUS_OK
) {
1963 /* At this point this is a fatal error */
1967 status
= lttng_event_rule_tracepoint_get_log_level_type(
1968 rule
, &loglevel_type
);
1969 if (status
!= LTTNG_EVENT_RULE_STATUS_OK
) {
1970 /* At this point this is a fatal error */
1974 switch (loglevel_type
) {
1975 case LTTNG_EVENT_LOGLEVEL_ALL
:
1976 ust_loglevel_type
= LTTNG_UST_LOGLEVEL_ALL
;
1978 case LTTNG_EVENT_LOGLEVEL_RANGE
:
1979 ust_loglevel_type
= LTTNG_UST_LOGLEVEL_RANGE
;
1981 case LTTNG_EVENT_LOGLEVEL_SINGLE
:
1982 ust_loglevel_type
= LTTNG_UST_LOGLEVEL_SINGLE
;
1986 if (loglevel_type
!= LTTNG_EVENT_LOGLEVEL_ALL
) {
1987 status
= lttng_event_rule_tracepoint_get_log_level(
1989 assert(status
== LTTNG_EVENT_RULE_STATUS_OK
);
1993 trigger
->instrumentation
= LTTNG_UST_TRACEPOINT
;
1994 strncpy(trigger
->name
, pattern
, LTTNG_UST_SYM_NAME_LEN
- 1);
1995 trigger
->loglevel_type
= ust_loglevel_type
;
1996 trigger
->loglevel
= loglevel
;
2000 * Create the specified event rule token onto the UST tracer for a UST app.
2003 int create_ust_token_event_rule(struct ust_app
*app
, struct ust_app_token_event_rule
*ua_token
)
2006 struct lttng_ust_trigger trigger
;
2007 struct lttng_condition
*condition
= NULL
;
2008 struct lttng_event_rule
*event_rule
= NULL
;
2009 unsigned int capture_bytecode_count
= 0;
2011 health_code_update();
2012 assert(app
->token_communication
.handle
);
2014 condition
= lttng_trigger_get_condition(ua_token
->trigger
);
2016 assert(lttng_condition_get_type(condition
) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT
);
2018 lttng_condition_event_rule_get_rule_mutable(condition
, &event_rule
);
2020 assert(lttng_event_rule_get_type(event_rule
) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT
);
2021 /* Should we also test for UST at this point, or do we trust all the
2024 init_ust_trigger_from_event_rule(event_rule
, &trigger
);
2026 trigger
.id
= ua_token
->token
;
2027 trigger
.error_counter_index
= ua_token
->error_counter_index
;
2029 /* Create UST trigger on tracer */
2030 pthread_mutex_lock(&app
->sock_lock
);
2031 ret
= ustctl_create_trigger(app
->sock
, &trigger
, app
->token_communication
.handle
, &ua_token
->obj
);
2032 pthread_mutex_unlock(&app
->sock_lock
);
2034 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
2036 ERR("Error ustctl create trigger %s for app pid: %d with ret %d",
2037 trigger
.name
, app
->pid
, ret
);
2040 * This is normal behavior, an application can die during the
2041 * creation process. Don't report an error so the execution can
2042 * continue normally.
2045 DBG3("UST app create event failed. Application is dead.");
2050 ua_token
->handle
= ua_token
->obj
->handle
;
2052 DBG2("UST app event %s created successfully for pid:%d object: %p",
2053 trigger
.name
, app
->pid
, ua_token
->obj
);
2055 health_code_update();
2057 /* Set filter if one is present. */
2058 if (ua_token
->filter
) {
2059 ret
= set_ust_filter(app
, ua_token
->filter
, ua_token
->obj
);
2065 /* Set exclusions for the event */
2066 if (ua_token
->exclusion
) {
2067 ret
= set_ust_exclusions(app
, ua_token
->exclusion
, ua_token
->obj
);
2073 /* Set the capture bytecode
2074 * TODO: do we want to emulate what is done with exclusion and provide
2075 * and object with a count of capture bytecode? instead of multiple
2078 capture_bytecode_count
= lttng_trigger_get_capture_bytecode_count(ua_token
->trigger
);
2079 for (unsigned int i
= 0; i
< capture_bytecode_count
; i
++) {
2080 const struct lttng_bytecode
*capture_bytecode
= lttng_trigger_get_capture_bytecode_at_index(ua_token
->trigger
, i
);
2081 ret
= set_ust_capture(app
, capture_bytecode
, i
, ua_token
->obj
);
2088 * We now need to explicitly enable the event, since it
2089 * is disabled at creation.
2091 ret
= enable_ust_object(app
, ua_token
->obj
);
2094 * If we hit an EPERM, something is wrong with our enable call. If
2095 * we get an EEXIST, there is a problem on the tracer side since we
2099 case -LTTNG_UST_ERR_PERM
:
2100 /* Code flow problem */
2102 case -LTTNG_UST_ERR_EXIST
:
2103 /* It's OK for our use case. */
2111 ua_token
->enabled
= true;
2114 health_code_update();
2119 * Copy data between an UST app event and a LTT event.
2121 static void shadow_copy_event(struct ust_app_event
*ua_event
,
2122 struct ltt_ust_event
*uevent
)
2124 size_t exclusion_alloc_size
;
2126 strncpy(ua_event
->name
, uevent
->attr
.name
, sizeof(ua_event
->name
));
2127 ua_event
->name
[sizeof(ua_event
->name
) - 1] = '\0';
2129 ua_event
->enabled
= uevent
->enabled
;
2131 /* Copy event attributes */
2132 memcpy(&ua_event
->attr
, &uevent
->attr
, sizeof(ua_event
->attr
));
2134 /* Copy filter bytecode */
2135 if (uevent
->filter
) {
2136 ua_event
->filter
= bytecode_copy(uevent
->filter
);
2137 /* Filter might be NULL here in case of ENONEM. */
2140 /* Copy exclusion data */
2141 if (uevent
->exclusion
) {
2142 exclusion_alloc_size
= sizeof(struct lttng_event_exclusion
) +
2143 LTTNG_UST_SYM_NAME_LEN
* uevent
->exclusion
->count
;
2144 ua_event
->exclusion
= zmalloc(exclusion_alloc_size
);
2145 if (ua_event
->exclusion
== NULL
) {
2148 memcpy(ua_event
->exclusion
, uevent
->exclusion
,
2149 exclusion_alloc_size
);
2155 * Copy data between an UST app channel and a LTT channel.
2157 static void shadow_copy_channel(struct ust_app_channel
*ua_chan
,
2158 struct ltt_ust_channel
*uchan
)
2160 DBG2("UST app shadow copy of channel %s started", ua_chan
->name
);
2162 strncpy(ua_chan
->name
, uchan
->name
, sizeof(ua_chan
->name
));
2163 ua_chan
->name
[sizeof(ua_chan
->name
) - 1] = '\0';
2165 ua_chan
->tracefile_size
= uchan
->tracefile_size
;
2166 ua_chan
->tracefile_count
= uchan
->tracefile_count
;
2168 /* Copy event attributes since the layout is different. */
2169 ua_chan
->attr
.subbuf_size
= uchan
->attr
.subbuf_size
;
2170 ua_chan
->attr
.num_subbuf
= uchan
->attr
.num_subbuf
;
2171 ua_chan
->attr
.overwrite
= uchan
->attr
.overwrite
;
2172 ua_chan
->attr
.switch_timer_interval
= uchan
->attr
.switch_timer_interval
;
2173 ua_chan
->attr
.read_timer_interval
= uchan
->attr
.read_timer_interval
;
2174 ua_chan
->monitor_timer_interval
= uchan
->monitor_timer_interval
;
2175 ua_chan
->attr
.output
= uchan
->attr
.output
;
2176 ua_chan
->attr
.blocking_timeout
= uchan
->attr
.u
.s
.blocking_timeout
;
2179 * Note that the attribute channel type is not set since the channel on the
2180 * tracing registry side does not have this information.
2183 ua_chan
->enabled
= uchan
->enabled
;
2184 ua_chan
->tracing_channel_id
= uchan
->id
;
2186 DBG3("UST app shadow copy of channel %s done", ua_chan
->name
);
/*
 * Copy data between a UST app session and a regular LTT session.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct tm *timeinfo;
	char datetime[16];
	int ret;
	char tmp_shm_path[PATH_MAX];

	timeinfo = localtime(&app->registration_time);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
				datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_UID_PATH,
				lttng_credentials_get_uid(&ua_sess->real_credentials),
				app->bits_per_long);
		break;
	default:
		assert(0);
		goto error;
	}
	if (ret < 0) {
		PERROR("asprintf UST shadow copy session");
		assert(0);
		goto error;
	}

	strncpy(ua_sess->root_shm_path, usess->root_shm_path,
			sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path,
			sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
		switch (ua_sess->buffer_type) {
		case LTTNG_BUFFER_PER_PID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
					app->name, app->pid, datetime);
			break;
		case LTTNG_BUFFER_PER_UID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_UID_PATH,
					app->uid, app->bits_per_long);
			break;
		default:
			assert(0);
			goto error;
		}
		if (ret < 0) {
			PERROR("sprintf UST shadow copy session");
			assert(0);
			goto error;
		}

		strncat(ua_sess->shm_path, tmp_shm_path,
				sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
		ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	}
	return;

error:
	consumer_output_put(ua_sess->consumer);
}

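/*
 * Illustrative sketch (not built): what shadow_copy_session() produces for
 * ua_sess->path under each buffering scheme. The expansions shown in the
 * comments assume the default values of DEFAULT_UST_TRACE_PID_PATH and
 * DEFAULT_UST_TRACE_UID_PATH; only the format strings used above are
 * authoritative, and the application name, pid, uid and timestamp below are
 * hypothetical.
 */
#if 0
static void example_session_paths(void)
{
	char path[PATH_MAX];

	/* Per-PID: one directory per traced process instance. */
	snprintf(path, sizeof(path), DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
			"my-app", 1234, "20240101-120000");
	/* e.g. "ust/pid/my-app-1234-20240101-120000" (assumed expansion) */

	/* Per-UID: one directory per uid and bitness, shared by processes. */
	snprintf(path, sizeof(path), DEFAULT_UST_TRACE_UID_PATH, 1000, 64);
	/* e.g. "ust/uid/1000/64-bit" (assumed expansion) */
}
#endif
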
/*
 * Lookup session wrapper.
 */
static
void __lookup_session_by_app(const struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}

/*
 * Return ust app session from the app session hashtable using the UST session
 * id.
 */
static struct ust_app_session *lookup_session_by_app(
		const struct ltt_ust_session *usess, struct ust_app *app)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	__lookup_session_by_app(usess, app, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		goto error;
	}

	return caa_container_of(node, struct ust_app_session, node);

error:
	return NULL;
}

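/*
 * Illustrative sketch (not built): the intrusive hash table pattern used by
 * lookup_session_by_app(). The table stores lttng_ht_node_u64 nodes embedded
 * in their owning structure; caa_container_of() walks back from the embedded
 * node to the enclosing object. The structure and helper names below are
 * hypothetical.
 */
#if 0
struct example_obj {
	uint64_t key;
	struct lttng_ht_node_u64 node;	/* embedded hash table node */
};

static struct example_obj *example_lookup(struct lttng_ht *ht, uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (!node) {
		return NULL;
	}
	/* Recover the enclosing example_obj from its embedded node. */
	return caa_container_of(node, struct example_obj, node);
}
#endif
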
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major, app->version.minor,
			reg_pid->root_shm_path, reg_pid->shm_path,
			lttng_credentials_get_uid(&ua_sess->effective_credentials),
			lttng_credentials_get_gid(&ua_sess->effective_credentials),
			ua_sess->tracing_id, app->uid);
	if (ret < 0) {
		/*
		 * reg_pid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_pid_destroy(reg_pid);
		goto error;
	}

	buffer_reg_pid_add(reg_pid);

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}

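/*
 * Illustrative sketch (not built): the lookup keys that distinguish the two
 * buffer registry flavours set up here and in setup_buffer_reg_uid() below.
 * Per-PID registries are found from the ust_app_session id alone, while
 * per-UID registries are shared across processes and therefore keyed by
 * session id, application bitness and uid. The wrapper names are
 * hypothetical; the find calls are the ones used above and below.
 */
#if 0
static struct buffer_reg_pid *example_find_pid_reg(struct ust_app_session *ua_sess)
{
	/* One registry per traced process instance. */
	return buffer_reg_pid_find(ua_sess->id);
}

static struct buffer_reg_uid *example_find_uid_reg(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	/* One registry per (session, bitness, uid) tuple. */
	return buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
}
#endif
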
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_uid->root_shm_path,
			reg_uid->shm_path, usess->uid, usess->gid,
			ua_sess->tracing_id, app->uid);
	if (ret < 0) {
		/*
		 * reg_uid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_uid_destroy(reg_uid, NULL);
		goto error;
	}

	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	buffer_reg_uid_add(reg_uid);

	DBG3("UST app buffer registry per UID created successfully");

end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session();
		if (ua_sess == NULL) {
			/* Only malloc can fail so something is really wrong. */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	if (ua_sess->handle == -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or is not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
		lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
		lttng_ht_add_unique_ulong(app->ust_sessions_objd,
				&ua_sess->ust_objd_node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}

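/*
 * Illustrative sketch (not built): how a caller might consume the error
 * contract documented above, where -ENOTCONN means "the application went
 * away" and is usually not treated as a hard error. The caller name is
 * hypothetical and the locking that real call sites must hold is omitted.
 */
#if 0
static int example_caller(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess = NULL;

	ret = find_or_create_ust_app_session(usess, app, &ua_sess, &created);
	if (ret == -ENOTCONN) {
		/* Application died during setup; skip it silently. */
		return 0;
	} else if (ret < 0) {
		/* Hard error (e.g. -ENOMEM). */
		return ret;
	}

	/* ua_sess is now valid; created tells us if it was newly allocated. */
	return 0;
}
#endif
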
/*
 * Match function for a hash table lookup of ust_app_ctx.
 *
 * It matches an ust app context based on the context type and, in the case
 * of perf counters, their name.
 */
static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_ctx *ctx;
	const struct lttng_ust_context_attr *key;

	assert(node);
	assert(_key);

	ctx = caa_container_of(node, struct ust_app_ctx, node.node);
	key = _key;

	/* Context type. */
	if (ctx->ctx.ctx != key->ctx) {
		goto no_match;
	}

	switch (key->ctx) {
	case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
		if (strncmp(key->u.perf_counter.name,
				ctx->ctx.u.perf_counter.name,
				sizeof(key->u.perf_counter.name))) {
			goto no_match;
		}
		break;
	case LTTNG_UST_CONTEXT_APP_CONTEXT:
		if (strcmp(key->u.app_ctx.provider_name,
				ctx->ctx.u.app_ctx.provider_name) ||
				strcmp(key->u.app_ctx.ctx_name,
				ctx->ctx.u.app_ctx.ctx_name)) {
			goto no_match;
		}
		break;
	default:
		break;
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}

/*
 * Look up an ust app context matching an lttng_ust_context.
 *
 * Must be called while holding RCU read side lock.
 * Return an ust_app_ctx object or NULL if not found.
 */
static
struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
		struct lttng_ust_context_attr *uctx)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;
	struct ust_app_ctx *app_ctx = NULL;

	assert(uctx);
	assert(ht);

	/* Lookup using the lttng_ust_context_type and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
			ht_match_ust_app_ctx, uctx, &iter.iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (!node) {
		goto end;
	}

	app_ctx = caa_container_of(node, struct ust_app_ctx, node);

end:
	return app_ctx;
}

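/*
 * Illustrative sketch (not built): the "custom match" lookup pattern used by
 * find_ust_app_context(). The underlying urcu lock-free hash table hashes on
 * a primitive key (here the context type) and then calls a user-supplied
 * match callback to compare the full key, which lets a single hashed value
 * cover compound keys such as (type, perf counter name). The names below are
 * hypothetical.
 */
#if 0
static int example_match(struct cds_lfht_node *node, const void *key)
{
	/* Compare the full compound key here; return non-zero on a match. */
	return 1;
}

static void example_lookup(struct lttng_ht *ht, unsigned long type_key,
		const void *full_key)
{
	struct lttng_ht_iter iter;

	/* Hash on the cheap part of the key, match on the full key. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) type_key, lttng_ht_seed),
			example_match, full_key, &iter.iter);
}
#endif
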
/*
 * Create a context for the channel on the tracer.
 *
 * Called with UST app session lock held and a RCU read side lock.
 */
static
int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
		struct lttng_ust_context_attr *uctx,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app adding context to channel %s", ua_chan->name);

	ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
	if (ua_ctx) {
		ret = -EEXIST;
		goto error;
	}

	/* Allocate ctx object and fill it. */
	ua_ctx = alloc_ust_app_ctx(uctx);
	if (ua_ctx == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
	lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
	cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);

	ret = create_ust_channel_context(ua_chan, ua_ctx, app);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}

/*
 * Enable on the tracer side a ust app event for the session and channel.
 *
 * Called with UST app session lock held.
 */
static
int enable_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_event *ua_event, struct ust_app *app)
{
	int ret;

	ret = enable_ust_object(app, ua_event->obj);
	if (ret < 0) {
		goto error;
	}

	ua_event->enabled = 1;

error:
	return ret;
}

/*
 * Disable on the tracer side a ust app event for the session and channel.
 */
static int disable_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_event *ua_event, struct ust_app *app)
{
	int ret;

	ret = disable_ust_object(app, ua_event->obj);
	if (ret < 0) {
		goto error;
	}

	ua_event->enabled = 0;

error:
	return ret;
}

/*
 * Lookup ust app channel for session and disable it on the tracer side.
 */
static
int disable_ust_app_channel(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app *app)
{
	int ret;

	ret = disable_ust_channel(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	ua_chan->enabled = 0;

error:
	return ret;
}

/*
 * Lookup ust app channel for session and enable it on the tracer side. This
 * MUST be called with a RCU read side lock acquired.
 */
static int enable_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node == NULL) {
		DBG2("Unable to find channel %s in ust session id %" PRIu64,
				uchan->name, ua_sess->tracing_id);
		goto error;
	}

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	ret = enable_ust_channel(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}

/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * Called with UST app session lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry,
		uint64_t trace_archive_id)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * streams we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry, usess->current_trace_chunk);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	return 0;

error_destroy:
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	return ret;
}

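/*
 * Illustrative sketch (not built): the file descriptor budget reserved by
 * do_consumer_create_channel(). One fd is accounted for the channel object
 * itself, then DEFAULT_UST_STREAM_FD_NUM per stream announced by the
 * consumer; the error paths above release exactly what was reserved. The
 * helper name and the stream count in the comment are hypothetical.
 */
#if 0
static unsigned int example_channel_fd_budget(unsigned int expected_stream_count)
{
	/* e.g. 4 streams with a default of 2 fds each -> 1 + 2 * 4 = 9. */
	return 1 + DEFAULT_UST_STREAM_FD_NUM * expected_stream_count;
}
#endif
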
/*
 * Duplicate the ust data object of the ust app stream and save it in the
 * buffer registry stream.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
		struct ust_app_stream *stream)
{
	int ret;

	assert(reg_stream);
	assert(stream);

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 2);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate stream");
		goto error;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&stream->obj,
			reg_stream->obj.ust);
	if (ret < 0) {
		ERR("Duplicate stream obj from %p to %p failed with ret %d",
				reg_stream->obj.ust, stream->obj, ret);
		lttng_fd_put(LTTNG_FD_APPS, 2);
		goto error;
	}
	stream->handle = stream->obj->handle;

error:
	return ret;
}

/*
 * Duplicate the ust data object of the ust app channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/* Need two fds for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for channel once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}

/*
 * For a given channel buffer registry, setup all streams of the given ust
 * application channel.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_stream *stream, *stmp;

	assert(reg_chan);
	assert(ua_chan);

	DBG2("UST app setup buffer registry stream");

	/* Transfer all streams from the application channel to the registry. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		struct buffer_reg_stream *reg_stream;

		ret = buffer_reg_stream_create(&reg_stream);
		if (ret < 0) {
			goto error;
		}

		/*
		 * Keep original pointer and nullify it in the stream so the delete
		 * stream call does not release the object.
		 */
		reg_stream->obj.ust = stream->obj;
		stream->obj = NULL;
		buffer_reg_stream_add(reg_stream, reg_chan);

		/* We don't need the streams anymore. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}

error:
	return ret;
}

/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. Important, the created object is NOT added to the session
 * registry hash table.
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(reg_chan);
	reg_chan->consumer_key = ua_chan->key;
	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
	reg_chan->num_subbuf = ua_chan->attr.num_subbuf;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, reg_chan);

	if (regp) {
		*regp = reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}

/*
 * Setup buffer registry channel for the given session registry and application
 * channel object: all application streams are transferred to the registry and
 * the channel's ust object is handed over to it.
 *
 * Return 0 on success else a negative value.
 */
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
		struct ust_app *app)
{
	int ret;

	assert(reg_sess);
	assert(reg_chan);
	assert(ua_chan);
	assert(ua_chan->obj);

	DBG2("UST app setup buffer registry channel for %s", ua_chan->name);

	/* Setup all streams for the registry. */
	ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
	if (ret < 0) {
		goto error;
	}

	reg_chan->obj.ust = ua_chan->obj;
	ua_chan->obj = NULL;

	return 0;

error:
	buffer_reg_channel_remove(reg_sess, reg_chan);
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
	return ret;
}

/*
 * Send buffer registry channel to the application.
 *
 * Return 0 on success else a negative value.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		struct ust_app_stream stream;

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream, app);
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				ret = -ENOTCONN; /* Caused by app exiting. */
			}
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream, app);
	}
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}

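/*
 * Illustrative sketch (not built): the per-stream send pattern used above.
 * Each registry stream keeps the canonical ust object; for every application
 * a short-lived, stack-allocated ust_app_stream receives a duplicated object
 * (and its fds), is sent to the app, then released locally so that only the
 * registry copy survives. Error handling is omitted and the helper name is
 * hypothetical.
 */
#if 0
static void example_send_one_stream(struct ust_app *app,
		struct ust_app_channel *ua_chan, struct buffer_reg_stream *reg_stream)
{
	struct ust_app_stream stream;

	(void) duplicate_stream_object(reg_stream, &stream);	/* dup fds */
	(void) ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
	(void) release_ust_app_stream(-1, &stream, app);	/* drop local copy */
}
#endif
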
/*
 * Create a channel with per UID buffers and send it to the application.
 *
 * This MUST be called with a RCU read side lock acquired.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;
	struct ltt_session *session = NULL;
	enum lttng_error_code notification_ret;
	struct ust_registry_channel *chan_reg;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be found, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (reg_chan) {
		goto send_channel;
	}

	/* Create the buffer registry channel object. */
	ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/*
	 * Create the buffers on the consumer side. This call populates the
	 * ust app channel object with all streams and data object.
	 */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, reg_uid->registry->reg.ust,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

		/*
		 * Let's remove the previously created buffer registry channel so
		 * it's not visible anymore in the session registry.
		 */
		ust_registry_channel_del_free(reg_uid->registry->reg.ust,
				ua_chan->tracing_channel_id, false);
		buffer_reg_channel_remove(reg_uid->registry, reg_chan);
		buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);