/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#include <urcu/compiler.h>
#include <lttng/ust-error.h>

#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "health-sessiond.h"
#include "ust-consumer.h"
/* Forward declaration: flush all buffers of an application's session. */
int ust_app_flush_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess);
/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
58 * Return the incremented value of next_channel_key.
60 static uint64_t get_next_channel_key(void)
64 pthread_mutex_lock(&next_channel_key_lock
);
65 ret
= ++_next_channel_key
;
66 pthread_mutex_unlock(&next_channel_key_lock
);
71 * Return the atomically incremented value of next_session_id.
73 static uint64_t get_next_session_id(void)
77 pthread_mutex_lock(&next_session_id_lock
);
78 ret
= ++_next_session_id
;
79 pthread_mutex_unlock(&next_session_id_lock
);
83 static void copy_channel_attr_to_ustctl(
84 struct ustctl_consumer_channel_attr
*attr
,
85 struct lttng_ust_channel_attr
*uattr
)
87 /* Copy event attributes since the layout is different. */
88 attr
->subbuf_size
= uattr
->subbuf_size
;
89 attr
->num_subbuf
= uattr
->num_subbuf
;
90 attr
->overwrite
= uattr
->overwrite
;
91 attr
->switch_timer_interval
= uattr
->switch_timer_interval
;
92 attr
->read_timer_interval
= uattr
->read_timer_interval
;
93 attr
->output
= uattr
->output
;
97 * Match function for the hash table lookup.
99 * It matches an ust app event based on three attributes which are the event
100 * name, the filter bytecode and the loglevel.
102 static int ht_match_ust_app_event(struct cds_lfht_node
*node
, const void *_key
)
104 struct ust_app_event
*event
;
105 const struct ust_app_ht_key
*key
;
106 int ev_loglevel_value
;
111 event
= caa_container_of(node
, struct ust_app_event
, node
.node
);
113 ev_loglevel_value
= event
->attr
.loglevel
;
115 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
118 if (strncmp(event
->attr
.name
, key
->name
, sizeof(event
->attr
.name
)) != 0) {
122 /* Event loglevel. */
123 if (ev_loglevel_value
!= key
->loglevel_type
) {
124 if (event
->attr
.loglevel_type
== LTTNG_UST_LOGLEVEL_ALL
125 && key
->loglevel_type
== 0 &&
126 ev_loglevel_value
== -1) {
128 * Match is accepted. This is because on event creation, the
129 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
130 * -1 are accepted for this loglevel type since 0 is the one set by
131 * the API when receiving an enable event.
138 /* One of the filters is NULL, fail. */
139 if ((key
->filter
&& !event
->filter
) || (!key
->filter
&& event
->filter
)) {
143 if (key
->filter
&& event
->filter
) {
144 /* Both filters exists, check length followed by the bytecode. */
145 if (event
->filter
->len
!= key
->filter
->len
||
146 memcmp(event
->filter
->data
, key
->filter
->data
,
147 event
->filter
->len
) != 0) {
152 /* One of the exclusions is NULL, fail. */
153 if ((key
->exclusion
&& !event
->exclusion
) || (!key
->exclusion
&& event
->exclusion
)) {
157 if (key
->exclusion
&& event
->exclusion
) {
158 /* Both exclusions exists, check count followed by the names. */
159 if (event
->exclusion
->count
!= key
->exclusion
->count
||
160 memcmp(event
->exclusion
->names
, key
->exclusion
->names
,
161 event
->exclusion
->count
* LTTNG_UST_SYM_NAME_LEN
) != 0) {
175 * Unique add of an ust app event in the given ht. This uses the custom
176 * ht_match_ust_app_event match function and the event name as hash.
178 static void add_unique_ust_app_event(struct ust_app_channel
*ua_chan
,
179 struct ust_app_event
*event
)
181 struct cds_lfht_node
*node_ptr
;
182 struct ust_app_ht_key key
;
186 assert(ua_chan
->events
);
189 ht
= ua_chan
->events
;
190 key
.name
= event
->attr
.name
;
191 key
.filter
= event
->filter
;
192 key
.loglevel_type
= event
->attr
.loglevel
;
193 key
.exclusion
= event
->exclusion
;
195 node_ptr
= cds_lfht_add_unique(ht
->ht
,
196 ht
->hash_fct(event
->node
.key
, lttng_ht_seed
),
197 ht_match_ust_app_event
, &key
, &event
->node
.node
);
198 assert(node_ptr
== &event
->node
.node
);
202 * Close the notify socket from the given RCU head object. This MUST be called
203 * through a call_rcu().
205 static void close_notify_sock_rcu(struct rcu_head
*head
)
208 struct ust_app_notify_sock_obj
*obj
=
209 caa_container_of(head
, struct ust_app_notify_sock_obj
, head
);
211 /* Must have a valid fd here. */
212 assert(obj
->fd
>= 0);
214 ret
= close(obj
->fd
);
216 ERR("close notify sock %d RCU", obj
->fd
);
218 lttng_fd_put(LTTNG_FD_APPS
, 1);
224 * Return the session registry according to the buffer type of the given
227 * A registry per UID object MUST exists before calling this function or else
228 * it assert() if not found. RCU read side lock must be acquired.
230 static struct ust_registry_session
*get_session_registry(
231 struct ust_app_session
*ua_sess
)
233 struct ust_registry_session
*registry
= NULL
;
237 switch (ua_sess
->buffer_type
) {
238 case LTTNG_BUFFER_PER_PID
:
240 struct buffer_reg_pid
*reg_pid
= buffer_reg_pid_find(ua_sess
->id
);
244 registry
= reg_pid
->registry
->reg
.ust
;
247 case LTTNG_BUFFER_PER_UID
:
249 struct buffer_reg_uid
*reg_uid
= buffer_reg_uid_find(
250 ua_sess
->tracing_id
, ua_sess
->bits_per_long
, ua_sess
->uid
);
254 registry
= reg_uid
->registry
->reg
.ust
;
266 * Delete ust context safely. RCU read lock must be held before calling
270 void delete_ust_app_ctx(int sock
, struct ust_app_ctx
*ua_ctx
,
278 pthread_mutex_lock(&app
->sock_lock
);
279 ret
= ustctl_release_object(sock
, ua_ctx
->obj
);
280 pthread_mutex_unlock(&app
->sock_lock
);
281 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
282 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
283 sock
, ua_ctx
->obj
->handle
, ret
);
291 * Delete ust app event safely. RCU read lock must be held before calling
295 void delete_ust_app_event(int sock
, struct ust_app_event
*ua_event
,
302 free(ua_event
->filter
);
303 if (ua_event
->exclusion
!= NULL
)
304 free(ua_event
->exclusion
);
305 if (ua_event
->obj
!= NULL
) {
306 pthread_mutex_lock(&app
->sock_lock
);
307 ret
= ustctl_release_object(sock
, ua_event
->obj
);
308 pthread_mutex_unlock(&app
->sock_lock
);
309 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
310 ERR("UST app sock %d release event obj failed with ret %d",
319 * Release ust data object of the given stream.
321 * Return 0 on success or else a negative value.
323 static int release_ust_app_stream(int sock
, struct ust_app_stream
*stream
,
331 pthread_mutex_lock(&app
->sock_lock
);
332 ret
= ustctl_release_object(sock
, stream
->obj
);
333 pthread_mutex_unlock(&app
->sock_lock
);
334 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
335 ERR("UST app sock %d release stream obj failed with ret %d",
338 lttng_fd_put(LTTNG_FD_APPS
, 2);
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	/* Best effort: the application may already be gone. */
	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}
360 * We need to execute ht_destroy outside of RCU read-side critical
361 * section and outside of call_rcu thread, so we postpone its execution
362 * using ht_cleanup_push. It is simpler than to change the semantic of
363 * the many callers of delete_ust_app_session().
366 void delete_ust_app_channel_rcu(struct rcu_head
*head
)
368 struct ust_app_channel
*ua_chan
=
369 caa_container_of(head
, struct ust_app_channel
, rcu_head
);
371 ht_cleanup_push(ua_chan
->ctx
);
372 ht_cleanup_push(ua_chan
->events
);
377 * Extract the lost packet or discarded events counter when the channel is
378 * being deleted and store the value in the parent channel so we can
379 * access it from lttng list and at stop/destroy.
381 * The session list lock must be held by the caller.
384 void save_per_pid_lost_discarded_counters(struct ust_app_channel
*ua_chan
)
386 uint64_t discarded
= 0, lost
= 0;
387 struct ltt_session
*session
;
388 struct ltt_ust_channel
*uchan
;
390 if (ua_chan
->attr
.type
!= LTTNG_UST_CHAN_PER_CPU
) {
395 session
= session_find_by_id(ua_chan
->session
->tracing_id
);
396 if (!session
|| !session
->ust_session
) {
398 * Not finding the session is not an error because there are
399 * multiple ways the channels can be torn down.
401 * 1) The session daemon can initiate the destruction of the
402 * ust app session after receiving a destroy command or
403 * during its shutdown/teardown.
404 * 2) The application, since we are in per-pid tracing, is
405 * unregistering and tearing down its ust app session.
407 * Both paths are protected by the session list lock which
408 * ensures that the accounting of lost packets and discarded
409 * events is done exactly once. The session is then unpublished
410 * from the session list, resulting in this condition.
415 if (ua_chan
->attr
.overwrite
) {
416 consumer_get_lost_packets(ua_chan
->session
->tracing_id
,
417 ua_chan
->key
, session
->ust_session
->consumer
,
420 consumer_get_discarded_events(ua_chan
->session
->tracing_id
,
421 ua_chan
->key
, session
->ust_session
->consumer
,
424 uchan
= trace_ust_find_channel_by_name(
425 session
->ust_session
->domain_global
.channels
,
428 ERR("Missing UST channel to store discarded counters");
432 uchan
->per_pid_closed_app_discarded
+= discarded
;
433 uchan
->per_pid_closed_app_lost
+= lost
;
440 * Delete ust app channel safely. RCU read lock must be held before calling
443 * The session list lock must be held by the caller.
446 void delete_ust_app_channel(int sock
, struct ust_app_channel
*ua_chan
,
450 struct lttng_ht_iter iter
;
451 struct ust_app_event
*ua_event
;
452 struct ust_app_ctx
*ua_ctx
;
453 struct ust_app_stream
*stream
, *stmp
;
454 struct ust_registry_session
*registry
;
458 DBG3("UST app deleting channel %s", ua_chan
->name
);
461 cds_list_for_each_entry_safe(stream
, stmp
, &ua_chan
->streams
.head
, list
) {
462 cds_list_del(&stream
->list
);
463 delete_ust_app_stream(sock
, stream
, app
);
467 cds_lfht_for_each_entry(ua_chan
->ctx
->ht
, &iter
.iter
, ua_ctx
, node
.node
) {
468 cds_list_del(&ua_ctx
->list
);
469 ret
= lttng_ht_del(ua_chan
->ctx
, &iter
);
471 delete_ust_app_ctx(sock
, ua_ctx
, app
);
475 cds_lfht_for_each_entry(ua_chan
->events
->ht
, &iter
.iter
, ua_event
,
477 ret
= lttng_ht_del(ua_chan
->events
, &iter
);
479 delete_ust_app_event(sock
, ua_event
, app
);
482 if (ua_chan
->session
->buffer_type
== LTTNG_BUFFER_PER_PID
) {
483 /* Wipe and free registry from session registry. */
484 registry
= get_session_registry(ua_chan
->session
);
486 ust_registry_channel_del_free(registry
, ua_chan
->key
);
488 save_per_pid_lost_discarded_counters(ua_chan
);
491 if (ua_chan
->obj
!= NULL
) {
492 /* Remove channel from application UST object descriptor. */
493 iter
.iter
.node
= &ua_chan
->ust_objd_node
.node
;
494 ret
= lttng_ht_del(app
->ust_objd
, &iter
);
496 pthread_mutex_lock(&app
->sock_lock
);
497 ret
= ustctl_release_object(sock
, ua_chan
->obj
);
498 pthread_mutex_unlock(&app
->sock_lock
);
499 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
500 ERR("UST app sock %d release channel obj failed with ret %d",
503 lttng_fd_put(LTTNG_FD_APPS
, 1);
507 call_rcu(&ua_chan
->rcu_head
, delete_ust_app_channel_rcu
);
510 int ust_app_register_done(struct ust_app
*app
)
514 pthread_mutex_lock(&app
->sock_lock
);
515 ret
= ustctl_register_done(app
->sock
);
516 pthread_mutex_unlock(&app
->sock_lock
);
520 int ust_app_release_object(struct ust_app
*app
, struct lttng_ust_object_data
*data
)
525 pthread_mutex_lock(&app
->sock_lock
);
530 ret
= ustctl_release_object(sock
, data
);
532 pthread_mutex_unlock(&app
->sock_lock
);
538 * Push metadata to consumer socket.
540 * RCU read-side lock must be held to guarantee existance of socket.
541 * Must be called with the ust app session lock held.
542 * Must be called with the registry lock held.
544 * On success, return the len of metadata pushed or else a negative value.
545 * Returning a -EPIPE return value means we could not send the metadata,
546 * but it can be caused by recoverable errors (e.g. the application has
547 * terminated concurrently).
549 ssize_t
ust_app_push_metadata(struct ust_registry_session
*registry
,
550 struct consumer_socket
*socket
, int send_zero_data
)
553 char *metadata_str
= NULL
;
554 size_t len
, offset
, new_metadata_len_sent
;
556 uint64_t metadata_key
, metadata_version
;
561 metadata_key
= registry
->metadata_key
;
564 * Means that no metadata was assigned to the session. This can
565 * happens if no start has been done previously.
571 offset
= registry
->metadata_len_sent
;
572 len
= registry
->metadata_len
- registry
->metadata_len_sent
;
573 new_metadata_len_sent
= registry
->metadata_len
;
574 metadata_version
= registry
->metadata_version
;
576 DBG3("No metadata to push for metadata key %" PRIu64
,
577 registry
->metadata_key
);
579 if (send_zero_data
) {
580 DBG("No metadata to push");
586 /* Allocate only what we have to send. */
587 metadata_str
= zmalloc(len
);
589 PERROR("zmalloc ust app metadata string");
593 /* Copy what we haven't sent out. */
594 memcpy(metadata_str
, registry
->metadata
+ offset
, len
);
597 pthread_mutex_unlock(®istry
->lock
);
599 * We need to unlock the registry while we push metadata to
600 * break a circular dependency between the consumerd metadata
601 * lock and the sessiond registry lock. Indeed, pushing metadata
602 * to the consumerd awaits that it gets pushed all the way to
603 * relayd, but doing so requires grabbing the metadata lock. If
604 * a concurrent metadata request is being performed by
605 * consumerd, this can try to grab the registry lock on the
606 * sessiond while holding the metadata lock on the consumer
607 * daemon. Those push and pull schemes are performed on two
608 * different bidirectionnal communication sockets.
610 ret
= consumer_push_metadata(socket
, metadata_key
,
611 metadata_str
, len
, offset
, metadata_version
);
612 pthread_mutex_lock(®istry
->lock
);
615 * There is an acceptable race here between the registry
616 * metadata key assignment and the creation on the
617 * consumer. The session daemon can concurrently push
618 * metadata for this registry while being created on the
619 * consumer since the metadata key of the registry is
620 * assigned *before* it is setup to avoid the consumer
621 * to ask for metadata that could possibly be not found
622 * in the session daemon.
624 * The metadata will get pushed either by the session
625 * being stopped or the consumer requesting metadata if
626 * that race is triggered.
628 if (ret
== -LTTCOMM_CONSUMERD_CHANNEL_FAIL
) {
631 ERR("Error pushing metadata to consumer");
637 * Metadata may have been concurrently pushed, since
638 * we're not holding the registry lock while pushing to
639 * consumer. This is handled by the fact that we send
640 * the metadata content, size, and the offset at which
641 * that metadata belongs. This may arrive out of order
642 * on the consumer side, and the consumer is able to
643 * deal with overlapping fragments. The consumer
644 * supports overlapping fragments, which must be
645 * contiguous starting from offset 0. We keep the
646 * largest metadata_len_sent value of the concurrent
649 registry
->metadata_len_sent
=
650 max_t(size_t, registry
->metadata_len_sent
,
651 new_metadata_len_sent
);
660 * On error, flag the registry that the metadata is
661 * closed. We were unable to push anything and this
662 * means that either the consumer is not responding or
663 * the metadata cache has been destroyed on the
666 registry
->metadata_closed
= 1;
674 * For a given application and session, push metadata to consumer.
675 * Either sock or consumer is required : if sock is NULL, the default
676 * socket to send the metadata is retrieved from consumer, if sock
677 * is not NULL we use it to send the metadata.
678 * RCU read-side lock must be held while calling this function,
679 * therefore ensuring existance of registry. It also ensures existance
680 * of socket throughout this function.
682 * Return 0 on success else a negative error.
683 * Returning a -EPIPE return value means we could not send the metadata,
684 * but it can be caused by recoverable errors (e.g. the application has
685 * terminated concurrently).
687 static int push_metadata(struct ust_registry_session
*registry
,
688 struct consumer_output
*consumer
)
692 struct consumer_socket
*socket
;
697 pthread_mutex_lock(®istry
->lock
);
698 if (registry
->metadata_closed
) {
703 /* Get consumer socket to use to push the metadata.*/
704 socket
= consumer_find_socket_by_bitness(registry
->bits_per_long
,
711 ret
= ust_app_push_metadata(registry
, socket
, 0);
716 pthread_mutex_unlock(®istry
->lock
);
720 pthread_mutex_unlock(®istry
->lock
);
725 * Send to the consumer a close metadata command for the given session. Once
726 * done, the metadata channel is deleted and the session metadata pointer is
727 * nullified. The session lock MUST be held unless the application is
728 * in the destroy path.
730 * Return 0 on success else a negative value.
732 static int close_metadata(struct ust_registry_session
*registry
,
733 struct consumer_output
*consumer
)
736 struct consumer_socket
*socket
;
743 pthread_mutex_lock(®istry
->lock
);
745 if (!registry
->metadata_key
|| registry
->metadata_closed
) {
750 /* Get consumer socket to use to push the metadata.*/
751 socket
= consumer_find_socket_by_bitness(registry
->bits_per_long
,
758 ret
= consumer_close_metadata(socket
, registry
->metadata_key
);
765 * Metadata closed. Even on error this means that the consumer is not
766 * responding or not found so either way a second close should NOT be emit
769 registry
->metadata_closed
= 1;
771 pthread_mutex_unlock(®istry
->lock
);
777 * We need to execute ht_destroy outside of RCU read-side critical
778 * section and outside of call_rcu thread, so we postpone its execution
779 * using ht_cleanup_push. It is simpler than to change the semantic of
780 * the many callers of delete_ust_app_session().
783 void delete_ust_app_session_rcu(struct rcu_head
*head
)
785 struct ust_app_session
*ua_sess
=
786 caa_container_of(head
, struct ust_app_session
, rcu_head
);
788 ht_cleanup_push(ua_sess
->channels
);
793 * Delete ust app session safely. RCU read lock must be held before calling
796 * The session list lock must be held by the caller.
799 void delete_ust_app_session(int sock
, struct ust_app_session
*ua_sess
,
803 struct lttng_ht_iter iter
;
804 struct ust_app_channel
*ua_chan
;
805 struct ust_registry_session
*registry
;
809 pthread_mutex_lock(&ua_sess
->lock
);
811 assert(!ua_sess
->deleted
);
812 ua_sess
->deleted
= true;
814 registry
= get_session_registry(ua_sess
);
815 /* Registry can be null on error path during initialization. */
817 /* Push metadata for application before freeing the application. */
818 (void) push_metadata(registry
, ua_sess
->consumer
);
821 * Don't ask to close metadata for global per UID buffers. Close
822 * metadata only on destroy trace session in this case. Also, the
823 * previous push metadata could have flag the metadata registry to
824 * close so don't send a close command if closed.
826 if (ua_sess
->buffer_type
!= LTTNG_BUFFER_PER_UID
) {
827 /* And ask to close it for this session registry. */
828 (void) close_metadata(registry
, ua_sess
->consumer
);
832 cds_lfht_for_each_entry(ua_sess
->channels
->ht
, &iter
.iter
, ua_chan
,
834 ret
= lttng_ht_del(ua_sess
->channels
, &iter
);
836 delete_ust_app_channel(sock
, ua_chan
, app
);
839 /* In case of per PID, the registry is kept in the session. */
840 if (ua_sess
->buffer_type
== LTTNG_BUFFER_PER_PID
) {
841 struct buffer_reg_pid
*reg_pid
= buffer_reg_pid_find(ua_sess
->id
);
844 * Registry can be null on error path during
847 buffer_reg_pid_remove(reg_pid
);
848 buffer_reg_pid_destroy(reg_pid
);
852 if (ua_sess
->handle
!= -1) {
853 pthread_mutex_lock(&app
->sock_lock
);
854 ret
= ustctl_release_handle(sock
, ua_sess
->handle
);
855 pthread_mutex_unlock(&app
->sock_lock
);
856 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
857 ERR("UST app sock %d release session handle failed with ret %d",
860 /* Remove session from application UST object descriptor. */
861 iter
.iter
.node
= &ua_sess
->ust_objd_node
.node
;
862 ret
= lttng_ht_del(app
->ust_sessions_objd
, &iter
);
866 pthread_mutex_unlock(&ua_sess
->lock
);
868 consumer_output_put(ua_sess
->consumer
);
870 call_rcu(&ua_sess
->rcu_head
, delete_ust_app_session_rcu
);
874 * Delete a traceable application structure from the global list. Never call
875 * this function outside of a call_rcu call.
877 * RCU read side lock should _NOT_ be held when calling this function.
880 void delete_ust_app(struct ust_app
*app
)
883 struct ust_app_session
*ua_sess
, *tmp_ua_sess
;
886 * The session list lock must be held during this function to guarantee
887 * the existence of ua_sess.
890 /* Delete ust app sessions info */
895 cds_list_for_each_entry_safe(ua_sess
, tmp_ua_sess
, &app
->teardown_head
,
897 /* Free every object in the session and the session. */
899 delete_ust_app_session(sock
, ua_sess
, app
);
903 ht_cleanup_push(app
->sessions
);
904 ht_cleanup_push(app
->ust_sessions_objd
);
905 ht_cleanup_push(app
->ust_objd
);
908 * Wait until we have deleted the application from the sock hash table
909 * before closing this socket, otherwise an application could re-use the
910 * socket ID and race with the teardown, using the same hash table entry.
912 * It's OK to leave the close in call_rcu. We want it to stay unique for
913 * all RCU readers that could run concurrently with unregister app,
914 * therefore we _need_ to only close that socket after a grace period. So
915 * it should stay in this RCU callback.
917 * This close() is a very important step of the synchronization model so
918 * every modification to this function must be carefully reviewed.
924 lttng_fd_put(LTTNG_FD_APPS
, 1);
926 DBG2("UST app pid %d deleted", app
->pid
);
928 session_unlock_list();
932 * URCU intermediate call to delete an UST app.
935 void delete_ust_app_rcu(struct rcu_head
*head
)
937 struct lttng_ht_node_ulong
*node
=
938 caa_container_of(head
, struct lttng_ht_node_ulong
, head
);
939 struct ust_app
*app
=
940 caa_container_of(node
, struct ust_app
, pid_n
);
942 DBG3("Call RCU deleting app PID %d", app
->pid
);
947 * Delete the session from the application ht and delete the data structure by
948 * freeing every object inside and releasing them.
950 * The session list lock must be held by the caller.
952 static void destroy_app_session(struct ust_app
*app
,
953 struct ust_app_session
*ua_sess
)
956 struct lttng_ht_iter iter
;
961 iter
.iter
.node
= &ua_sess
->node
.node
;
962 ret
= lttng_ht_del(app
->sessions
, &iter
);
964 /* Already scheduled for teardown. */
968 /* Once deleted, free the data structure. */
969 delete_ust_app_session(app
->sock
, ua_sess
, app
);
976 * Alloc new UST app session.
979 struct ust_app_session
*alloc_ust_app_session(struct ust_app
*app
)
981 struct ust_app_session
*ua_sess
;
983 /* Init most of the default value by allocating and zeroing */
984 ua_sess
= zmalloc(sizeof(struct ust_app_session
));
985 if (ua_sess
== NULL
) {
990 ua_sess
->handle
= -1;
991 ua_sess
->channels
= lttng_ht_new(0, LTTNG_HT_TYPE_STRING
);
992 ua_sess
->metadata_attr
.type
= LTTNG_UST_CHAN_METADATA
;
993 pthread_mutex_init(&ua_sess
->lock
, NULL
);
1002 * Alloc new UST app channel.
1005 struct ust_app_channel
*alloc_ust_app_channel(char *name
,
1006 struct ust_app_session
*ua_sess
,
1007 struct lttng_ust_channel_attr
*attr
)
1009 struct ust_app_channel
*ua_chan
;
1011 /* Init most of the default value by allocating and zeroing */
1012 ua_chan
= zmalloc(sizeof(struct ust_app_channel
));
1013 if (ua_chan
== NULL
) {
1018 /* Setup channel name */
1019 strncpy(ua_chan
->name
, name
, sizeof(ua_chan
->name
));
1020 ua_chan
->name
[sizeof(ua_chan
->name
) - 1] = '\0';
1022 ua_chan
->enabled
= 1;
1023 ua_chan
->handle
= -1;
1024 ua_chan
->session
= ua_sess
;
1025 ua_chan
->key
= get_next_channel_key();
1026 ua_chan
->ctx
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);
1027 ua_chan
->events
= lttng_ht_new(0, LTTNG_HT_TYPE_STRING
);
1028 lttng_ht_node_init_str(&ua_chan
->node
, ua_chan
->name
);
1030 CDS_INIT_LIST_HEAD(&ua_chan
->streams
.head
);
1031 CDS_INIT_LIST_HEAD(&ua_chan
->ctx_list
);
1033 /* Copy attributes */
1035 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
1036 ua_chan
->attr
.subbuf_size
= attr
->subbuf_size
;
1037 ua_chan
->attr
.num_subbuf
= attr
->num_subbuf
;
1038 ua_chan
->attr
.overwrite
= attr
->overwrite
;
1039 ua_chan
->attr
.switch_timer_interval
= attr
->switch_timer_interval
;
1040 ua_chan
->attr
.read_timer_interval
= attr
->read_timer_interval
;
1041 ua_chan
->attr
.output
= attr
->output
;
1043 /* By default, the channel is a per cpu channel. */
1044 ua_chan
->attr
.type
= LTTNG_UST_CHAN_PER_CPU
;
1046 DBG3("UST app channel %s allocated", ua_chan
->name
);
1055 * Allocate and initialize a UST app stream.
1057 * Return newly allocated stream pointer or NULL on error.
1059 struct ust_app_stream
*ust_app_alloc_stream(void)
1061 struct ust_app_stream
*stream
= NULL
;
1063 stream
= zmalloc(sizeof(*stream
));
1064 if (stream
== NULL
) {
1065 PERROR("zmalloc ust app stream");
1069 /* Zero could be a valid value for a handle so flag it to -1. */
1070 stream
->handle
= -1;
1077 * Alloc new UST app event.
1080 struct ust_app_event
*alloc_ust_app_event(char *name
,
1081 struct lttng_ust_event
*attr
)
1083 struct ust_app_event
*ua_event
;
1085 /* Init most of the default value by allocating and zeroing */
1086 ua_event
= zmalloc(sizeof(struct ust_app_event
));
1087 if (ua_event
== NULL
) {
1092 ua_event
->enabled
= 1;
1093 strncpy(ua_event
->name
, name
, sizeof(ua_event
->name
));
1094 ua_event
->name
[sizeof(ua_event
->name
) - 1] = '\0';
1095 lttng_ht_node_init_str(&ua_event
->node
, ua_event
->name
);
1097 /* Copy attributes */
1099 memcpy(&ua_event
->attr
, attr
, sizeof(ua_event
->attr
));
1102 DBG3("UST app event %s allocated", ua_event
->name
);
1111 * Alloc new UST app context.
1114 struct ust_app_ctx
*alloc_ust_app_ctx(struct lttng_ust_context_attr
*uctx
)
1116 struct ust_app_ctx
*ua_ctx
;
1118 ua_ctx
= zmalloc(sizeof(struct ust_app_ctx
));
1119 if (ua_ctx
== NULL
) {
1123 CDS_INIT_LIST_HEAD(&ua_ctx
->list
);
1126 memcpy(&ua_ctx
->ctx
, uctx
, sizeof(ua_ctx
->ctx
));
1127 if (uctx
->ctx
== LTTNG_UST_CONTEXT_APP_CONTEXT
) {
1128 char *provider_name
= NULL
, *ctx_name
= NULL
;
1130 provider_name
= strdup(uctx
->u
.app_ctx
.provider_name
);
1131 ctx_name
= strdup(uctx
->u
.app_ctx
.ctx_name
);
1132 if (!provider_name
|| !ctx_name
) {
1133 free(provider_name
);
1138 ua_ctx
->ctx
.u
.app_ctx
.provider_name
= provider_name
;
1139 ua_ctx
->ctx
.u
.app_ctx
.ctx_name
= ctx_name
;
1143 DBG3("UST app context %d allocated", ua_ctx
->ctx
.ctx
);
1151 * Allocate a filter and copy the given original filter.
1153 * Return allocated filter or NULL on error.
1155 static struct lttng_filter_bytecode
*copy_filter_bytecode(
1156 struct lttng_filter_bytecode
*orig_f
)
1158 struct lttng_filter_bytecode
*filter
= NULL
;
1160 /* Copy filter bytecode */
1161 filter
= zmalloc(sizeof(*filter
) + orig_f
->len
);
1163 PERROR("zmalloc alloc filter bytecode");
1167 memcpy(filter
, orig_f
, sizeof(*filter
) + orig_f
->len
);
1174 * Create a liblttng-ust filter bytecode from given bytecode.
1176 * Return allocated filter or NULL on error.
1178 static struct lttng_ust_filter_bytecode
*create_ust_bytecode_from_bytecode(
1179 struct lttng_filter_bytecode
*orig_f
)
1181 struct lttng_ust_filter_bytecode
*filter
= NULL
;
1183 /* Copy filter bytecode */
1184 filter
= zmalloc(sizeof(*filter
) + orig_f
->len
);
1186 PERROR("zmalloc alloc ust filter bytecode");
1190 assert(sizeof(struct lttng_filter_bytecode
) ==
1191 sizeof(struct lttng_ust_filter_bytecode
));
1192 memcpy(filter
, orig_f
, sizeof(*filter
) + orig_f
->len
);
1198 * Find an ust_app using the sock and return it. RCU read side lock must be
1199 * held before calling this helper function.
1201 struct ust_app
*ust_app_find_by_sock(int sock
)
1203 struct lttng_ht_node_ulong
*node
;
1204 struct lttng_ht_iter iter
;
1206 lttng_ht_lookup(ust_app_ht_by_sock
, (void *)((unsigned long) sock
), &iter
);
1207 node
= lttng_ht_iter_get_node_ulong(&iter
);
1209 DBG2("UST app find by sock %d not found", sock
);
1213 return caa_container_of(node
, struct ust_app
, sock_n
);
1220 * Find an ust_app using the notify sock and return it. RCU read side lock must
1221 * be held before calling this helper function.
1223 static struct ust_app
*find_app_by_notify_sock(int sock
)
1225 struct lttng_ht_node_ulong
*node
;
1226 struct lttng_ht_iter iter
;
1228 lttng_ht_lookup(ust_app_ht_by_notify_sock
, (void *)((unsigned long) sock
),
1230 node
= lttng_ht_iter_get_node_ulong(&iter
);
1232 DBG2("UST app find by notify sock %d not found", sock
);
1236 return caa_container_of(node
, struct ust_app
, notify_sock_n
);
1243 * Lookup for an ust app event based on event name, filter bytecode and the
1246 * Return an ust_app_event object or NULL on error.
1248 static struct ust_app_event
*find_ust_app_event(struct lttng_ht
*ht
,
1249 char *name
, struct lttng_filter_bytecode
*filter
,
1251 const struct lttng_event_exclusion
*exclusion
)
1253 struct lttng_ht_iter iter
;
1254 struct lttng_ht_node_str
*node
;
1255 struct ust_app_event
*event
= NULL
;
1256 struct ust_app_ht_key key
;
1261 /* Setup key for event lookup. */
1263 key
.filter
= filter
;
1264 key
.loglevel_type
= loglevel_value
;
1265 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1266 key
.exclusion
= exclusion
;
1268 /* Lookup using the event name as hash and a custom match fct. */
1269 cds_lfht_lookup(ht
->ht
, ht
->hash_fct((void *) name
, lttng_ht_seed
),
1270 ht_match_ust_app_event
, &key
, &iter
.iter
);
1271 node
= lttng_ht_iter_get_node_str(&iter
);
1276 event
= caa_container_of(node
, struct ust_app_event
, node
);
1283 * Create the channel context on the tracer.
1285 * Called with UST app session lock held.
1288 int create_ust_channel_context(struct ust_app_channel
*ua_chan
,
1289 struct ust_app_ctx
*ua_ctx
, struct ust_app
*app
)
1293 health_code_update();
1295 pthread_mutex_lock(&app
->sock_lock
);
1296 ret
= ustctl_add_context(app
->sock
, &ua_ctx
->ctx
,
1297 ua_chan
->obj
, &ua_ctx
->obj
);
1298 pthread_mutex_unlock(&app
->sock_lock
);
1300 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1301 ERR("UST app create channel context failed for app (pid: %d) "
1302 "with ret %d", app
->pid
, ret
);
1305 * This is normal behavior, an application can die during the
1306 * creation process. Don't report an error so the execution can
1307 * continue normally.
1310 DBG3("UST app disable event failed. Application is dead.");
1315 ua_ctx
->handle
= ua_ctx
->obj
->handle
;
1317 DBG2("UST app context handle %d created successfully for channel %s",
1318 ua_ctx
->handle
, ua_chan
->name
);
1321 health_code_update();
1326 * Set the filter on the tracer.
1329 int set_ust_event_filter(struct ust_app_event
*ua_event
,
1330 struct ust_app
*app
)
1333 struct lttng_ust_filter_bytecode
*ust_bytecode
= NULL
;
1335 health_code_update();
1337 if (!ua_event
->filter
) {
1342 ust_bytecode
= create_ust_bytecode_from_bytecode(ua_event
->filter
);
1343 if (!ust_bytecode
) {
1344 ret
= -LTTNG_ERR_NOMEM
;
1347 pthread_mutex_lock(&app
->sock_lock
);
1348 ret
= ustctl_set_filter(app
->sock
, ust_bytecode
,
1350 pthread_mutex_unlock(&app
->sock_lock
);
1352 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1353 ERR("UST app event %s filter failed for app (pid: %d) "
1354 "with ret %d", ua_event
->attr
.name
, app
->pid
, ret
);
1357 * This is normal behavior, an application can die during the
1358 * creation process. Don't report an error so the execution can
1359 * continue normally.
1362 DBG3("UST app filter event failed. Application is dead.");
1367 DBG2("UST filter set successfully for event %s", ua_event
->name
);
1370 health_code_update();
1376 struct lttng_ust_event_exclusion
*create_ust_exclusion_from_exclusion(
1377 struct lttng_event_exclusion
*exclusion
)
1379 struct lttng_ust_event_exclusion
*ust_exclusion
= NULL
;
1380 size_t exclusion_alloc_size
= sizeof(struct lttng_ust_event_exclusion
) +
1381 LTTNG_UST_SYM_NAME_LEN
* exclusion
->count
;
1383 ust_exclusion
= zmalloc(exclusion_alloc_size
);
1384 if (!ust_exclusion
) {
1389 assert(sizeof(struct lttng_event_exclusion
) ==
1390 sizeof(struct lttng_ust_event_exclusion
));
1391 memcpy(ust_exclusion
, exclusion
, exclusion_alloc_size
);
1393 return ust_exclusion
;
1397 * Set event exclusions on the tracer.
1400 int set_ust_event_exclusion(struct ust_app_event
*ua_event
,
1401 struct ust_app
*app
)
1404 struct lttng_ust_event_exclusion
*ust_exclusion
= NULL
;
1406 health_code_update();
1408 if (!ua_event
->exclusion
|| !ua_event
->exclusion
->count
) {
1413 ust_exclusion
= create_ust_exclusion_from_exclusion(
1414 ua_event
->exclusion
);
1415 if (!ust_exclusion
) {
1416 ret
= -LTTNG_ERR_NOMEM
;
1419 pthread_mutex_lock(&app
->sock_lock
);
1420 ret
= ustctl_set_exclusion(app
->sock
, ust_exclusion
, ua_event
->obj
);
1421 pthread_mutex_unlock(&app
->sock_lock
);
1423 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1424 ERR("UST app event %s exclusions failed for app (pid: %d) "
1425 "with ret %d", ua_event
->attr
.name
, app
->pid
, ret
);
1428 * This is normal behavior, an application can die during the
1429 * creation process. Don't report an error so the execution can
1430 * continue normally.
1433 DBG3("UST app event exclusion failed. Application is dead.");
1438 DBG2("UST exclusion set successfully for event %s", ua_event
->name
);
1441 health_code_update();
1442 free(ust_exclusion
);
1447 * Disable the specified event on to UST tracer for the UST session.
1449 static int disable_ust_event(struct ust_app
*app
,
1450 struct ust_app_session
*ua_sess
, struct ust_app_event
*ua_event
)
1454 health_code_update();
1456 pthread_mutex_lock(&app
->sock_lock
);
1457 ret
= ustctl_disable(app
->sock
, ua_event
->obj
);
1458 pthread_mutex_unlock(&app
->sock_lock
);
1460 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1461 ERR("UST app event %s disable failed for app (pid: %d) "
1462 "and session handle %d with ret %d",
1463 ua_event
->attr
.name
, app
->pid
, ua_sess
->handle
, ret
);
1466 * This is normal behavior, an application can die during the
1467 * creation process. Don't report an error so the execution can
1468 * continue normally.
1471 DBG3("UST app disable event failed. Application is dead.");
1476 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1477 ua_event
->attr
.name
, app
->pid
);
1480 health_code_update();
1485 * Disable the specified channel on to UST tracer for the UST session.
1487 static int disable_ust_channel(struct ust_app
*app
,
1488 struct ust_app_session
*ua_sess
, struct ust_app_channel
*ua_chan
)
1492 health_code_update();
1494 pthread_mutex_lock(&app
->sock_lock
);
1495 ret
= ustctl_disable(app
->sock
, ua_chan
->obj
);
1496 pthread_mutex_unlock(&app
->sock_lock
);
1498 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1499 ERR("UST app channel %s disable failed for app (pid: %d) "
1500 "and session handle %d with ret %d",
1501 ua_chan
->name
, app
->pid
, ua_sess
->handle
, ret
);
1504 * This is normal behavior, an application can die during the
1505 * creation process. Don't report an error so the execution can
1506 * continue normally.
1509 DBG3("UST app disable channel failed. Application is dead.");
1514 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1515 ua_chan
->name
, app
->pid
);
1518 health_code_update();
1523 * Enable the specified channel on to UST tracer for the UST session.
1525 static int enable_ust_channel(struct ust_app
*app
,
1526 struct ust_app_session
*ua_sess
, struct ust_app_channel
*ua_chan
)
1530 health_code_update();
1532 pthread_mutex_lock(&app
->sock_lock
);
1533 ret
= ustctl_enable(app
->sock
, ua_chan
->obj
);
1534 pthread_mutex_unlock(&app
->sock_lock
);
1536 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1537 ERR("UST app channel %s enable failed for app (pid: %d) "
1538 "and session handle %d with ret %d",
1539 ua_chan
->name
, app
->pid
, ua_sess
->handle
, ret
);
1542 * This is normal behavior, an application can die during the
1543 * creation process. Don't report an error so the execution can
1544 * continue normally.
1547 DBG3("UST app enable channel failed. Application is dead.");
1552 ua_chan
->enabled
= 1;
1554 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1555 ua_chan
->name
, app
->pid
);
1558 health_code_update();
1563 * Enable the specified event on to UST tracer for the UST session.
1565 static int enable_ust_event(struct ust_app
*app
,
1566 struct ust_app_session
*ua_sess
, struct ust_app_event
*ua_event
)
1570 health_code_update();
1572 pthread_mutex_lock(&app
->sock_lock
);
1573 ret
= ustctl_enable(app
->sock
, ua_event
->obj
);
1574 pthread_mutex_unlock(&app
->sock_lock
);
1576 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1577 ERR("UST app event %s enable failed for app (pid: %d) "
1578 "and session handle %d with ret %d",
1579 ua_event
->attr
.name
, app
->pid
, ua_sess
->handle
, ret
);
1582 * This is normal behavior, an application can die during the
1583 * creation process. Don't report an error so the execution can
1584 * continue normally.
1587 DBG3("UST app enable event failed. Application is dead.");
1592 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1593 ua_event
->attr
.name
, app
->pid
);
1596 health_code_update();
1601 * Send channel and stream buffer to application.
1603 * Return 0 on success. On error, a negative value is returned.
1605 static int send_channel_pid_to_ust(struct ust_app
*app
,
1606 struct ust_app_session
*ua_sess
, struct ust_app_channel
*ua_chan
)
1609 struct ust_app_stream
*stream
, *stmp
;
1615 health_code_update();
1617 DBG("UST app sending channel %s to UST app sock %d", ua_chan
->name
,
1620 /* Send channel to the application. */
1621 ret
= ust_consumer_send_channel_to_ust(app
, ua_sess
, ua_chan
);
1622 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
1623 ret
= -ENOTCONN
; /* Caused by app exiting. */
1625 } else if (ret
< 0) {
1629 health_code_update();
1631 /* Send all streams to application. */
1632 cds_list_for_each_entry_safe(stream
, stmp
, &ua_chan
->streams
.head
, list
) {
1633 ret
= ust_consumer_send_stream_to_ust(app
, ua_chan
, stream
);
1634 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
1635 ret
= -ENOTCONN
; /* Caused by app exiting. */
1637 } else if (ret
< 0) {
1640 /* We don't need the stream anymore once sent to the tracer. */
1641 cds_list_del(&stream
->list
);
1642 delete_ust_app_stream(-1, stream
, app
);
1644 /* Flag the channel that it is sent to the application. */
1645 ua_chan
->is_sent
= 1;
1648 health_code_update();
1653 * Create the specified event onto the UST tracer for a UST session.
1655 * Should be called with session mutex held.
1658 int create_ust_event(struct ust_app
*app
, struct ust_app_session
*ua_sess
,
1659 struct ust_app_channel
*ua_chan
, struct ust_app_event
*ua_event
)
1663 health_code_update();
1665 /* Create UST event on tracer */
1666 pthread_mutex_lock(&app
->sock_lock
);
1667 ret
= ustctl_create_event(app
->sock
, &ua_event
->attr
, ua_chan
->obj
,
1669 pthread_mutex_unlock(&app
->sock_lock
);
1671 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
1672 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1673 ua_event
->attr
.name
, app
->pid
, ret
);
1676 * This is normal behavior, an application can die during the
1677 * creation process. Don't report an error so the execution can
1678 * continue normally.
1681 DBG3("UST app create event failed. Application is dead.");
1686 ua_event
->handle
= ua_event
->obj
->handle
;
1688 DBG2("UST app event %s created successfully for pid:%d",
1689 ua_event
->attr
.name
, app
->pid
);
1691 health_code_update();
1693 /* Set filter if one is present. */
1694 if (ua_event
->filter
) {
1695 ret
= set_ust_event_filter(ua_event
, app
);
1701 /* Set exclusions for the event */
1702 if (ua_event
->exclusion
) {
1703 ret
= set_ust_event_exclusion(ua_event
, app
);
1709 /* If event not enabled, disable it on the tracer */
1710 if (ua_event
->enabled
) {
1712 * We now need to explicitly enable the event, since it
1713 * is now disabled at creation.
1715 ret
= enable_ust_event(app
, ua_sess
, ua_event
);
1718 * If we hit an EPERM, something is wrong with our enable call. If
1719 * we get an EEXIST, there is a problem on the tracer side since we
1723 case -LTTNG_UST_ERR_PERM
:
1724 /* Code flow problem */
1726 case -LTTNG_UST_ERR_EXIST
:
1727 /* It's OK for our use case. */
1738 health_code_update();
1743 * Copy data between an UST app event and a LTT event.
1745 static void shadow_copy_event(struct ust_app_event
*ua_event
,
1746 struct ltt_ust_event
*uevent
)
1748 size_t exclusion_alloc_size
;
1750 strncpy(ua_event
->name
, uevent
->attr
.name
, sizeof(ua_event
->name
));
1751 ua_event
->name
[sizeof(ua_event
->name
) - 1] = '\0';
1753 ua_event
->enabled
= uevent
->enabled
;
1755 /* Copy event attributes */
1756 memcpy(&ua_event
->attr
, &uevent
->attr
, sizeof(ua_event
->attr
));
1758 /* Copy filter bytecode */
1759 if (uevent
->filter
) {
1760 ua_event
->filter
= copy_filter_bytecode(uevent
->filter
);
1761 /* Filter might be NULL here in case of ENONEM. */
1764 /* Copy exclusion data */
1765 if (uevent
->exclusion
) {
1766 exclusion_alloc_size
= sizeof(struct lttng_event_exclusion
) +
1767 LTTNG_UST_SYM_NAME_LEN
* uevent
->exclusion
->count
;
1768 ua_event
->exclusion
= zmalloc(exclusion_alloc_size
);
1769 if (ua_event
->exclusion
== NULL
) {
1772 memcpy(ua_event
->exclusion
, uevent
->exclusion
,
1773 exclusion_alloc_size
);
1779 * Copy data between an UST app channel and a LTT channel.
1781 static void shadow_copy_channel(struct ust_app_channel
*ua_chan
,
1782 struct ltt_ust_channel
*uchan
)
1784 struct lttng_ht_iter iter
;
1785 struct ltt_ust_event
*uevent
;
1786 struct ltt_ust_context
*uctx
;
1787 struct ust_app_event
*ua_event
;
1789 DBG2("UST app shadow copy of channel %s started", ua_chan
->name
);
1791 strncpy(ua_chan
->name
, uchan
->name
, sizeof(ua_chan
->name
));
1792 ua_chan
->name
[sizeof(ua_chan
->name
) - 1] = '\0';
1794 ua_chan
->tracefile_size
= uchan
->tracefile_size
;
1795 ua_chan
->tracefile_count
= uchan
->tracefile_count
;
1797 /* Copy event attributes since the layout is different. */
1798 ua_chan
->attr
.subbuf_size
= uchan
->attr
.subbuf_size
;
1799 ua_chan
->attr
.num_subbuf
= uchan
->attr
.num_subbuf
;
1800 ua_chan
->attr
.overwrite
= uchan
->attr
.overwrite
;
1801 ua_chan
->attr
.switch_timer_interval
= uchan
->attr
.switch_timer_interval
;
1802 ua_chan
->attr
.read_timer_interval
= uchan
->attr
.read_timer_interval
;
1803 ua_chan
->attr
.output
= uchan
->attr
.output
;
1805 * Note that the attribute channel type is not set since the channel on the
1806 * tracing registry side does not have this information.
1809 ua_chan
->enabled
= uchan
->enabled
;
1810 ua_chan
->tracing_channel_id
= uchan
->id
;
1812 cds_list_for_each_entry(uctx
, &uchan
->ctx_list
, list
) {
1813 struct ust_app_ctx
*ua_ctx
= alloc_ust_app_ctx(&uctx
->ctx
);
1815 if (ua_ctx
== NULL
) {
1818 lttng_ht_node_init_ulong(&ua_ctx
->node
,
1819 (unsigned long) ua_ctx
->ctx
.ctx
);
1820 lttng_ht_add_ulong(ua_chan
->ctx
, &ua_ctx
->node
);
1821 cds_list_add_tail(&ua_ctx
->list
, &ua_chan
->ctx_list
);
1824 /* Copy all events from ltt ust channel to ust app channel */
1825 cds_lfht_for_each_entry(uchan
->events
->ht
, &iter
.iter
, uevent
, node
.node
) {
1826 ua_event
= find_ust_app_event(ua_chan
->events
, uevent
->attr
.name
,
1827 uevent
->filter
, uevent
->attr
.loglevel
, uevent
->exclusion
);
1828 if (ua_event
== NULL
) {
1829 DBG2("UST event %s not found on shadow copy channel",
1831 ua_event
= alloc_ust_app_event(uevent
->attr
.name
, &uevent
->attr
);
1832 if (ua_event
== NULL
) {
1835 shadow_copy_event(ua_event
, uevent
);
1836 add_unique_ust_app_event(ua_chan
, ua_event
);
1840 DBG3("UST app shadow copy of channel %s done", ua_chan
->name
);
1844 * Copy data between a UST app session and a regular LTT session.
1846 static void shadow_copy_session(struct ust_app_session
*ua_sess
,
1847 struct ltt_ust_session
*usess
, struct ust_app
*app
)
1849 struct lttng_ht_node_str
*ua_chan_node
;
1850 struct lttng_ht_iter iter
;
1851 struct ltt_ust_channel
*uchan
;
1852 struct ust_app_channel
*ua_chan
;
1854 struct tm
*timeinfo
;
1857 char tmp_shm_path
[PATH_MAX
];
1859 /* Get date and time for unique app path */
1861 timeinfo
= localtime(&rawtime
);
1862 strftime(datetime
, sizeof(datetime
), "%Y%m%d-%H%M%S", timeinfo
);
1864 DBG2("Shadow copy of session handle %d", ua_sess
->handle
);
1866 ua_sess
->tracing_id
= usess
->id
;
1867 ua_sess
->id
= get_next_session_id();
1868 ua_sess
->uid
= app
->uid
;
1869 ua_sess
->gid
= app
->gid
;
1870 ua_sess
->euid
= usess
->uid
;
1871 ua_sess
->egid
= usess
->gid
;
1872 ua_sess
->buffer_type
= usess
->buffer_type
;
1873 ua_sess
->bits_per_long
= app
->bits_per_long
;
1875 /* There is only one consumer object per session possible. */
1876 consumer_output_get(usess
->consumer
);
1877 ua_sess
->consumer
= usess
->consumer
;
1879 ua_sess
->output_traces
= usess
->output_traces
;
1880 ua_sess
->live_timer_interval
= usess
->live_timer_interval
;
1881 copy_channel_attr_to_ustctl(&ua_sess
->metadata_attr
,
1882 &usess
->metadata_attr
);
1884 switch (ua_sess
->buffer_type
) {
1885 case LTTNG_BUFFER_PER_PID
:
1886 ret
= snprintf(ua_sess
->path
, sizeof(ua_sess
->path
),
1887 DEFAULT_UST_TRACE_PID_PATH
"/%s-%d-%s", app
->name
, app
->pid
,
1890 case LTTNG_BUFFER_PER_UID
:
1891 ret
= snprintf(ua_sess
->path
, sizeof(ua_sess
->path
),
1892 DEFAULT_UST_TRACE_UID_PATH
, ua_sess
->uid
, app
->bits_per_long
);
1899 PERROR("asprintf UST shadow copy session");
1904 strncpy(ua_sess
->root_shm_path
, usess
->root_shm_path
,
1905 sizeof(ua_sess
->root_shm_path
));
1906 ua_sess
->root_shm_path
[sizeof(ua_sess
->root_shm_path
) - 1] = '\0';
1907 strncpy(ua_sess
->shm_path
, usess
->shm_path
,
1908 sizeof(ua_sess
->shm_path
));
1909 ua_sess
->shm_path
[sizeof(ua_sess
->shm_path
) - 1] = '\0';
1910 if (ua_sess
->shm_path
[0]) {
1911 switch (ua_sess
->buffer_type
) {
1912 case LTTNG_BUFFER_PER_PID
:
1913 ret
= snprintf(tmp_shm_path
, sizeof(tmp_shm_path
),
1914 DEFAULT_UST_TRACE_PID_PATH
"/%s-%d-%s",
1915 app
->name
, app
->pid
, datetime
);
1917 case LTTNG_BUFFER_PER_UID
:
1918 ret
= snprintf(tmp_shm_path
, sizeof(tmp_shm_path
),
1919 DEFAULT_UST_TRACE_UID_PATH
,
1920 app
->uid
, app
->bits_per_long
);
1927 PERROR("sprintf UST shadow copy session");
1931 strncat(ua_sess
->shm_path
, tmp_shm_path
,
1932 sizeof(ua_sess
->shm_path
) - strlen(ua_sess
->shm_path
) - 1);
1933 ua_sess
->shm_path
[sizeof(ua_sess
->shm_path
) - 1] = '\0';
1936 /* Iterate over all channels in global domain. */
1937 cds_lfht_for_each_entry(usess
->domain_global
.channels
->ht
, &iter
.iter
,
1939 struct lttng_ht_iter uiter
;
1941 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &uiter
);
1942 ua_chan_node
= lttng_ht_iter_get_node_str(&uiter
);
1943 if (ua_chan_node
!= NULL
) {
1944 /* Session exist. Contiuing. */
1948 DBG2("Channel %s not found on shadow session copy, creating it",
1950 ua_chan
= alloc_ust_app_channel(uchan
->name
, ua_sess
,
1952 if (ua_chan
== NULL
) {
1953 /* malloc failed FIXME: Might want to do handle ENOMEM .. */
1956 shadow_copy_channel(ua_chan
, uchan
);
1958 * The concept of metadata channel does not exist on the tracing
1959 * registry side of the session daemon so this can only be a per CPU
1960 * channel and not metadata.
1962 ua_chan
->attr
.type
= LTTNG_UST_CHAN_PER_CPU
;
1964 lttng_ht_add_unique_str(ua_sess
->channels
, &ua_chan
->node
);
1969 consumer_output_put(ua_sess
->consumer
);
1973 * Lookup sesison wrapper.
1976 void __lookup_session_by_app(struct ltt_ust_session
*usess
,
1977 struct ust_app
*app
, struct lttng_ht_iter
*iter
)
1979 /* Get right UST app session from app */
1980 lttng_ht_lookup(app
->sessions
, &usess
->id
, iter
);
1984 * Return ust app session from the app session hashtable using the UST session
1987 static struct ust_app_session
*lookup_session_by_app(
1988 struct ltt_ust_session
*usess
, struct ust_app
*app
)
1990 struct lttng_ht_iter iter
;
1991 struct lttng_ht_node_u64
*node
;
1993 __lookup_session_by_app(usess
, app
, &iter
);
1994 node
= lttng_ht_iter_get_node_u64(&iter
);
1999 return caa_container_of(node
, struct ust_app_session
, node
);
2006 * Setup buffer registry per PID for the given session and application. If none
2007 * is found, a new one is created, added to the global registry and
2008 * initialized. If regp is valid, it's set with the newly created object.
2010 * Return 0 on success or else a negative value.
2012 static int setup_buffer_reg_pid(struct ust_app_session
*ua_sess
,
2013 struct ust_app
*app
, struct buffer_reg_pid
**regp
)
2016 struct buffer_reg_pid
*reg_pid
;
2023 reg_pid
= buffer_reg_pid_find(ua_sess
->id
);
2026 * This is the create channel path meaning that if there is NO
2027 * registry available, we have to create one for this session.
2029 ret
= buffer_reg_pid_create(ua_sess
->id
, ®_pid
,
2030 ua_sess
->root_shm_path
, ua_sess
->shm_path
);
2038 /* Initialize registry. */
2039 ret
= ust_registry_session_init(®_pid
->registry
->reg
.ust
, app
,
2040 app
->bits_per_long
, app
->uint8_t_alignment
,
2041 app
->uint16_t_alignment
, app
->uint32_t_alignment
,
2042 app
->uint64_t_alignment
, app
->long_alignment
,
2043 app
->byte_order
, app
->version
.major
,
2044 app
->version
.minor
, reg_pid
->root_shm_path
,
2046 ua_sess
->euid
, ua_sess
->egid
);
2049 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2050 * destroy the buffer registry, because it is always expected
2051 * that if the buffer registry can be found, its ust registry is
2054 buffer_reg_pid_destroy(reg_pid
);
2058 buffer_reg_pid_add(reg_pid
);
2060 DBG3("UST app buffer registry per PID created successfully");
2072 * Setup buffer registry per UID for the given session and application. If none
2073 * is found, a new one is created, added to the global registry and
2074 * initialized. If regp is valid, it's set with the newly created object.
2076 * Return 0 on success or else a negative value.
2078 static int setup_buffer_reg_uid(struct ltt_ust_session
*usess
,
2079 struct ust_app_session
*ua_sess
,
2080 struct ust_app
*app
, struct buffer_reg_uid
**regp
)
2083 struct buffer_reg_uid
*reg_uid
;
2090 reg_uid
= buffer_reg_uid_find(usess
->id
, app
->bits_per_long
, app
->uid
);
2093 * This is the create channel path meaning that if there is NO
2094 * registry available, we have to create one for this session.
2096 ret
= buffer_reg_uid_create(usess
->id
, app
->bits_per_long
, app
->uid
,
2097 LTTNG_DOMAIN_UST
, ®_uid
,
2098 ua_sess
->root_shm_path
, ua_sess
->shm_path
);
2106 /* Initialize registry. */
2107 ret
= ust_registry_session_init(®_uid
->registry
->reg
.ust
, NULL
,
2108 app
->bits_per_long
, app
->uint8_t_alignment
,
2109 app
->uint16_t_alignment
, app
->uint32_t_alignment
,
2110 app
->uint64_t_alignment
, app
->long_alignment
,
2111 app
->byte_order
, app
->version
.major
,
2112 app
->version
.minor
, reg_uid
->root_shm_path
,
2113 reg_uid
->shm_path
, usess
->uid
, usess
->gid
);
2116 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2117 * destroy the buffer registry, because it is always expected
2118 * that if the buffer registry can be found, its ust registry is
2121 buffer_reg_uid_destroy(reg_uid
, NULL
);
2124 /* Add node to teardown list of the session. */
2125 cds_list_add(®_uid
->lnode
, &usess
->buffer_reg_uid_list
);
2127 buffer_reg_uid_add(reg_uid
);
2129 DBG3("UST app buffer registry per UID created successfully");
2140 * Create a session on the tracer side for the given app.
2142 * On success, ua_sess_ptr is populated with the session pointer or else left
2143 * untouched. If the session was created, is_created is set to 1. On error,
2144 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2147 * Returns 0 on success or else a negative code which is either -ENOMEM or
2148 * -ENOTCONN which is the default code if the ustctl_create_session fails.
2150 static int create_ust_app_session(struct ltt_ust_session
*usess
,
2151 struct ust_app
*app
, struct ust_app_session
**ua_sess_ptr
,
2154 int ret
, created
= 0;
2155 struct ust_app_session
*ua_sess
;
2159 assert(ua_sess_ptr
);
2161 health_code_update();
2163 ua_sess
= lookup_session_by_app(usess
, app
);
2164 if (ua_sess
== NULL
) {
2165 DBG2("UST app pid: %d session id %" PRIu64
" not found, creating it",
2166 app
->pid
, usess
->id
);
2167 ua_sess
= alloc_ust_app_session(app
);
2168 if (ua_sess
== NULL
) {
2169 /* Only malloc can failed so something is really wrong */
2173 shadow_copy_session(ua_sess
, usess
, app
);
2177 switch (usess
->buffer_type
) {
2178 case LTTNG_BUFFER_PER_PID
:
2179 /* Init local registry. */
2180 ret
= setup_buffer_reg_pid(ua_sess
, app
, NULL
);
2182 delete_ust_app_session(-1, ua_sess
, app
);
2186 case LTTNG_BUFFER_PER_UID
:
2187 /* Look for a global registry. If none exists, create one. */
2188 ret
= setup_buffer_reg_uid(usess
, ua_sess
, app
, NULL
);
2190 delete_ust_app_session(-1, ua_sess
, app
);
2200 health_code_update();
2202 if (ua_sess
->handle
== -1) {
2203 pthread_mutex_lock(&app
->sock_lock
);
2204 ret
= ustctl_create_session(app
->sock
);
2205 pthread_mutex_unlock(&app
->sock_lock
);
2207 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
2208 ERR("Creating session for app pid %d with ret %d",
2211 DBG("UST app creating session failed. Application is dead");
2213 * This is normal behavior, an application can die during the
2214 * creation process. Don't report an error so the execution can
2215 * continue normally. This will get flagged ENOTCONN and the
2216 * caller will handle it.
2220 delete_ust_app_session(-1, ua_sess
, app
);
2221 if (ret
!= -ENOMEM
) {
2223 * Tracer is probably gone or got an internal error so let's
2224 * behave like it will soon unregister or not usable.
2231 ua_sess
->handle
= ret
;
2233 /* Add ust app session to app's HT */
2234 lttng_ht_node_init_u64(&ua_sess
->node
,
2235 ua_sess
->tracing_id
);
2236 lttng_ht_add_unique_u64(app
->sessions
, &ua_sess
->node
);
2237 lttng_ht_node_init_ulong(&ua_sess
->ust_objd_node
, ua_sess
->handle
);
2238 lttng_ht_add_unique_ulong(app
->ust_sessions_objd
,
2239 &ua_sess
->ust_objd_node
);
2241 DBG2("UST app session created successfully with handle %d", ret
);
2244 *ua_sess_ptr
= ua_sess
;
2246 *is_created
= created
;
2249 /* Everything went well. */
2253 health_code_update();
2258 * Match function for a hash table lookup of ust_app_ctx.
2260 * It matches an ust app context based on the context type and, in the case
2261 * of perf counters, their name.
2263 static int ht_match_ust_app_ctx(struct cds_lfht_node
*node
, const void *_key
)
2265 struct ust_app_ctx
*ctx
;
2266 const struct lttng_ust_context_attr
*key
;
2271 ctx
= caa_container_of(node
, struct ust_app_ctx
, node
.node
);
2275 if (ctx
->ctx
.ctx
!= key
->ctx
) {
2280 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER
:
2281 if (strncmp(key
->u
.perf_counter
.name
,
2282 ctx
->ctx
.u
.perf_counter
.name
,
2283 sizeof(key
->u
.perf_counter
.name
))) {
2287 case LTTNG_UST_CONTEXT_APP_CONTEXT
:
2288 if (strcmp(key
->u
.app_ctx
.provider_name
,
2289 ctx
->ctx
.u
.app_ctx
.provider_name
) ||
2290 strcmp(key
->u
.app_ctx
.ctx_name
,
2291 ctx
->ctx
.u
.app_ctx
.ctx_name
)) {
2307 * Lookup for an ust app context from an lttng_ust_context.
2309 * Must be called while holding RCU read side lock.
2310 * Return an ust_app_ctx object or NULL on error.
2313 struct ust_app_ctx
*find_ust_app_context(struct lttng_ht
*ht
,
2314 struct lttng_ust_context_attr
*uctx
)
2316 struct lttng_ht_iter iter
;
2317 struct lttng_ht_node_ulong
*node
;
2318 struct ust_app_ctx
*app_ctx
= NULL
;
2323 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2324 cds_lfht_lookup(ht
->ht
, ht
->hash_fct((void *) uctx
->ctx
, lttng_ht_seed
),
2325 ht_match_ust_app_ctx
, uctx
, &iter
.iter
);
2326 node
= lttng_ht_iter_get_node_ulong(&iter
);
2331 app_ctx
= caa_container_of(node
, struct ust_app_ctx
, node
);
2338 * Create a context for the channel on the tracer.
2340 * Called with UST app session lock held and a RCU read side lock.
2343 int create_ust_app_channel_context(struct ust_app_session
*ua_sess
,
2344 struct ust_app_channel
*ua_chan
,
2345 struct lttng_ust_context_attr
*uctx
,
2346 struct ust_app
*app
)
2349 struct ust_app_ctx
*ua_ctx
;
2351 DBG2("UST app adding context to channel %s", ua_chan
->name
);
2353 ua_ctx
= find_ust_app_context(ua_chan
->ctx
, uctx
);
2359 ua_ctx
= alloc_ust_app_ctx(uctx
);
2360 if (ua_ctx
== NULL
) {
2366 lttng_ht_node_init_ulong(&ua_ctx
->node
, (unsigned long) ua_ctx
->ctx
.ctx
);
2367 lttng_ht_add_ulong(ua_chan
->ctx
, &ua_ctx
->node
);
2368 cds_list_add_tail(&ua_ctx
->list
, &ua_chan
->ctx_list
);
2370 ret
= create_ust_channel_context(ua_chan
, ua_ctx
, app
);
2380 * Enable on the tracer side a ust app event for the session and channel.
2382 * Called with UST app session lock held.
2385 int enable_ust_app_event(struct ust_app_session
*ua_sess
,
2386 struct ust_app_event
*ua_event
, struct ust_app
*app
)
2390 ret
= enable_ust_event(app
, ua_sess
, ua_event
);
2395 ua_event
->enabled
= 1;
2402 * Disable on the tracer side a ust app event for the session and channel.
2404 static int disable_ust_app_event(struct ust_app_session
*ua_sess
,
2405 struct ust_app_event
*ua_event
, struct ust_app
*app
)
2409 ret
= disable_ust_event(app
, ua_sess
, ua_event
);
2414 ua_event
->enabled
= 0;
2421 * Lookup ust app channel for session and disable it on the tracer side.
2424 int disable_ust_app_channel(struct ust_app_session
*ua_sess
,
2425 struct ust_app_channel
*ua_chan
, struct ust_app
*app
)
2429 ret
= disable_ust_channel(app
, ua_sess
, ua_chan
);
2434 ua_chan
->enabled
= 0;
2441 * Lookup ust app channel for session and enable it on the tracer side. This
2442 * MUST be called with a RCU read side lock acquired.
2444 static int enable_ust_app_channel(struct ust_app_session
*ua_sess
,
2445 struct ltt_ust_channel
*uchan
, struct ust_app
*app
)
2448 struct lttng_ht_iter iter
;
2449 struct lttng_ht_node_str
*ua_chan_node
;
2450 struct ust_app_channel
*ua_chan
;
2452 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &iter
);
2453 ua_chan_node
= lttng_ht_iter_get_node_str(&iter
);
2454 if (ua_chan_node
== NULL
) {
2455 DBG2("Unable to find channel %s in ust session id %" PRIu64
,
2456 uchan
->name
, ua_sess
->tracing_id
);
2460 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
, node
);
2462 ret
= enable_ust_channel(app
, ua_sess
, ua_chan
);
2472 * Ask the consumer to create a channel and get it if successful.
2474 * Called with UST app session lock held.
2476 * Return 0 on success or else a negative value.
2478 static int do_consumer_create_channel(struct ltt_ust_session
*usess
,
2479 struct ust_app_session
*ua_sess
, struct ust_app_channel
*ua_chan
,
2480 int bitness
, struct ust_registry_session
*registry
)
2483 unsigned int nb_fd
= 0;
2484 struct consumer_socket
*socket
;
2492 health_code_update();
2494 /* Get the right consumer socket for the application. */
2495 socket
= consumer_find_socket_by_bitness(bitness
, usess
->consumer
);
2501 health_code_update();
2503 /* Need one fd for the channel. */
2504 ret
= lttng_fd_get(LTTNG_FD_APPS
, 1);
2506 ERR("Exhausted number of available FD upon create channel");
2511 * Ask consumer to create channel. The consumer will return the number of
2512 * stream we have to expect.
2514 ret
= ust_consumer_ask_channel(ua_sess
, ua_chan
, usess
->consumer
, socket
,
2521 * Compute the number of fd needed before receiving them. It must be 2 per
2522 * stream (2 being the default value here).
2524 nb_fd
= DEFAULT_UST_STREAM_FD_NUM
* ua_chan
->expected_stream_count
;
2526 /* Reserve the amount of file descriptor we need. */
2527 ret
= lttng_fd_get(LTTNG_FD_APPS
, nb_fd
);
2529 ERR("Exhausted number of available FD upon create channel");
2530 goto error_fd_get_stream
;
2533 health_code_update();
2536 * Now get the channel from the consumer. This call wil populate the stream
2537 * list of that channel and set the ust objects.
2539 if (usess
->consumer
->enabled
) {
2540 ret
= ust_consumer_get_channel(socket
, ua_chan
);
2550 lttng_fd_put(LTTNG_FD_APPS
, nb_fd
);
2551 error_fd_get_stream
:
2553 * Initiate a destroy channel on the consumer since we had an error
2554 * handling it on our side. The return value is of no importance since we
2555 * already have a ret value set by the previous error that we need to
2558 (void) ust_consumer_destroy_channel(socket
, ua_chan
);
2560 lttng_fd_put(LTTNG_FD_APPS
, 1);
2562 health_code_update();
2568 * Duplicate the ust data object of the ust app stream and save it in the
2569 * buffer registry stream.
2571 * Return 0 on success or else a negative value.
2573 static int duplicate_stream_object(struct buffer_reg_stream
*reg_stream
,
2574 struct ust_app_stream
*stream
)
2581 /* Reserve the amount of file descriptor we need. */
2582 ret
= lttng_fd_get(LTTNG_FD_APPS
, 2);
2584 ERR("Exhausted number of available FD upon duplicate stream");
2588 /* Duplicate object for stream once the original is in the registry. */
2589 ret
= ustctl_duplicate_ust_object_data(&stream
->obj
,
2590 reg_stream
->obj
.ust
);
2592 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2593 reg_stream
->obj
.ust
, stream
->obj
, ret
);
2594 lttng_fd_put(LTTNG_FD_APPS
, 2);
2597 stream
->handle
= stream
->obj
->handle
;
2604 * Duplicate the ust data object of the ust app. channel and save it in the
2605 * buffer registry channel.
2607 * Return 0 on success or else a negative value.
2609 static int duplicate_channel_object(struct buffer_reg_channel
*reg_chan
,
2610 struct ust_app_channel
*ua_chan
)
2617 /* Need two fds for the channel. */
2618 ret
= lttng_fd_get(LTTNG_FD_APPS
, 1);
2620 ERR("Exhausted number of available FD upon duplicate channel");
2624 /* Duplicate object for stream once the original is in the registry. */
2625 ret
= ustctl_duplicate_ust_object_data(&ua_chan
->obj
, reg_chan
->obj
.ust
);
2627 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2628 reg_chan
->obj
.ust
, ua_chan
->obj
, ret
);
2631 ua_chan
->handle
= ua_chan
->obj
->handle
;
2636 lttng_fd_put(LTTNG_FD_APPS
, 1);
2642 * For a given channel buffer registry, setup all streams of the given ust
2643 * application channel.
2645 * Return 0 on success or else a negative value.
2647 static int setup_buffer_reg_streams(struct buffer_reg_channel
*reg_chan
,
2648 struct ust_app_channel
*ua_chan
,
2649 struct ust_app
*app
)
2652 struct ust_app_stream
*stream
, *stmp
;
2657 DBG2("UST app setup buffer registry stream");
/*
 * Move every stream of the app channel into the buffer registry channel.
 * The _safe list iterator is required because each node is unlinked
 * inside the loop body (cds_list_del below).
 */
2659 /* Send all streams to application. */
2660 cds_list_for_each_entry_safe(stream
, stmp
, &ua_chan
->streams
.head
, list
) {
2661 struct buffer_reg_stream
*reg_stream
;
2663 ret
= buffer_reg_stream_create(®_stream
);
/*
 * Ownership of stream->obj transfers to the registry stream. Per the
 * original comment, the app-side pointer is nullified (on a line not
 * visible in this extraction) so delete_ust_app_stream() below does
 * not release the object.
 */
2669 * Keep original pointer and nullify it in the stream so the delete
2670 * stream call does not release the object.
2672 reg_stream
->obj
.ust
= stream
->obj
;
2674 buffer_reg_stream_add(reg_stream
, reg_chan
);
/* The app-side stream container is no longer needed: unlink and free. */
2676 /* We don't need the streams anymore. */
2677 cds_list_del(&stream
->list
);
2678 delete_ust_app_stream(-1, stream
, app
);
2686 * Create a buffer registry channel for the given session registry and
2687 * application channel object. If regp pointer is valid, it's set with the
2688 * created object. Important, the created object is NOT added to the session
2689 * registry hash table.
2691 * Return 0 on success else a negative value.
2693 static int create_buffer_reg_channel(struct buffer_reg_session
*reg_sess
,
2694 struct ust_app_channel
*ua_chan
, struct buffer_reg_channel
**regp
)
2697 struct buffer_reg_channel
*reg_chan
= NULL
;
2702 DBG2("UST app creating buffer registry channel for %s", ua_chan
->name
);
/* Registry channels are keyed by the session-wide tracing channel id. */
2704 /* Create buffer registry channel. */
2705 ret
= buffer_reg_channel_create(ua_chan
->tracing_channel_id
, ®_chan
);
/* Mirror the consumer key and buffer geometry from the app channel. */
2710 reg_chan
->consumer_key
= ua_chan
->key
;
2711 reg_chan
->subbuf_size
= ua_chan
->attr
.subbuf_size
;
2712 reg_chan
->num_subbuf
= ua_chan
->attr
.num_subbuf
;
2714 /* Create and add a channel registry to session. */
2715 ret
= ust_registry_channel_add(reg_sess
->reg
.ust
,
2716 ua_chan
->tracing_channel_id
);
2720 buffer_reg_channel_add(reg_sess
, reg_chan
);
/* Error path: destroying is safe because no hash table references it. */
2729 /* Safe because the registry channel object was not added to any HT. */
2730 buffer_reg_channel_destroy(reg_chan
, LTTNG_DOMAIN_UST
);
2736 * Setup buffer registry channel for the given session registry and application
2737 * channel object. If regp pointer is valid, it's set with the created object.
2739 * Return 0 on success else a negative value.
2741 static int setup_buffer_reg_channel(struct buffer_reg_session
*reg_sess
,
2742 struct ust_app_channel
*ua_chan
, struct buffer_reg_channel
*reg_chan
,
2743 struct ust_app
*app
)
/* The app channel must carry a ust object to hand over to the registry. */
2750 assert(ua_chan
->obj
);
2752 DBG2("UST app setup buffer registry channel for %s", ua_chan
->name
);
2754 /* Setup all streams for the registry. */
2755 ret
= setup_buffer_reg_streams(reg_chan
, ua_chan
, app
);
/*
 * Transfer channel-object ownership from the app channel to the registry;
 * nullifying ua_chan->obj prevents a double release later.
 */
2760 reg_chan
->obj
.ust
= ua_chan
->obj
;
2761 ua_chan
->obj
= NULL
;
/* Error path: unlink the registry channel from the session and destroy. */
2766 buffer_reg_channel_remove(reg_sess
, reg_chan
);
2767 buffer_reg_channel_destroy(reg_chan
, LTTNG_DOMAIN_UST
);
/*
 * NOTE(review): interior lines (error branches, returns, closing braces)
 * are missing from this extraction; annotations cover only the visible
 * statements.
 */
2772 * Send buffer registry channel to the application.
2774 * Return 0 on success else a negative value.
2776 static int send_channel_uid_to_ust(struct buffer_reg_channel
*reg_chan
,
2777 struct ust_app
*app
, struct ust_app_session
*ua_sess
,
2778 struct ust_app_channel
*ua_chan
)
2781 struct buffer_reg_stream
*reg_stream
;
2788 DBG("UST app sending buffer registry channel to ust sock %d", app
->sock
);
/* Duplicate the per-UID registry channel object into the app channel. */
2790 ret
= duplicate_channel_object(reg_chan
, ua_chan
);
2795 /* Send channel to the application. */
2796 ret
= ust_consumer_send_channel_to_ust(app
, ua_sess
, ua_chan
);
/* EPIPE/EXITING from the app socket is normalized to -ENOTCONN. */
2797 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
2798 ret
= -ENOTCONN
; /* Caused by app exiting. */
2800 } else if (ret
< 0) {
2804 health_code_update();
/* The registry stream list is walked under its dedicated lock. */
2806 /* Send all streams to application. */
2807 pthread_mutex_lock(®_chan
->stream_list_lock
);
2808 cds_list_for_each_entry(reg_stream
, ®_chan
->streams
, lnode
) {
2809 struct ust_app_stream stream
;
/* Per-stream: duplicate the registry object into a stack-local stream. */
2811 ret
= duplicate_stream_object(reg_stream
, &stream
);
2813 goto error_stream_unlock
;
2816 ret
= ust_consumer_send_stream_to_ust(app
, ua_chan
, &stream
);
/* The local duplicate is released whether or not the send succeeded. */
2818 (void) release_ust_app_stream(-1, &stream
, app
);
2819 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
2820 ret
= -ENOTCONN
; /* Caused by app exiting. */
2822 goto error_stream_unlock
;
2826 * The return value is not important here. This function will output an
2829 (void) release_ust_app_stream(-1, &stream
, app
);
/* All streams sent: mark the channel as delivered to this app. */
2831 ua_chan
->is_sent
= 1;
2833 error_stream_unlock
:
2834 pthread_mutex_unlock(®_chan
->stream_list_lock
);
2840 * Create and send to the application the created buffers with per UID buffers.
2842 * Return 0 on success else a negative value.
2844 static int create_channel_per_uid(struct ust_app
*app
,
2845 struct ltt_ust_session
*usess
, struct ust_app_session
*ua_sess
,
2846 struct ust_app_channel
*ua_chan
)
2849 struct buffer_reg_uid
*reg_uid
;
2850 struct buffer_reg_channel
*reg_chan
;
2857 DBG("UST app creating channel %s with per UID buffers", ua_chan
->name
);
2859 reg_uid
= buffer_reg_uid_find(usess
->id
, app
->bits_per_long
, app
->uid
);
2861 * The session creation handles the creation of this global registry
2862 * object. If none can be find, there is a code flow problem or a
2867 reg_chan
= buffer_reg_channel_find(ua_chan
->tracing_channel_id
,
2870 /* Create the buffer registry channel object. */
2871 ret
= create_buffer_reg_channel(reg_uid
->registry
, ua_chan
, ®_chan
);
2873 ERR("Error creating the UST channel \"%s\" registry instance",
2880 * Create the buffers on the consumer side. This call populates the
2881 * ust app channel object with all streams and data object.
2883 ret
= do_consumer_create_channel(usess
, ua_sess
, ua_chan
,
2884 app
->bits_per_long
, reg_uid
->registry
->reg
.ust
);
2886 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2890 * Let's remove the previously created buffer registry channel so
2891 * it's not visible anymore in the session registry.
2893 ust_registry_channel_del_free(reg_uid
->registry
->reg
.ust
,
2894 ua_chan
->tracing_channel_id
);
2895 buffer_reg_channel_remove(reg_uid
->registry
, reg_chan
);
2896 buffer_reg_channel_destroy(reg_chan
, LTTNG_DOMAIN_UST
);
2901 * Setup the streams and add it to the session registry.
2903 ret
= setup_buffer_reg_channel(reg_uid
->registry
,
2904 ua_chan
, reg_chan
, app
);
2906 ERR("Error setting up UST channel \"%s\"",
2913 /* Send buffers to the application. */
2914 ret
= send_channel_uid_to_ust(reg_chan
, app
, ua_sess
, ua_chan
);
2916 if (ret
!= -ENOTCONN
) {
2917 ERR("Error sending channel to application");
2927 * Create and send to the application the created buffers with per PID buffers.
2929 * Called with UST app session lock held.
2931 * Return 0 on success else a negative value.
2933 static int create_channel_per_pid(struct ust_app
*app
,
2934 struct ltt_ust_session
*usess
, struct ust_app_session
*ua_sess
,
2935 struct ust_app_channel
*ua_chan
)
2938 struct ust_registry_session
*registry
;
2945 DBG("UST app creating channel %s with per PID buffers", ua_chan
->name
);
2949 registry
= get_session_registry(ua_sess
);
2950 /* The UST app session lock is held, registry shall not be null. */
/* Per-PID channels are keyed by the app channel key (not tracing id). */
2953 /* Create and add a new channel registry to session. */
2954 ret
= ust_registry_channel_add(registry
, ua_chan
->key
);
2956 ERR("Error creating the UST channel \"%s\" registry instance",
2961 /* Create and get channel on the consumer side. */
2962 ret
= do_consumer_create_channel(usess
, ua_sess
, ua_chan
,
2963 app
->bits_per_long
, registry
);
2965 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2970 ret
= send_channel_pid_to_ust(app
, ua_sess
, ua_chan
);
/* -ENOTCONN means the app exited concurrently; not reported as an error. */
2972 if (ret
!= -ENOTCONN
) {
2973 ERR("Error sending channel to application");
2984 * From an already allocated ust app channel, create the channel buffers if
2985 * need and send it to the application. This MUST be called with a RCU read
2986 * side lock acquired.
2988 * Called with UST app session lock held.
2990 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2991 * the application exited concurrently.
2993 static int do_create_channel(struct ust_app
*app
,
2994 struct ltt_ust_session
*usess
, struct ust_app_session
*ua_sess
,
2995 struct ust_app_channel
*ua_chan
)
/* Dispatch on the session's buffer ownership model (per-UID vs per-PID). */
3004 /* Handle buffer type before sending the channel to the application. */
3005 switch (usess
->buffer_type
) {
3006 case LTTNG_BUFFER_PER_UID
:
3008 ret
= create_channel_per_uid(app
, usess
, ua_sess
, ua_chan
);
3014 case LTTNG_BUFFER_PER_PID
:
3016 ret
= create_channel_per_pid(app
, usess
, ua_sess
, ua_chan
);
/* Index the channel by the ust object handle the app returned. */
3028 /* Initialize ust objd object using the received handle and add it. */
3029 lttng_ht_node_init_ulong(&ua_chan
->ust_objd_node
, ua_chan
->handle
);
3030 lttng_ht_add_unique_ulong(app
->ust_objd
, &ua_chan
->ust_objd_node
);
3032 /* If channel is not enabled, disable it on the tracer */
3033 if (!ua_chan
->enabled
) {
3034 ret
= disable_ust_channel(app
, ua_sess
, ua_chan
);
3045 * Create UST app channel and create it on the tracer. Set ua_chanp of the
3046 * newly created channel if not NULL.
3048 * Called with UST app session lock and RCU read-side lock held.
3050 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3051 * the application exited concurrently.
3053 static int create_ust_app_channel(struct ust_app_session
*ua_sess
,
3054 struct ltt_ust_channel
*uchan
, struct ust_app
*app
,
3055 enum lttng_ust_chan_type type
, struct ltt_ust_session
*usess
,
3056 struct ust_app_channel
**ua_chanp
)
3059 struct lttng_ht_iter iter
;
3060 struct lttng_ht_node_str
*ua_chan_node
;
3061 struct ust_app_channel
*ua_chan
;
3063 /* Lookup channel in the ust app session */
3064 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &iter
);
3065 ua_chan_node
= lttng_ht_iter_get_node_str(&iter
);
3066 if (ua_chan_node
!= NULL
) {
3067 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
, node
);
3071 ua_chan
= alloc_ust_app_channel(uchan
->name
, ua_sess
, &uchan
->attr
);
3072 if (ua_chan
== NULL
) {
3073 /* Only malloc can fail here */
3077 shadow_copy_channel(ua_chan
, uchan
);
3079 /* Set channel type. */
3080 ua_chan
->attr
.type
= type
;
3082 ret
= do_create_channel(app
, usess
, ua_sess
, ua_chan
);
3087 DBG2("UST app create channel %s for PID %d completed", ua_chan
->name
,
3090 /* Only add the channel if successful on the tracer side. */
3091 lttng_ht_add_unique_str(ua_sess
->channels
, &ua_chan
->node
);
3095 *ua_chanp
= ua_chan
;
3098 /* Everything went well. */
3102 delete_ust_app_channel(ua_chan
->is_sent
? app
->sock
: -1, ua_chan
, app
);
3108 * Create UST app event and create it on the tracer side.
3110 * Called with ust app session mutex held.
3113 int create_ust_app_event(struct ust_app_session
*ua_sess
,
3114 struct ust_app_channel
*ua_chan
, struct ltt_ust_event
*uevent
,
3115 struct ust_app
*app
)
3118 struct ust_app_event
*ua_event
;
/* Lookup by (name, filter, loglevel, exclusion) to detect duplicates. */
3120 /* Get event node */
3121 ua_event
= find_ust_app_event(ua_chan
->events
, uevent
->attr
.name
,
3122 uevent
->filter
, uevent
->attr
.loglevel
, uevent
->exclusion
);
3123 if (ua_event
!= NULL
) {
3128 /* Does not exist so create one */
3129 ua_event
= alloc_ust_app_event(uevent
->attr
.name
, &uevent
->attr
);
3130 if (ua_event
== NULL
) {
3131 /* Only malloc can failed so something is really wrong */
/* Copy filter/exclusion/state from the session event into the app event. */
3135 shadow_copy_event(ua_event
, uevent
);
3137 /* Create it on the tracer side */
3138 ret
= create_ust_event(app
, ua_sess
, ua_chan
, ua_event
);
/*
 * The lookup above found no duplicate, so the tracer must not report
 * EEXIST here; anything else indicates state divergence.
 */
3140 /* Not found previously means that it does not exist on the tracer */
3141 assert(ret
!= -LTTNG_UST_ERR_EXIST
);
3145 add_unique_ust_app_event(ua_chan
, ua_event
);
3147 DBG2("UST app create event %s for PID %d completed", ua_event
->name
,
/* Error path: tear down the partially created app event. */
3154 /* Valid. Calling here is already in a read side lock */
3155 delete_ust_app_event(-1, ua_event
, app
);
3160 * Create UST metadata and open it on the tracer side.
3162 * Called with UST app session lock held and RCU read side lock.
3164 static int create_ust_app_metadata(struct ust_app_session
*ua_sess
,
3165 struct ust_app
*app
, struct consumer_output
*consumer
)
3168 struct ust_app_channel
*metadata
;
3169 struct consumer_socket
*socket
;
3170 struct ust_registry_session
*registry
;
3176 registry
= get_session_registry(ua_sess
);
3177 /* The UST app session is held registry shall not be null. */
3180 pthread_mutex_lock(®istry
->lock
);
3182 /* Metadata already exists for this registry or it was closed previously */
3183 if (registry
->metadata_key
|| registry
->metadata_closed
) {
3188 /* Allocate UST metadata */
3189 metadata
= alloc_ust_app_channel(DEFAULT_METADATA_NAME
, ua_sess
, NULL
);
3191 /* malloc() failed */
3196 memcpy(&metadata
->attr
, &ua_sess
->metadata_attr
, sizeof(metadata
->attr
));
3198 /* Need one fd for the channel. */
3199 ret
= lttng_fd_get(LTTNG_FD_APPS
, 1);
3201 ERR("Exhausted number of available FD upon create metadata");
3205 /* Get the right consumer socket for the application. */
3206 socket
= consumer_find_socket_by_bitness(app
->bits_per_long
, consumer
);
3209 goto error_consumer
;
3213 * Keep metadata key so we can identify it on the consumer side. Assign it
3214 * to the registry *before* we ask the consumer so we avoid the race of the
3215 * consumer requesting the metadata and the ask_channel call on our side
3216 * did not returned yet.
3218 registry
->metadata_key
= metadata
->key
;
3221 * Ask the metadata channel creation to the consumer. The metadata object
3222 * will be created by the consumer and kept their. However, the stream is
3223 * never added or monitored until we do a first push metadata to the
3226 ret
= ust_consumer_ask_channel(ua_sess
, metadata
, consumer
, socket
,
3229 /* Nullify the metadata key so we don't try to close it later on. */
3230 registry
->metadata_key
= 0;
3231 goto error_consumer
;
3235 * The setup command will make the metadata stream be sent to the relayd,
3236 * if applicable, and the thread managing the metadatas. This is important
3237 * because after this point, if an error occurs, the only way the stream
3238 * can be deleted is to be monitored in the consumer.
3240 ret
= consumer_setup_metadata(socket
, metadata
->key
);
3242 /* Nullify the metadata key so we don't try to close it later on. */
3243 registry
->metadata_key
= 0;
3244 goto error_consumer
;
3247 DBG2("UST metadata with key %" PRIu64
" created for app pid %d",
3248 metadata
->key
, app
->pid
);
3251 lttng_fd_put(LTTNG_FD_APPS
, 1);
3252 delete_ust_app_channel(-1, metadata
, app
);
3254 pthread_mutex_unlock(®istry
->lock
);
3259 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3260 * acquired before calling this function.
3262 struct ust_app
*ust_app_find_by_pid(pid_t pid
)
3264 struct ust_app
*app
= NULL
;
3265 struct lttng_ht_node_ulong
*node
;
3266 struct lttng_ht_iter iter
;
/* The PID hash table is keyed by the pid cast to unsigned long. */
3268 lttng_ht_lookup(ust_app_ht
, (void *)((unsigned long) pid
), &iter
);
3269 node
= lttng_ht_iter_get_node_ulong(&iter
);
3271 DBG2("UST app no found with pid %d", pid
);
3275 DBG2("Found UST app by pid %d", pid
);
/* Recover the enclosing ust_app from its embedded pid_n hash node. */
3277 app
= caa_container_of(node
, struct ust_app
, pid_n
);
3284 * Allocate and init an UST app object using the registration information and
3285 * the command socket. This is called when the command socket connects to the
3288 * The object is returned on success or else NULL.
3290 struct ust_app
*ust_app_create(struct ust_register_msg
*msg
, int sock
)
3292 struct ust_app
*lta
= NULL
;
3297 DBG3("UST app creating application for socket %d", sock
);
3299 if ((msg
->bits_per_long
== 64 &&
3300 (uatomic_read(&ust_consumerd64_fd
) == -EINVAL
))
3301 || (msg
->bits_per_long
== 32 &&
3302 (uatomic_read(&ust_consumerd32_fd
) == -EINVAL
))) {
3303 ERR("Registration failed: application \"%s\" (pid: %d) has "
3304 "%d-bit long, but no consumerd for this size is available.\n",
3305 msg
->name
, msg
->pid
, msg
->bits_per_long
);
3309 lta
= zmalloc(sizeof(struct ust_app
));
3315 lta
->ppid
= msg
->ppid
;
3316 lta
->uid
= msg
->uid
;
3317 lta
->gid
= msg
->gid
;
3319 lta
->bits_per_long
= msg
->bits_per_long
;
3320 lta
->uint8_t_alignment
= msg
->uint8_t_alignment
;
3321 lta
->uint16_t_alignment
= msg
->uint16_t_alignment
;
3322 lta
->uint32_t_alignment
= msg
->uint32_t_alignment
;
3323 lta
->uint64_t_alignment
= msg
->uint64_t_alignment
;
3324 lta
->long_alignment
= msg
->long_alignment
;
3325 lta
->byte_order
= msg
->byte_order
;
3327 lta
->v_major
= msg
->major
;
3328 lta
->v_minor
= msg
->minor
;
3329 lta
->sessions
= lttng_ht_new(0, LTTNG_HT_TYPE_U64
);
3330 lta
->ust_objd
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);
3331 lta
->ust_sessions_objd
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);
3332 lta
->notify_sock
= -1;
3334 /* Copy name and make sure it's NULL terminated. */
3335 strncpy(lta
->name
, msg
->name
, sizeof(lta
->name
));
3336 lta
->name
[UST_APP_PROCNAME_LEN
] = '\0';
3339 * Before this can be called, when receiving the registration information,
3340 * the application compatibility is checked. So, at this point, the
3341 * application can work with this session daemon.
3343 lta
->compatible
= 1;
3345 lta
->pid
= msg
->pid
;
3346 lttng_ht_node_init_ulong(<a
->pid_n
, (unsigned long) lta
->pid
);
3348 pthread_mutex_init(<a
->sock_lock
, NULL
);
3349 lttng_ht_node_init_ulong(<a
->sock_n
, (unsigned long) lta
->sock
);
3351 CDS_INIT_LIST_HEAD(<a
->teardown_head
);
3357 * For a given application object, add it to every hash table.
3359 void ust_app_add(struct ust_app
*app
)
/* The notify socket must have been received before publishing the app. */
3362 assert(app
->notify_sock
>= 0);
/* add_replace: a reused PID evicts the stale entry for the old process. */
3367 * On a re-registration, we want to kick out the previous registration of
3370 lttng_ht_add_replace_ulong(ust_app_ht
, &app
->pid_n
);
3373 * The socket _should_ be unique until _we_ call close. So, a add_unique
3374 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
3375 * already in the table.
3377 lttng_ht_add_unique_ulong(ust_app_ht_by_sock
, &app
->sock_n
);
3379 /* Add application to the notify socket hash table. */
3380 lttng_ht_node_init_ulong(&app
->notify_sock_n
, app
->notify_sock
);
3381 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock
, &app
->notify_sock_n
);
3383 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
3384 "notify_sock:%d (version %d.%d)", app
->pid
, app
->ppid
, app
->uid
,
3385 app
->gid
, app
->sock
, app
->name
, app
->notify_sock
, app
->v_major
,
3392 * Set the application version into the object.
3394 * Return 0 on success else a negative value either an errno code or a
3395 * LTTng-UST error code.
3397 int ust_app_version(struct ust_app
*app
)
/* The app command socket is serialized by sock_lock across all users. */
3403 pthread_mutex_lock(&app
->sock_lock
);
3404 ret
= ustctl_tracer_version(app
->sock
, &app
->version
);
3405 pthread_mutex_unlock(&app
->sock_lock
);
/* EXITING/EPIPE just mean the app died: log at debug, not error, level. */
3407 if (ret
!= -LTTNG_UST_ERR_EXITING
&& ret
!= -EPIPE
) {
3408 ERR("UST app %d version failed with ret %d", app
->sock
, ret
);
3410 DBG3("UST app %d version failed. Application is dead", app
->sock
);
3418 * Unregister app by removing it from the global traceable app list and freeing
3421 * The socket is already closed at this point so no close to sock.
3423 void ust_app_unregister(int sock
)
3425 struct ust_app
*lta
;
3426 struct lttng_ht_node_ulong
*node
;
3427 struct lttng_ht_iter ust_app_sock_iter
;
3428 struct lttng_ht_iter iter
;
3429 struct ust_app_session
*ua_sess
;
3434 /* Get the node reference for a call_rcu */
3435 lttng_ht_lookup(ust_app_ht_by_sock
, (void *)((unsigned long) sock
), &ust_app_sock_iter
);
3436 node
= lttng_ht_iter_get_node_ulong(&ust_app_sock_iter
);
3439 lta
= caa_container_of(node
, struct ust_app
, sock_n
);
3440 DBG("PID %d unregistering with sock %d", lta
->pid
, sock
);
3443 * For per-PID buffers, perform "push metadata" and flush all
3444 * application streams before removing app from hash tables,
3445 * ensuring proper behavior of data_pending check.
3446 * Remove sessions so they are not visible during deletion.
3448 cds_lfht_for_each_entry(lta
->sessions
->ht
, &iter
.iter
, ua_sess
,
3450 struct ust_registry_session
*registry
;
3452 ret
= lttng_ht_del(lta
->sessions
, &iter
);
3454 /* The session was already removed so scheduled for teardown. */
3458 if (ua_sess
->buffer_type
== LTTNG_BUFFER_PER_PID
) {
3459 (void) ust_app_flush_app_session(lta
, ua_sess
);
3463 * Add session to list for teardown. This is safe since at this point we
3464 * are the only one using this list.
3466 pthread_mutex_lock(&ua_sess
->lock
);
3468 if (ua_sess
->deleted
) {
3469 pthread_mutex_unlock(&ua_sess
->lock
);
3474 * Normally, this is done in the delete session process which is
3475 * executed in the call rcu below. However, upon registration we can't
3476 * afford to wait for the grace period before pushing data or else the
3477 * data pending feature can race between the unregistration and stop
3478 * command where the data pending command is sent *before* the grace
3481 * The close metadata below nullifies the metadata pointer in the
3482 * session so the delete session will NOT push/close a second time.
3484 registry
= get_session_registry(ua_sess
);
3486 /* Push metadata for application before freeing the application. */
3487 (void) push_metadata(registry
, ua_sess
->consumer
);
3490 * Don't ask to close metadata for global per UID buffers. Close
3491 * metadata only on destroy trace session in this case. Also, the
3492 * previous push metadata could have flag the metadata registry to
3493 * close so don't send a close command if closed.
3495 if (ua_sess
->buffer_type
!= LTTNG_BUFFER_PER_UID
) {
3496 /* And ask to close it for this session registry. */
3497 (void) close_metadata(registry
, ua_sess
->consumer
);
3500 cds_list_add(&ua_sess
->teardown_node
, <a
->teardown_head
);
3502 pthread_mutex_unlock(&ua_sess
->lock
);
3505 /* Remove application from PID hash table */
3506 ret
= lttng_ht_del(ust_app_ht_by_sock
, &ust_app_sock_iter
);
3510 * Remove application from notify hash table. The thread handling the
3511 * notify socket could have deleted the node so ignore on error because
3512 * either way it's valid. The close of that socket is handled by the other
3515 iter
.iter
.node
= <a
->notify_sock_n
.node
;
3516 (void) lttng_ht_del(ust_app_ht_by_notify_sock
, &iter
);
3519 * Ignore return value since the node might have been removed before by an
3520 * add replace during app registration because the PID can be reassigned by
3523 iter
.iter
.node
= <a
->pid_n
.node
;
3524 ret
= lttng_ht_del(ust_app_ht
, &iter
);
3526 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
3531 call_rcu(<a
->pid_n
.head
, delete_ust_app_rcu
);
3538 * Fill events array with all events name of all registered apps.
3540 int ust_app_list_events(struct lttng_event
**events
)
3543 size_t nbmem
, count
= 0;
3544 struct lttng_ht_iter iter
;
3545 struct ust_app
*app
;
3546 struct lttng_event
*tmp_event
;
3548 nbmem
= UST_APP_EVENT_LIST_SIZE
;
3549 tmp_event
= zmalloc(nbmem
* sizeof(struct lttng_event
));
3550 if (tmp_event
== NULL
) {
3551 PERROR("zmalloc ust app events");
3558 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
3559 struct lttng_ust_tracepoint_iter uiter
;
3561 health_code_update();
3563 if (!app
->compatible
) {
3565 * TODO: In time, we should notice the caller of this error by
3566 * telling him that this is a version error.
3570 pthread_mutex_lock(&app
->sock_lock
);
3571 handle
= ustctl_tracepoint_list(app
->sock
);
3573 if (handle
!= -EPIPE
&& handle
!= -LTTNG_UST_ERR_EXITING
) {
3574 ERR("UST app list events getting handle failed for app pid %d",
3577 pthread_mutex_unlock(&app
->sock_lock
);
3581 while ((ret
= ustctl_tracepoint_list_get(app
->sock
, handle
,
3582 &uiter
)) != -LTTNG_UST_ERR_NOENT
) {
3583 /* Handle ustctl error. */
3587 if (ret
!= -LTTNG_UST_ERR_EXITING
&& ret
!= -EPIPE
) {
3588 ERR("UST app tp list get failed for app %d with ret %d",
3591 DBG3("UST app tp list get failed. Application is dead");
3593 * This is normal behavior, an application can die during the
3594 * creation process. Don't report an error so the execution can
3595 * continue normally. Continue normal execution.
3600 release_ret
= ustctl_release_handle(app
->sock
, handle
);
3601 if (release_ret
< 0 &&
3602 release_ret
!= -LTTNG_UST_ERR_EXITING
&&
3603 release_ret
!= -EPIPE
) {
3604 ERR("Error releasing app handle for app %d with ret %d", app
->sock
, release_ret
);
3606 pthread_mutex_unlock(&app
->sock_lock
);
3610 health_code_update();
3611 if (count
>= nbmem
) {
3612 /* In case the realloc fails, we free the memory */
3613 struct lttng_event
*new_tmp_event
;
3616 new_nbmem
= nbmem
<< 1;
3617 DBG2("Reallocating event list from %zu to %zu entries",
3619 new_tmp_event
= realloc(tmp_event
,
3620 new_nbmem
* sizeof(struct lttng_event
));
3621 if (new_tmp_event
== NULL
) {
3624 PERROR("realloc ust app events");
3627 release_ret
= ustctl_release_handle(app
->sock
, handle
);
3628 if (release_ret
< 0 &&
3629 release_ret
!= -LTTNG_UST_ERR_EXITING
&&
3630 release_ret
!= -EPIPE
) {
3631 ERR("Error releasing app handle for app %d with ret %d", app
->sock
, release_ret
);
3633 pthread_mutex_unlock(&app
->sock_lock
);
3636 /* Zero the new memory */
3637 memset(new_tmp_event
+ nbmem
, 0,
3638 (new_nbmem
- nbmem
) * sizeof(struct lttng_event
));
3640 tmp_event
= new_tmp_event
;
3642 memcpy(tmp_event
[count
].name
, uiter
.name
, LTTNG_UST_SYM_NAME_LEN
);
3643 tmp_event
[count
].loglevel
= uiter
.loglevel
;
3644 tmp_event
[count
].type
= (enum lttng_event_type
) LTTNG_UST_TRACEPOINT
;
3645 tmp_event
[count
].pid
= app
->pid
;
3646 tmp_event
[count
].enabled
= -1;
3649 ret
= ustctl_release_handle(app
->sock
, handle
);
3650 pthread_mutex_unlock(&app
->sock_lock
);
3651 if (ret
< 0 && ret
!= -LTTNG_UST_ERR_EXITING
&& ret
!= -EPIPE
) {
3652 ERR("Error releasing app handle for app %d with ret %d", app
->sock
, ret
);
3657 *events
= tmp_event
;
3659 DBG2("UST app list events done (%zu events)", count
);
3664 health_code_update();
3669 * Fill events array with all events name of all registered apps.
3671 int ust_app_list_event_fields(struct lttng_event_field
**fields
)
3674 size_t nbmem
, count
= 0;
3675 struct lttng_ht_iter iter
;
3676 struct ust_app
*app
;
3677 struct lttng_event_field
*tmp_event
;
3679 nbmem
= UST_APP_EVENT_LIST_SIZE
;
3680 tmp_event
= zmalloc(nbmem
* sizeof(struct lttng_event_field
));
3681 if (tmp_event
== NULL
) {
3682 PERROR("zmalloc ust app event fields");
3689 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
3690 struct lttng_ust_field_iter uiter
;
3692 health_code_update();
3694 if (!app
->compatible
) {
3696 * TODO: In time, we should notice the caller of this error by
3697 * telling him that this is a version error.
3701 pthread_mutex_lock(&app
->sock_lock
);
3702 handle
= ustctl_tracepoint_field_list(app
->sock
);
3704 if (handle
!= -EPIPE
&& handle
!= -LTTNG_UST_ERR_EXITING
) {
3705 ERR("UST app list field getting handle failed for app pid %d",
3708 pthread_mutex_unlock(&app
->sock_lock
);
3712 while ((ret
= ustctl_tracepoint_field_list_get(app
->sock
, handle
,
3713 &uiter
)) != -LTTNG_UST_ERR_NOENT
) {
3714 /* Handle ustctl error. */
3718 if (ret
!= -LTTNG_UST_ERR_EXITING
&& ret
!= -EPIPE
) {
3719 ERR("UST app tp list field failed for app %d with ret %d",
3722 DBG3("UST app tp list field failed. Application is dead");
3724 * This is normal behavior, an application can die during the
3725 * creation process. Don't report an error so the execution can
3726 * continue normally. Reset list and count for next app.
3731 release_ret
= ustctl_release_handle(app
->sock
, handle
);
3732 pthread_mutex_unlock(&app
->sock_lock
);
3733 if (release_ret
< 0 &&
3734 release_ret
!= -LTTNG_UST_ERR_EXITING
&&
3735 release_ret
!= -EPIPE
) {
3736 ERR("Error releasing app handle for app %d with ret %d", app
->sock
, release_ret
);
3741 health_code_update();
3742 if (count
>= nbmem
) {
3743 /* In case the realloc fails, we free the memory */
3744 struct lttng_event_field
*new_tmp_event
;
3747 new_nbmem
= nbmem
<< 1;
3748 DBG2("Reallocating event field list from %zu to %zu entries",
3750 new_tmp_event
= realloc(tmp_event
,
3751 new_nbmem
* sizeof(struct lttng_event_field
));
3752 if (new_tmp_event
== NULL
) {
3755 PERROR("realloc ust app event fields");
3758 release_ret
= ustctl_release_handle(app
->sock
, handle
);
3759 pthread_mutex_unlock(&app
->sock_lock
);
3761 release_ret
!= -LTTNG_UST_ERR_EXITING
&&
3762 release_ret
!= -EPIPE
) {
3763 ERR("Error releasing app handle for app %d with ret %d", app
->sock
, release_ret
);
3767 /* Zero the new memory */
3768 memset(new_tmp_event
+ nbmem
, 0,
3769 (new_nbmem
- nbmem
) * sizeof(struct lttng_event_field
));
3771 tmp_event
= new_tmp_event
;
3774 memcpy(tmp_event
[count
].field_name
, uiter
.field_name
, LTTNG_UST_SYM_NAME_LEN
);
3775 /* Mapping between these enums matches 1 to 1. */
3776 tmp_event
[count
].type
= (enum lttng_event_field_type
) uiter
.type
;
3777 tmp_event
[count
].nowrite
= uiter
.nowrite
;
3779 memcpy(tmp_event
[count
].event
.name
, uiter
.event_name
, LTTNG_UST_SYM_NAME_LEN
);
3780 tmp_event
[count
].event
.loglevel
= uiter
.loglevel
;
3781 tmp_event
[count
].event
.type
= LTTNG_EVENT_TRACEPOINT
;
3782 tmp_event
[count
].event
.pid
= app
->pid
;
3783 tmp_event
[count
].event
.enabled
= -1;
3786 ret
= ustctl_release_handle(app
->sock
, handle
);
3787 pthread_mutex_unlock(&app
->sock_lock
);
3789 ret
!= -LTTNG_UST_ERR_EXITING
&&
3791 ERR("Error releasing app handle for app %d with ret %d", app
->sock
, ret
);
3796 *fields
= tmp_event
;
3798 DBG2("UST app list event fields done (%zu events)", count
);
3803 health_code_update();
3808 * Free and clean all traceable apps of the global list.
3810 * Should _NOT_ be called with RCU read-side lock held.
3812 void ust_app_clean_list(void)
3815 struct ust_app
*app
;
3816 struct lttng_ht_iter iter
;
3818 DBG2("UST app cleaning registered apps hash table");
/*
 * Drain the PID hash table; each app is freed via call_rcu so readers
 * still inside an RCU read-side section stay safe.
 */
3823 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
3824 ret
= lttng_ht_del(ust_app_ht
, &iter
);
3826 call_rcu(&app
->pid_n
.head
, delete_ust_app_rcu
);
/* Secondary tables only unlink nodes: the PID table owns the objects. */
3830 /* Cleanup socket hash table */
3831 if (ust_app_ht_by_sock
) {
3832 cds_lfht_for_each_entry(ust_app_ht_by_sock
->ht
, &iter
.iter
, app
,
3834 ret
= lttng_ht_del(ust_app_ht_by_sock
, &iter
);
3839 /* Cleanup notify socket hash table */
3840 if (ust_app_ht_by_notify_sock
) {
3841 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock
->ht
, &iter
.iter
, app
,
3842 notify_sock_n
.node
) {
3843 ret
= lttng_ht_del(ust_app_ht_by_notify_sock
, &iter
);
3849 /* Destroy is done only when the ht is empty */
3851 ht_cleanup_push(ust_app_ht
);
3853 if (ust_app_ht_by_sock
) {
3854 ht_cleanup_push(ust_app_ht_by_sock
);
3856 if (ust_app_ht_by_notify_sock
) {
3857 ht_cleanup_push(ust_app_ht_by_notify_sock
);
3862 * Init UST app hash table.
/*
 * Allocate the three global app hash tables (by PID, by command socket,
 * by notify socket). Failure checks on the later tables are visible
 * below; the return value lines are not part of this extraction.
 */
3864 int ust_app_ht_alloc(void)
3866 ust_app_ht
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);
3870 ust_app_ht_by_sock
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);
3871 if (!ust_app_ht_by_sock
) {
3874 ust_app_ht_by_notify_sock
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);
3875 if (!ust_app_ht_by_notify_sock
) {
3882 * For a specific UST session, disable the channel for all registered apps.
3884 int ust_app_disable_channel_glb(struct ltt_ust_session
*usess
,
3885 struct ltt_ust_channel
*uchan
)
3888 struct lttng_ht_iter iter
;
3889 struct lttng_ht_node_str
*ua_chan_node
;
3890 struct ust_app
*app
;
3891 struct ust_app_session
*ua_sess
;
3892 struct ust_app_channel
*ua_chan
;
3894 if (usess
== NULL
|| uchan
== NULL
) {
3895 ERR("Disabling UST global channel with NULL values");
3900 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64
,
3901 uchan
->name
, usess
->id
);
3905 /* For every registered applications */
3906 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
3907 struct lttng_ht_iter uiter
;
3908 if (!app
->compatible
) {
3910 * TODO: In time, we should notice the caller of this error by
3911 * telling him that this is a version error.
3915 ua_sess
= lookup_session_by_app(usess
, app
);
3916 if (ua_sess
== NULL
) {
3921 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &uiter
);
3922 ua_chan_node
= lttng_ht_iter_get_node_str(&uiter
);
3923 /* If the session if found for the app, the channel must be there */
3924 assert(ua_chan_node
);
3926 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
, node
);
3927 /* The channel must not be already disabled */
3928 assert(ua_chan
->enabled
== 1);
3930 /* Disable channel onto application */
3931 ret
= disable_ust_app_channel(ua_sess
, ua_chan
, app
);
3933 /* XXX: We might want to report this error at some point... */
3945 * For a specific UST session, enable the channel for all registered apps.
3947 int ust_app_enable_channel_glb(struct ltt_ust_session
*usess
,
3948 struct ltt_ust_channel
*uchan
)
3951 struct lttng_ht_iter iter
;
3952 struct ust_app
*app
;
3953 struct ust_app_session
*ua_sess
;
3955 if (usess
== NULL
|| uchan
== NULL
) {
3956 ERR("Adding UST global channel to NULL values");
3961 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64
,
3962 uchan
->name
, usess
->id
);
3966 /* For every registered applications */
3967 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
3968 if (!app
->compatible
) {
3970 * TODO: In time, we should notice the caller of this error by
3971 * telling him that this is a version error.
3975 ua_sess
= lookup_session_by_app(usess
, app
);
3976 if (ua_sess
== NULL
) {
3980 /* Enable channel onto application */
3981 ret
= enable_ust_app_channel(ua_sess
, uchan
, app
);
3983 /* XXX: We might want to report this error at some point... */
3995 * Disable an event in a channel and for a specific session.
3997 int ust_app_disable_event_glb(struct ltt_ust_session
*usess
,
3998 struct ltt_ust_channel
*uchan
, struct ltt_ust_event
*uevent
)
4001 struct lttng_ht_iter iter
, uiter
;
4002 struct lttng_ht_node_str
*ua_chan_node
;
4003 struct ust_app
*app
;
4004 struct ust_app_session
*ua_sess
;
4005 struct ust_app_channel
*ua_chan
;
4006 struct ust_app_event
*ua_event
;
4008 DBG("UST app disabling event %s for all apps in channel "
4009 "%s for session id %" PRIu64
,
4010 uevent
->attr
.name
, uchan
->name
, usess
->id
);
4014 /* For all registered applications */
4015 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
4016 if (!app
->compatible
) {
4018 * TODO: In time, we should notice the caller of this error by
4019 * telling him that this is a version error.
4023 ua_sess
= lookup_session_by_app(usess
, app
);
4024 if (ua_sess
== NULL
) {
4029 /* Lookup channel in the ust app session */
4030 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &uiter
);
4031 ua_chan_node
= lttng_ht_iter_get_node_str(&uiter
);
4032 if (ua_chan_node
== NULL
) {
4033 DBG2("Channel %s not found in session id %" PRIu64
" for app pid %d."
4034 "Skipping", uchan
->name
, usess
->id
, app
->pid
);
4037 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
, node
);
4039 ua_event
= find_ust_app_event(ua_chan
->events
, uevent
->attr
.name
,
4040 uevent
->filter
, uevent
->attr
.loglevel
,
4042 if (ua_event
== NULL
) {
4043 DBG2("Event %s not found in channel %s for app pid %d."
4044 "Skipping", uevent
->attr
.name
, uchan
->name
, app
->pid
);
4048 ret
= disable_ust_app_event(ua_sess
, ua_event
, app
);
4050 /* XXX: Report error someday... */
4061 * For a specific UST session, create the channel for all registered apps.
4063 int ust_app_create_channel_glb(struct ltt_ust_session
*usess
,
4064 struct ltt_ust_channel
*uchan
)
4066 int ret
= 0, created
;
4067 struct lttng_ht_iter iter
;
4068 struct ust_app
*app
;
4069 struct ust_app_session
*ua_sess
= NULL
;
4071 /* Very wrong code flow */
4075 DBG2("UST app adding channel %s to UST domain for session id %" PRIu64
,
4076 uchan
->name
, usess
->id
);
4080 /* For every registered applications */
4081 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
4082 if (!app
->compatible
) {
4084 * TODO: In time, we should notice the caller of this error by
4085 * telling him that this is a version error.
4089 if (!(trace_ust_id_tracker_lookup(LTTNG_TRACKER_VPID
, usess
, app
->pid
)
4090 && trace_ust_id_tracker_lookup(LTTNG_TRACKER_VUID
, usess
, app
->uid
)
4091 && trace_ust_id_tracker_lookup(LTTNG_TRACKER_VGID
, usess
, app
->gid
))) {
4097 * Create session on the tracer side and add it to app session HT. Note
4098 * that if session exist, it will simply return a pointer to the ust
4101 ret
= create_ust_app_session(usess
, app
, &ua_sess
, &created
);
4106 * The application's socket is not valid. Either a bad socket
4107 * or a timeout on it. We can't inform the caller that for a
4108 * specific app, the session failed so lets continue here.
4110 ret
= 0; /* Not an error. */
4114 goto error_rcu_unlock
;
4119 pthread_mutex_lock(&ua_sess
->lock
);
4121 if (ua_sess
->deleted
) {
4122 pthread_mutex_unlock(&ua_sess
->lock
);
4126 if (!strncmp(uchan
->name
, DEFAULT_METADATA_NAME
,
4127 sizeof(uchan
->name
))) {
4128 copy_channel_attr_to_ustctl(&ua_sess
->metadata_attr
, &uchan
->attr
);
4131 /* Create channel onto application. We don't need the chan ref. */
4132 ret
= create_ust_app_channel(ua_sess
, uchan
, app
,
4133 LTTNG_UST_CHAN_PER_CPU
, usess
, NULL
);
4135 pthread_mutex_unlock(&ua_sess
->lock
);
4137 /* Cleanup the created session if it's the case. */
4139 destroy_app_session(app
, ua_sess
);
4144 * The application's socket is not valid. Either a bad socket
4145 * or a timeout on it. We can't inform the caller that for a
4146 * specific app, the session failed so lets continue here.
4148 ret
= 0; /* Not an error. */
4152 goto error_rcu_unlock
;
4163 * Enable event for a specific session and channel on the tracer.
4165 int ust_app_enable_event_glb(struct ltt_ust_session
*usess
,
4166 struct ltt_ust_channel
*uchan
, struct ltt_ust_event
*uevent
)
4169 struct lttng_ht_iter iter
, uiter
;
4170 struct lttng_ht_node_str
*ua_chan_node
;
4171 struct ust_app
*app
;
4172 struct ust_app_session
*ua_sess
;
4173 struct ust_app_channel
*ua_chan
;
4174 struct ust_app_event
*ua_event
;
4176 DBG("UST app enabling event %s for all apps for session id %" PRIu64
,
4177 uevent
->attr
.name
, usess
->id
);
4180 * NOTE: At this point, this function is called only if the session and
4181 * channel passed are already created for all apps. and enabled on the
4187 /* For all registered applications */
4188 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
4189 if (!app
->compatible
) {
4191 * TODO: In time, we should notice the caller of this error by
4192 * telling him that this is a version error.
4196 ua_sess
= lookup_session_by_app(usess
, app
);
4198 /* The application has problem or is probably dead. */
4202 pthread_mutex_lock(&ua_sess
->lock
);
4204 if (ua_sess
->deleted
) {
4205 pthread_mutex_unlock(&ua_sess
->lock
);
4209 /* Lookup channel in the ust app session */
4210 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &uiter
);
4211 ua_chan_node
= lttng_ht_iter_get_node_str(&uiter
);
4213 * It is possible that the channel cannot be found is
4214 * the channel/event creation occurs concurrently with
4215 * an application exit.
4217 if (!ua_chan_node
) {
4218 pthread_mutex_unlock(&ua_sess
->lock
);
4222 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
, node
);
4224 /* Get event node */
4225 ua_event
= find_ust_app_event(ua_chan
->events
, uevent
->attr
.name
,
4226 uevent
->filter
, uevent
->attr
.loglevel
, uevent
->exclusion
);
4227 if (ua_event
== NULL
) {
4228 DBG3("UST app enable event %s not found for app PID %d."
4229 "Skipping app", uevent
->attr
.name
, app
->pid
);
4233 ret
= enable_ust_app_event(ua_sess
, ua_event
, app
);
4235 pthread_mutex_unlock(&ua_sess
->lock
);
4239 pthread_mutex_unlock(&ua_sess
->lock
);
4248 * For a specific existing UST session and UST channel, creates the event for
4249 * all registered apps.
4251 int ust_app_create_event_glb(struct ltt_ust_session
*usess
,
4252 struct ltt_ust_channel
*uchan
, struct ltt_ust_event
*uevent
)
4255 struct lttng_ht_iter iter
, uiter
;
4256 struct lttng_ht_node_str
*ua_chan_node
;
4257 struct ust_app
*app
;
4258 struct ust_app_session
*ua_sess
;
4259 struct ust_app_channel
*ua_chan
;
4261 DBG("UST app creating event %s for all apps for session id %" PRIu64
,
4262 uevent
->attr
.name
, usess
->id
);
4266 /* For all registered applications */
4267 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
4268 if (!app
->compatible
) {
4270 * TODO: In time, we should notice the caller of this error by
4271 * telling him that this is a version error.
4275 ua_sess
= lookup_session_by_app(usess
, app
);
4277 /* The application has problem or is probably dead. */
4281 pthread_mutex_lock(&ua_sess
->lock
);
4283 if (ua_sess
->deleted
) {
4284 pthread_mutex_unlock(&ua_sess
->lock
);
4288 /* Lookup channel in the ust app session */
4289 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &uiter
);
4290 ua_chan_node
= lttng_ht_iter_get_node_str(&uiter
);
4291 /* If the channel is not found, there is a code flow error */
4292 assert(ua_chan_node
);
4294 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
, node
);
4296 ret
= create_ust_app_event(ua_sess
, ua_chan
, uevent
, app
);
4297 pthread_mutex_unlock(&ua_sess
->lock
);
4299 if (ret
!= -LTTNG_UST_ERR_EXIST
) {
4300 /* Possible value at this point: -ENOMEM. If so, we stop! */
4303 DBG2("UST app event %s already exist on app PID %d",
4304 uevent
->attr
.name
, app
->pid
);
4315 * Start tracing for a specific UST session and app.
4317 * Called with UST app session lock held.
4321 int ust_app_start_trace(struct ltt_ust_session
*usess
, struct ust_app
*app
)
4324 struct ust_app_session
*ua_sess
;
4326 DBG("Starting tracing for ust app pid %d", app
->pid
);
4330 if (!app
->compatible
) {
4334 ua_sess
= lookup_session_by_app(usess
, app
);
4335 if (ua_sess
== NULL
) {
4336 /* The session is in teardown process. Ignore and continue. */
4340 pthread_mutex_lock(&ua_sess
->lock
);
4342 if (ua_sess
->deleted
) {
4343 pthread_mutex_unlock(&ua_sess
->lock
);
4347 /* Upon restart, we skip the setup, already done */
4348 if (ua_sess
->started
) {
4352 /* Create directories if consumer is LOCAL and has a path defined. */
4353 if (usess
->consumer
->type
== CONSUMER_DST_LOCAL
&&
4354 strlen(usess
->consumer
->dst
.trace_path
) > 0) {
4355 ret
= run_as_mkdir_recursive(usess
->consumer
->dst
.trace_path
,
4356 S_IRWXU
| S_IRWXG
, ua_sess
->euid
, ua_sess
->egid
);
4358 if (errno
!= EEXIST
) {
4359 ERR("Trace directory creation error");
4366 * Create the metadata for the application. This returns gracefully if a
4367 * metadata was already set for the session.
4369 ret
= create_ust_app_metadata(ua_sess
, app
, usess
->consumer
);
4374 health_code_update();
4377 /* This start the UST tracing */
4378 pthread_mutex_lock(&app
->sock_lock
);
4379 ret
= ustctl_start_session(app
->sock
, ua_sess
->handle
);
4380 pthread_mutex_unlock(&app
->sock_lock
);
4382 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
4383 ERR("Error starting tracing for app pid: %d (ret: %d)",
4386 DBG("UST app start session failed. Application is dead.");
4388 * This is normal behavior, an application can die during the
4389 * creation process. Don't report an error so the execution can
4390 * continue normally.
4392 pthread_mutex_unlock(&ua_sess
->lock
);
4398 /* Indicate that the session has been started once */
4399 ua_sess
->started
= 1;
4401 pthread_mutex_unlock(&ua_sess
->lock
);
4403 health_code_update();
4405 /* Quiescent wait after starting trace */
4406 pthread_mutex_lock(&app
->sock_lock
);
4407 ret
= ustctl_wait_quiescent(app
->sock
);
4408 pthread_mutex_unlock(&app
->sock_lock
);
4409 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
4410 ERR("UST app wait quiescent failed for app pid %d ret %d",
4416 health_code_update();
4420 pthread_mutex_unlock(&ua_sess
->lock
);
4422 health_code_update();
4427 * Stop tracing for a specific UST session and app.
4430 int ust_app_stop_trace(struct ltt_ust_session
*usess
, struct ust_app
*app
)
4433 struct ust_app_session
*ua_sess
;
4434 struct ust_registry_session
*registry
;
4436 DBG("Stopping tracing for ust app pid %d", app
->pid
);
4440 if (!app
->compatible
) {
4441 goto end_no_session
;
4444 ua_sess
= lookup_session_by_app(usess
, app
);
4445 if (ua_sess
== NULL
) {
4446 goto end_no_session
;
4449 pthread_mutex_lock(&ua_sess
->lock
);
4451 if (ua_sess
->deleted
) {
4452 pthread_mutex_unlock(&ua_sess
->lock
);
4453 goto end_no_session
;
4457 * If started = 0, it means that stop trace has been called for a session
4458 * that was never started. It's possible since we can have a fail start
4459 * from either the application manager thread or the command thread. Simply
4460 * indicate that this is a stop error.
4462 if (!ua_sess
->started
) {
4463 goto error_rcu_unlock
;
4466 health_code_update();
4468 /* This inhibits UST tracing */
4469 pthread_mutex_lock(&app
->sock_lock
);
4470 ret
= ustctl_stop_session(app
->sock
, ua_sess
->handle
);
4471 pthread_mutex_unlock(&app
->sock_lock
);
4473 if (ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
4474 ERR("Error stopping tracing for app pid: %d (ret: %d)",
4477 DBG("UST app stop session failed. Application is dead.");
4479 * This is normal behavior, an application can die during the
4480 * creation process. Don't report an error so the execution can
4481 * continue normally.
4485 goto error_rcu_unlock
;
4488 health_code_update();
4490 /* Quiescent wait after stopping trace */
4491 pthread_mutex_lock(&app
->sock_lock
);
4492 ret
= ustctl_wait_quiescent(app
->sock
);
4493 pthread_mutex_unlock(&app
->sock_lock
);
4494 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
4495 ERR("UST app wait quiescent failed for app pid %d ret %d",
4499 health_code_update();
4501 registry
= get_session_registry(ua_sess
);
4503 /* The UST app session is held registry shall not be null. */
4506 /* Push metadata for application before freeing the application. */
4507 (void) push_metadata(registry
, ua_sess
->consumer
);
4510 pthread_mutex_unlock(&ua_sess
->lock
);
4513 health_code_update();
4517 pthread_mutex_unlock(&ua_sess
->lock
);
4519 health_code_update();
4524 int ust_app_flush_app_session(struct ust_app
*app
,
4525 struct ust_app_session
*ua_sess
)
4527 int ret
, retval
= 0;
4528 struct lttng_ht_iter iter
;
4529 struct ust_app_channel
*ua_chan
;
4530 struct consumer_socket
*socket
;
4532 DBG("Flushing app session buffers for ust app pid %d", app
->pid
);
4536 if (!app
->compatible
) {
4537 goto end_not_compatible
;
4540 pthread_mutex_lock(&ua_sess
->lock
);
4542 if (ua_sess
->deleted
) {
4546 health_code_update();
4548 /* Flushing buffers */
4549 socket
= consumer_find_socket_by_bitness(app
->bits_per_long
,
4552 /* Flush buffers and push metadata. */
4553 switch (ua_sess
->buffer_type
) {
4554 case LTTNG_BUFFER_PER_PID
:
4555 cds_lfht_for_each_entry(ua_sess
->channels
->ht
, &iter
.iter
, ua_chan
,
4557 health_code_update();
4558 ret
= consumer_flush_channel(socket
, ua_chan
->key
);
4560 ERR("Error flushing consumer channel");
4566 case LTTNG_BUFFER_PER_UID
:
4572 health_code_update();
4575 pthread_mutex_unlock(&ua_sess
->lock
);
4579 health_code_update();
4584 * Flush buffers for all applications for a specific UST session.
4585 * Called with UST session lock held.
4588 int ust_app_flush_session(struct ltt_ust_session
*usess
)
4593 DBG("Flushing session buffers for all ust apps");
4597 /* Flush buffers and push metadata. */
4598 switch (usess
->buffer_type
) {
4599 case LTTNG_BUFFER_PER_UID
:
4601 struct buffer_reg_uid
*reg
;
4602 struct lttng_ht_iter iter
;
4604 /* Flush all per UID buffers associated to that session. */
4605 cds_list_for_each_entry(reg
, &usess
->buffer_reg_uid_list
, lnode
) {
4606 struct ust_registry_session
*ust_session_reg
;
4607 struct buffer_reg_channel
*reg_chan
;
4608 struct consumer_socket
*socket
;
4610 /* Get consumer socket to use to push the metadata.*/
4611 socket
= consumer_find_socket_by_bitness(reg
->bits_per_long
,
4614 /* Ignore request if no consumer is found for the session. */
4618 cds_lfht_for_each_entry(reg
->registry
->channels
->ht
, &iter
.iter
,
4619 reg_chan
, node
.node
) {
4621 * The following call will print error values so the return
4622 * code is of little importance because whatever happens, we
4623 * have to try them all.
4625 (void) consumer_flush_channel(socket
, reg_chan
->consumer_key
);
4628 ust_session_reg
= reg
->registry
->reg
.ust
;
4629 /* Push metadata. */
4630 (void) push_metadata(ust_session_reg
, usess
->consumer
);
4634 case LTTNG_BUFFER_PER_PID
:
4636 struct ust_app_session
*ua_sess
;
4637 struct lttng_ht_iter iter
;
4638 struct ust_app
*app
;
4640 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
4641 ua_sess
= lookup_session_by_app(usess
, app
);
4642 if (ua_sess
== NULL
) {
4645 (void) ust_app_flush_app_session(app
, ua_sess
);
4656 health_code_update();
4661 int ust_app_clear_quiescent_app_session(struct ust_app
*app
,
4662 struct ust_app_session
*ua_sess
)
4665 struct lttng_ht_iter iter
;
4666 struct ust_app_channel
*ua_chan
;
4667 struct consumer_socket
*socket
;
4669 DBG("Clearing stream quiescent state for ust app pid %d", app
->pid
);
4673 if (!app
->compatible
) {
4674 goto end_not_compatible
;
4677 pthread_mutex_lock(&ua_sess
->lock
);
4679 if (ua_sess
->deleted
) {
4683 health_code_update();
4685 socket
= consumer_find_socket_by_bitness(app
->bits_per_long
,
4688 ERR("Failed to find consumer (%" PRIu32
") socket",
4689 app
->bits_per_long
);
4694 /* Clear quiescent state. */
4695 switch (ua_sess
->buffer_type
) {
4696 case LTTNG_BUFFER_PER_PID
:
4697 cds_lfht_for_each_entry(ua_sess
->channels
->ht
, &iter
.iter
,
4698 ua_chan
, node
.node
) {
4699 health_code_update();
4700 ret
= consumer_clear_quiescent_channel(socket
,
4703 ERR("Error clearing quiescent state for consumer channel");
4709 case LTTNG_BUFFER_PER_UID
:
4716 health_code_update();
4719 pthread_mutex_unlock(&ua_sess
->lock
);
4723 health_code_update();
4728 * Clear quiescent state in each stream for all applications for a
4729 * specific UST session.
4730 * Called with UST session lock held.
4733 int ust_app_clear_quiescent_session(struct ltt_ust_session
*usess
)
4738 DBG("Clearing stream quiescent state for all ust apps");
4742 switch (usess
->buffer_type
) {
4743 case LTTNG_BUFFER_PER_UID
:
4745 struct lttng_ht_iter iter
;
4746 struct buffer_reg_uid
*reg
;
4749 * Clear quiescent for all per UID buffers associated to
4752 cds_list_for_each_entry(reg
, &usess
->buffer_reg_uid_list
, lnode
) {
4753 struct consumer_socket
*socket
;
4754 struct buffer_reg_channel
*reg_chan
;
4756 /* Get associated consumer socket.*/
4757 socket
= consumer_find_socket_by_bitness(
4758 reg
->bits_per_long
, usess
->consumer
);
4761 * Ignore request if no consumer is found for
4767 cds_lfht_for_each_entry(reg
->registry
->channels
->ht
,
4768 &iter
.iter
, reg_chan
, node
.node
) {
4770 * The following call will print error values so
4771 * the return code is of little importance
4772 * because whatever happens, we have to try them
4775 (void) consumer_clear_quiescent_channel(socket
,
4776 reg_chan
->consumer_key
);
4781 case LTTNG_BUFFER_PER_PID
:
4783 struct ust_app_session
*ua_sess
;
4784 struct lttng_ht_iter iter
;
4785 struct ust_app
*app
;
4787 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
,
4789 ua_sess
= lookup_session_by_app(usess
, app
);
4790 if (ua_sess
== NULL
) {
4793 (void) ust_app_clear_quiescent_app_session(app
,
4805 health_code_update();
4810 * Destroy a specific UST session in apps.
4812 static int destroy_trace(struct ltt_ust_session
*usess
, struct ust_app
*app
)
4815 struct ust_app_session
*ua_sess
;
4816 struct lttng_ht_iter iter
;
4817 struct lttng_ht_node_u64
*node
;
4819 DBG("Destroy tracing for ust app pid %d", app
->pid
);
4823 if (!app
->compatible
) {
4827 __lookup_session_by_app(usess
, app
, &iter
);
4828 node
= lttng_ht_iter_get_node_u64(&iter
);
4830 /* Session is being or is deleted. */
4833 ua_sess
= caa_container_of(node
, struct ust_app_session
, node
);
4835 health_code_update();
4836 destroy_app_session(app
, ua_sess
);
4838 health_code_update();
4840 /* Quiescent wait after stopping trace */
4841 pthread_mutex_lock(&app
->sock_lock
);
4842 ret
= ustctl_wait_quiescent(app
->sock
);
4843 pthread_mutex_unlock(&app
->sock_lock
);
4844 if (ret
< 0 && ret
!= -EPIPE
&& ret
!= -LTTNG_UST_ERR_EXITING
) {
4845 ERR("UST app wait quiescent failed for app pid %d ret %d",
4850 health_code_update();
4855 * Start tracing for the UST session.
4857 int ust_app_start_trace_all(struct ltt_ust_session
*usess
)
4860 struct lttng_ht_iter iter
;
4861 struct ust_app
*app
;
4863 DBG("Starting all UST traces");
4868 * In a start-stop-start use-case, we need to clear the quiescent state
4869 * of each channel set by the prior stop command, thus ensuring that a
4870 * following stop or destroy is sure to grab a timestamp_end near those
4871 * operations, even if the packet is empty.
4873 (void) ust_app_clear_quiescent_session(usess
);
4875 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
4876 ret
= ust_app_start_trace(usess
, app
);
4878 /* Continue to next apps even on error */
4889 * Start tracing for the UST session.
4890 * Called with UST session lock held.
4892 int ust_app_stop_trace_all(struct ltt_ust_session
*usess
)
4895 struct lttng_ht_iter iter
;
4896 struct ust_app
*app
;
4898 DBG("Stopping all UST traces");
4902 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
4903 ret
= ust_app_stop_trace(usess
, app
);
4905 /* Continue to next apps even on error */
4910 (void) ust_app_flush_session(usess
);
4918 * Force stop live timers associated with the ust session.
4920 int ust_force_stop_live_timer(struct ltt_ust_session
*usess
)
4924 if (usess
->live_timer_interval
== 0) {
4928 DBG("Stop all live timer associated with UST session %p.", usess
);
4932 switch (usess
->buffer_type
) {
4933 case LTTNG_BUFFER_PER_UID
:
4935 struct buffer_reg_uid
*reg
;
4936 struct lttng_ht_iter iter
;
4938 cds_list_for_each_entry(reg
, &usess
->buffer_reg_uid_list
, lnode
) {
4939 struct ust_registry_session
*ust_session_reg
;
4940 struct buffer_reg_channel
*reg_chan
;
4941 struct consumer_socket
*socket
;
4943 /* Get consumer socket to use */
4944 socket
= consumer_find_socket_by_bitness(reg
->bits_per_long
,
4947 /* Ignore request if no consumer is found for the session. */
4951 cds_lfht_for_each_entry(reg
->registry
->channels
->ht
, &iter
.iter
,
4952 reg_chan
, node
.node
) {
4953 ret
= consumer_channel_stop_live_timer(socket
, reg_chan
->consumer_key
);
4955 ERR("Error stopping live timer for channel %" PRIu64
, reg_chan
->consumer_key
);
4961 case LTTNG_BUFFER_PER_PID
:
4963 struct lttng_ht_iter iter_i
;
4964 struct ust_app
*app
;
4965 uint64_t chan_reg_key
;
4967 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter_i
.iter
, app
,
4970 struct ust_app_session
*ua_sess
;
4971 struct lttng_ht_iter iter_j
, iter_k
;
4972 struct lttng_ht_node_u64
*node
;
4973 struct ust_app_channel
*ua_chan
;
4975 DBG("Stopping live timer associated with ust app pid "
4979 if (!app
->compatible
) {
4983 __lookup_session_by_app(usess
, app
, &iter_j
);
4984 node
= lttng_ht_iter_get_node_u64(&iter_j
);
4986 /* Session is being or is deleted. */
4989 ua_sess
= caa_container_of(node
, struct ust_app_session
,
4992 health_code_update();
4994 cds_lfht_for_each_entry(ua_sess
->channels
->ht
,
4995 &iter_k
.iter
, ua_chan
,
4997 struct consumer_socket
*consumer_socket
;
4999 /* Stop live timer immediately if any */
5001 consumer_find_socket_by_bitness(
5003 ua_chan
->session
->consumer
);
5004 ret
= consumer_channel_stop_live_timer(
5005 consumer_socket
, ua_chan
->key
);
5007 ERR("Error stopping live timer");
5020 health_code_update();
5026 * Force start live timers associated with the ust session.
5028 int ust_force_start_live_timer(struct ltt_ust_session
*usess
)
5032 if (usess
->live_timer_interval
== 0) {
5036 DBG("Start all live timer associated with UST session %p", usess
);
5040 switch (usess
->buffer_type
) {
5041 case LTTNG_BUFFER_PER_UID
:
5043 struct buffer_reg_uid
*reg
;
5044 struct lttng_ht_iter iter
;
5046 cds_list_for_each_entry(reg
, &usess
->buffer_reg_uid_list
, lnode
) {
5047 struct ust_registry_session
*ust_session_reg
;
5048 struct buffer_reg_channel
*reg_chan
;
5049 struct consumer_socket
*socket
;
5051 /* Get consumer socket to use */
5052 socket
= consumer_find_socket_by_bitness(reg
->bits_per_long
,
5055 /* Ignore request if no consumer is found for the session. */
5059 cds_lfht_for_each_entry(reg
->registry
->channels
->ht
, &iter
.iter
,
5060 reg_chan
, node
.node
) {
5061 ret
= consumer_channel_start_live_timer(socket
, reg_chan
->consumer_key
);
5063 ERR("Error stopping live timer for channel %" PRIu64
, reg_chan
->consumer_key
);
5069 case LTTNG_BUFFER_PER_PID
:
5071 struct lttng_ht_iter iter_i
;
5072 struct ust_app
*app
;
5073 uint64_t chan_reg_key
;
5075 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter_i
.iter
, app
,
5078 struct ust_app_session
*ua_sess
;
5079 struct lttng_ht_iter iter_j
, iter_k
;
5080 struct lttng_ht_node_u64
*node
;
5081 struct ust_app_channel
*ua_chan
;
5083 DBG("Stopping live timer associated with ust app pid "
5087 if (!app
->compatible
) {
5091 __lookup_session_by_app(usess
, app
, &iter_j
);
5092 node
= lttng_ht_iter_get_node_u64(&iter_j
);
5094 /* Session is being or is deleted. */
5097 ua_sess
= caa_container_of(node
, struct ust_app_session
,
5100 health_code_update();
5102 cds_lfht_for_each_entry(ua_sess
->channels
->ht
,
5103 &iter_k
.iter
, ua_chan
,
5105 struct consumer_socket
*consumer_socket
;
5107 /* Stop live timer immediately if any */
5109 consumer_find_socket_by_bitness(
5111 ua_chan
->session
->consumer
);
5112 ret
= consumer_channel_start_live_timer(
5113 consumer_socket
, ua_chan
->key
);
5115 ERR("Error stopping live timer");
5128 health_code_update();
5134 * Destroy app UST session.
5136 int ust_app_destroy_trace_all(struct ltt_ust_session
*usess
)
5139 struct lttng_ht_iter iter
;
5140 struct ust_app
*app
;
5142 DBG("Destroy all UST traces");
5146 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
5147 ret
= destroy_trace(usess
, app
);
5149 /* Continue to next apps even on error */
5160 void ust_app_global_create(struct ltt_ust_session
*usess
, struct ust_app
*app
)
5163 struct lttng_ht_iter iter
, uiter
;
5164 struct ust_app_session
*ua_sess
= NULL
;
5165 struct ust_app_channel
*ua_chan
;
5166 struct ust_app_event
*ua_event
;
5167 struct ust_app_ctx
*ua_ctx
;
5170 ret
= create_ust_app_session(usess
, app
, &ua_sess
, &is_created
);
5172 /* Tracer is probably gone or ENOMEM. */
5176 /* App session already created. */
5181 pthread_mutex_lock(&ua_sess
->lock
);
5183 if (ua_sess
->deleted
) {
5184 pthread_mutex_unlock(&ua_sess
->lock
);
5189 * We can iterate safely here over all UST app session since the create ust
5190 * app session above made a shadow copy of the UST global domain from the
5193 cds_lfht_for_each_entry(ua_sess
->channels
->ht
, &iter
.iter
, ua_chan
,
5195 ret
= do_create_channel(app
, usess
, ua_sess
, ua_chan
);
5196 if (ret
< 0 && ret
!= -ENOTCONN
) {
5198 * Stop everything. On error, the application
5199 * failed, no more file descriptor are available
5200 * or ENOMEM so stopping here is the only thing
5201 * we can do for now. The only exception is
5202 * -ENOTCONN, which indicates that the application
5209 * Add context using the list so they are enabled in the same order the
5212 cds_list_for_each_entry(ua_ctx
, &ua_chan
->ctx_list
, list
) {
5213 ret
= create_ust_channel_context(ua_chan
, ua_ctx
, app
);
5220 /* For each events */
5221 cds_lfht_for_each_entry(ua_chan
->events
->ht
, &uiter
.iter
, ua_event
,
5223 ret
= create_ust_event(app
, ua_sess
, ua_chan
, ua_event
);
5230 pthread_mutex_unlock(&ua_sess
->lock
);
5232 if (usess
->active
) {
5233 ret
= ust_app_start_trace(usess
, app
);
5238 DBG2("UST trace started for app pid %d", app
->pid
);
5241 /* Everything went well at this point. */
5245 pthread_mutex_unlock(&ua_sess
->lock
);
5248 destroy_app_session(app
, ua_sess
);
5254 void ust_app_global_destroy(struct ltt_ust_session
*usess
, struct ust_app
*app
)
5256 struct ust_app_session
*ua_sess
;
5258 ua_sess
= lookup_session_by_app(usess
, app
);
5259 if (ua_sess
== NULL
) {
5262 destroy_app_session(app
, ua_sess
);
5266 * Add channels/events from UST global domain to registered apps at sock.
5268 * Called with session lock held.
5269 * Called with RCU read-side lock held.
5271 void ust_app_global_update(struct ltt_ust_session
*usess
, struct ust_app
*app
)
5275 DBG2("UST app global update for app sock %d for session id %" PRIu64
,
5276 app
->sock
, usess
->id
);
5278 if (!app
->compatible
) {
5282 if (trace_ust_id_tracker_lookup(LTTNG_TRACKER_VPID
, usess
, app
->pid
)
5283 && trace_ust_id_tracker_lookup(LTTNG_TRACKER_VUID
, usess
, app
->uid
)
5284 && trace_ust_id_tracker_lookup(LTTNG_TRACKER_VGID
, usess
, app
->gid
)) {
5285 ust_app_global_create(usess
, app
);
5287 ust_app_global_destroy(usess
, app
);
5292 * Called with session lock held.
5294 void ust_app_global_update_all(struct ltt_ust_session
*usess
)
5296 struct lttng_ht_iter iter
;
5297 struct ust_app
*app
;
5300 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
5301 ust_app_global_update(usess
, app
);
5307 * Add context to a specific channel for global UST domain.
5309 int ust_app_add_ctx_channel_glb(struct ltt_ust_session
*usess
,
5310 struct ltt_ust_channel
*uchan
, struct ltt_ust_context
*uctx
)
5313 struct lttng_ht_node_str
*ua_chan_node
;
5314 struct lttng_ht_iter iter
, uiter
;
5315 struct ust_app_channel
*ua_chan
= NULL
;
5316 struct ust_app_session
*ua_sess
;
5317 struct ust_app
*app
;
5321 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
5322 if (!app
->compatible
) {
5324 * TODO: In time, we should notice the caller of this error by
5325 * telling him that this is a version error.
5329 ua_sess
= lookup_session_by_app(usess
, app
);
5330 if (ua_sess
== NULL
) {
5334 pthread_mutex_lock(&ua_sess
->lock
);
5336 if (ua_sess
->deleted
) {
5337 pthread_mutex_unlock(&ua_sess
->lock
);
5341 /* Lookup channel in the ust app session */
5342 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &uiter
);
5343 ua_chan_node
= lttng_ht_iter_get_node_str(&uiter
);
5344 if (ua_chan_node
== NULL
) {
5347 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
,
5349 ret
= create_ust_app_channel_context(ua_sess
, ua_chan
, &uctx
->ctx
, app
);
5354 pthread_mutex_unlock(&ua_sess
->lock
);
/*
 * Enable event for a channel from a UST session for a specific PID.
 *
 * Returns 0 on success; otherwise a negative value (visible paths suggest
 * lookup/creation errors propagate through `ret` — TODO confirm upstream).
 *
 * NOTE(review): this extract is incomplete — the embedded original line
 * numbering skips in several places, so NULL-checks, braces, `ret`'s
 * declaration, RCU lock/unlock and return paths are missing below.
 * Restore them from the upstream file; do not compile as-is.
 */
int ust_app_enable_event_pid(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;			/* target application, found by PID */
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);

	app = ust_app_find_by_pid(pid);
	/* [gap in extract: presumably `if (app == NULL) {` guards the ERR] */
	ERR("UST app enable event per PID %d not found", pid);

	/* Skip applications built against an incompatible tracer ABI. */
	if (!app->compatible) {

	ua_sess = lookup_session_by_app(usess, app);
	/* The application has problem or is probably dead. */

	/* Serialize against concurrent session mutation/teardown. */
	pthread_mutex_lock(&ua_sess->lock);

	/* Session was deleted under us: bail out without touching it. */
	if (ua_sess->deleted) {

	/* Lookup channel in the ust app session. */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	/* If the channel is not found, there is a code flow error. */
	assert(ua_chan_node);

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
	/* [gap in extract] */

	/* Event identity is name + filter + loglevel + exclusion. */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event == NULL) {
		/* Not yet instantiated for this app: create it first. */
		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
	/* [gap in extract] */

	ret = enable_ust_app_event(ua_sess, ua_event, app);

	pthread_mutex_unlock(&ua_sess->lock);
/*
 * Receive registration and populate the given msg structure.
 *
 * On success return 0 else a negative value returned by the ustctl call.
 *
 * NOTE(review): extract gaps — the `switch (-ret)` wrapper around the error
 * cases, the tail arguments of ustctl_recv_reg_msg(), `ret`'s declaration
 * and the return statements are missing; restore from upstream.
 */
int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
	/* Received as 32-bit wire values; narrowed to system types below. */
	uint32_t pid, ppid, uid, gid;

	ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
			&pid, &ppid, &uid, &gid,
			&msg->bits_per_long,
			&msg->uint8_t_alignment,
			&msg->uint16_t_alignment,
			&msg->uint32_t_alignment,
			&msg->uint64_t_alignment,
			&msg->long_alignment,

	case LTTNG_UST_ERR_EXITING:
		/* Expected during app teardown; debug-level only. */
		DBG3("UST app recv reg message failed. Application died");
	case LTTNG_UST_ERR_UNSUP_MAJOR:
		ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
				msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
				LTTNG_UST_ABI_MINOR_VERSION);
		ERR("UST app recv reg message failed with ret %d", ret);

	/* Narrow wire-format u32 values into the message's system types. */
	msg->pid = (pid_t) pid;
	msg->ppid = (pid_t) ppid;
	msg->uid = (uid_t) uid;
	msg->gid = (gid_t) gid;
/*
 * Return a ust app session object using the application object and the
 * session object descriptor as a key. If not found, NULL is returned.
 * A RCU read side lock MUST be acquired when calling this function.
 *
 * NOTE(review): extract gaps — the `int objd` parameter, the NULL check on
 * `node`, labels and the return statement are missing; restore from upstream.
 */
static struct ust_app_session *find_session_by_objd(struct ust_app *app,
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess = NULL;

	/* Hash lookup keyed on the UST object descriptor. */
	lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	/* [gap in extract: `if (node == NULL) {` presumably guards the DBG2] */
	DBG2("UST app session find by objd %d not found", objd);

	ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
/*
 * Return a ust app channel object using the application object and the channel
 * object descriptor as a key. If not found, NULL is returned. A RCU read side
 * lock MUST be acquired before calling this function.
 *
 * NOTE(review): extract gaps — the `int objd` parameter, the NULL check on
 * `node`, labels and the return statement are missing; restore from upstream.
 */
static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan = NULL;

	/* Hash lookup keyed on the UST object descriptor. */
	lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	/* [gap in extract: `if (node == NULL) {` presumably guards the DBG2] */
	DBG2("UST app channel find by objd %d not found", objd);

	ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
/*
 * Reply to a register channel notification from an application on the notify
 * socket. The channel metadata is also created.
 *
 * The session UST registry lock is acquired in this function.
 *
 * On success 0 is returned else a negative value.
 *
 * NOTE(review): extract gaps — RCU lock/unlock, NULL guards around the DBG
 * messages, several `}`/`else` lines and the return paths are missing;
 * restore from upstream before compiling.
 */
static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
		size_t nr_fields, struct ustctl_field *fields)
	int ret, ret_code = 0;
	uint32_t chan_id, reg_count;
	uint64_t chan_reg_key;
	enum ustctl_channel_header type;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	struct ust_registry_channel *chan_reg;

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	DBG("Application socket %d is being torn down. Abort event notify",
	goto error_rcu_unlock;

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	DBG("Application channel is being torn down. Abort event notify");
	goto error_rcu_unlock;

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	/* Get right session registry depending on the session buffer type. */
	registry = get_session_registry(ua_sess);
	DBG("Application session is being torn down. Abort event notify");
	goto error_rcu_unlock;

	/* Depending on the buffer type, a different channel key is used. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
		/* [gap in extract: `} else {` presumably separates these] */
		chan_reg_key = ua_chan->key;

	/* Registry lock protects the channel registration/metadata state. */
	pthread_mutex_lock(&registry->lock);

	chan_reg = ust_registry_channel_find(registry, chan_reg_key);

	/* First registration for this channel: decide the header type. */
	if (!chan_reg->register_done) {
		reg_count = ust_registry_get_event_count(chan_reg);
		/*
		 * Compact header holds fewer event IDs; 31 appears to be the
		 * compact-header capacity threshold — TODO confirm against CTF spec.
		 */
		if (reg_count < 31) {
			type = USTCTL_CHANNEL_HEADER_COMPACT;
			type = USTCTL_CHANNEL_HEADER_LARGE;

		/* Registry takes ownership of the context fields array. */
		chan_reg->nr_ctx_fields = nr_fields;
		chan_reg->ctx_fields = fields;
		chan_reg->header_type = type;
	/* Get current already assigned values. */
	type = chan_reg->header_type;

	/* Channel id is set during the object creation. */
	chan_id = chan_reg->chan_id;

	/* Append to metadata. */
	if (!chan_reg->metadata_dumped) {
		ret_code = ust_metadata_channel_statedump(registry, chan_reg);
		ERR("Error appending channel metadata (errno = %d)", ret_code);

	DBG3("UST app replying to register channel key %" PRIu64
			" with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,

	ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
	if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app reply channel failed with ret %d", ret);
		DBG3("UST app reply channel failed. Application died");

	/* This channel registry registration is completed. */
	chan_reg->register_done = 1;

	pthread_mutex_unlock(&registry->lock);
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 *
 * Ownership: sig, fields and model_emf_uri are consumed by
 * ust_registry_create_event() below (freed there if needed); the leftover
 * free(model_emf_uri) at the end presumably runs only on the paths where
 * ownership was NOT transferred — TODO confirm against upstream labels.
 *
 * NOTE(review): extract gaps — `int ret, ret_code;` declarations, RCU
 * lock/unlock, NULL guards and return/label lines are missing.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields,
		int loglevel_value, char *model_emf_uri)
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	DBG("Application socket %d is being torn down. Abort event notify",
	goto error_rcu_unlock;

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	DBG("Application channel is being torn down. Abort event notify");
	goto error_rcu_unlock;

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	DBG("Application session is being torn down. Abort event notify");
	goto error_rcu_unlock;

	/* Per-UID buffers key the registry channel by the tracing channel id. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
		/* [gap in extract: `} else {` presumably separates these] */
		chan_reg_key = ua_chan->key;

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields,
			loglevel_value, model_emf_uri, ua_sess->buffer_type,

	model_emf_uri = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app reply event failed with ret %d", ret);
		DBG3("UST app reply event failed. Application died");
	/*
	 * No need to wipe the create event since the application socket will
	 * get close on error hence cleaning up everything by itself.
	 */

	DBG3("UST registry event %s with id %" PRId32 " added successfully",

	pthread_mutex_unlock(&registry->lock);

	free(model_emf_uri);
/*
 * Add enum to the UST session registry. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired within this function.
 *
 * On success 0 is returned else a negative value.
 *
 * NOTE(review): extract gaps — RCU lock/unlock, NULL guards around the DBG
 * messages and return/label lines are missing; restore from upstream.
 */
static int add_enum_ust_registry(int sock, int sobjd, char *name,
		struct ustctl_enum_entry *entries, size_t nr_entries)
	int ret = 0, ret_code;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	/* -1ULL acts as "unassigned" until the registry assigns a real id. */
	uint64_t enum_id = -1ULL;

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	/* Return an error since this is not an error. */
	DBG("Application socket %d is being torn down. Aborting enum registration",
	goto error_rcu_unlock;

	/* Lookup session by UST object descriptor. */
	ua_sess = find_session_by_objd(app, sobjd);
	/* Return an error since this is not an error. */
	DBG("Application session is being torn down (session not found). Aborting enum registration.");
	goto error_rcu_unlock;

	registry = get_session_registry(ua_sess);
	DBG("Application session is being torn down (registry not found). Aborting enum registration.");
	goto error_rcu_unlock;

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, the callee acquires the ownership of
	 * entries. The variable entries MUST NOT be read/written after this.
	 */
	ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
			entries, nr_entries, &enum_id);

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
	if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app reply enum failed with ret %d", ret);
		DBG3("UST app reply enum failed. Application died");
	/*
	 * No need to wipe the create enum since the application socket will
	 * get close on error hence cleaning up everything by itself.
	 */

	DBG3("UST registry enum %s added successfully or already found", name);

	pthread_mutex_unlock(&registry->lock);
/*
 * Handle application notification through the given notify socket.
 *
 * Return 0 on success or else a negative value.
 *
 * NOTE(review): extract gaps — `int ret;`, the `switch (cmd)` statement,
 * per-case braces/breaks, `size_t nr_fields`/`nr_entries` declarations,
 * trailing call arguments and the default-case handling are missing;
 * restore from upstream before compiling.
 */
int ust_app_recv_notify(int sock)
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	/* Read the command header first; payload reception is per-command. */
	ret = ustctl_recv_notify(sock, &cmd);
	if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app recv notify failed with ret %d", ret);
		DBG3("UST app recv notify failed. Application died");

	case USTCTL_NOTIFY_CMD_EVENT:
		int sobjd, cobjd, loglevel_value;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
				&loglevel_value, &sig, &nr_fields, &fields,
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv event failed with ret %d", ret);
			DBG3("UST app recv event failed. Application died");

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownership of these variables and transfers them
		 * to that function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel_value, model_emf_uri);

	case USTCTL_NOTIFY_CMD_CHANNEL:
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv channel failed with ret %d", ret);
			DBG3("UST app recv channel failed. Application died");

		/*
		 * The fields ownership are transferred to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean them up.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,

	case USTCTL_NOTIFY_CMD_ENUM:
		char name[LTTNG_UST_SYM_NAME_LEN];
		struct ustctl_enum_entry *entries;

		DBG2("UST app ustctl register enum received");

		ret = ustctl_recv_register_enum(sock, &sobjd, name,
				&entries, &nr_entries);
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv enum failed with ret %d", ret);
			DBG3("UST app recv enum failed. Application died");

		/* Callee assumes ownership of entries. */
		ret = add_enum_ust_registry(sock, sobjd, name,
				entries, nr_entries);

	/* Should NEVER happen. */
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whatever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 *
 * NOTE(review): extract gaps — `int err_enomem;` style error flag, RCU
 * lock/unlock, the `if (obj == NULL)` / `if (!app)` guards and the `obj->fd`
 * assignment are missing; restore from upstream.
 */
void ust_app_notify_sock_unregister(int sock)
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	obj = zmalloc(sizeof(*obj));
	/*
	 * An ENOMEM is kind of uncool. If this strikes we continue the
	 * procedure but the call_rcu will not be called. In this case, we
	 * accept the fd leak rather than possibly creating an unsynchronized
	 * state between threads.
	 *
	 * TODO: The notify object should be created once the notify socket is
	 * registered and stored independently from the ust app object. The
	 * tricky part is to synchronize the teardown of the application and
	 * this notify object. Let's keep that in mind so we can avoid this
	 * kind of shenanigans with ENOMEM in the teardown path.
	 */

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and if it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	call_rcu(&obj->head, close_notify_sock_rcu);
/*
 * Destroy a ust app data structure and free its memory.
 *
 * Deferred via RCU so readers still traversing the app hash tables never see
 * freed memory; delete_ust_app_rcu runs after a grace period.
 *
 * NOTE(review): extract gap — the opening brace and any preceding statements
 * (original lines between the signature and the call_rcu) are missing.
 */
void ust_app_destroy(struct ust_app *app)
	call_rcu(&app->pid_n.head, delete_ust_app_rcu);
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * Return 0 on success or else a negative value.
 *
 * Two strategies depending on the session buffer type:
 *  - LTTNG_BUFFER_PER_UID: iterate the per-UID buffer registries;
 *  - LTTNG_BUFFER_PER_PID: iterate every registered application.
 * In both cases each data channel is snapshotted, then the metadata channel.
 *
 * NOTE(review): extract gaps — `int ret;`, RCU lock/unlock, `continue`/`goto`
 * error paths, snprintf result checks, case braces/breaks and the final
 * return are missing; restore from upstream before compiling.
 */
int ust_app_snapshot_record(struct ltt_ust_session *usess,
		struct snapshot_output *output, int wait,
		uint64_t nb_packets_per_stream)
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char pathname[PATH_MAX];

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			if (!reg->registry->reg.ust->metadata_key) {
				/* Skip since no metadata is present. */

			/* Get consumer socket to use to push the metadata. */
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,

			/* Per-UID snapshot path embeds the uid and bitness. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
				PERROR("snprintf snapshot path");

			/* Add the UST default trace dir to path. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
						output, 0, usess->uid, usess->gid, pathname, wait,
						nb_packets_per_stream);

			/* Metadata channel last; flag 1 marks it as metadata. */
			ret = consumer_snapshot_channel(socket,
					reg->registry->reg.ust->metadata_key, output, 1,
					usess->uid, usess->gid, pathname, wait, 0);

	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
				/* Session not associated with this app. */

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
				PERROR("snprintf snapshot path");

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				/* Per-PID channels snapshot under the app's effective ids. */
				ret = consumer_snapshot_channel(socket, ua_chan->key, output,
						0, ua_sess->euid, ua_sess->egid, pathname, wait,
						nb_packets_per_stream);

			registry = get_session_registry(ua_sess);
				DBG("Application session is being torn down. Abort snapshot record.");

			ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
					1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
/*
 * Return the size taken by one more packet per stream.
 *
 * Sums subbuf_size * stream_count over every channel of the session that has
 * not yet contributed all of its sub-buffers (cur_nr_packets < num_subbuf),
 * for both buffer ownership models.
 *
 * NOTE(review): extract gaps — RCU lock/unlock, `continue` statements inside
 * the skip branches, case braces/breaks and the final `return tot_size;` are
 * missing; restore from upstream before compiling.
 */
uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
		uint64_t cur_nr_packets)
	uint64_t tot_size = 0;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				if (cur_nr_packets >= reg_chan->num_subbuf) {
					/*
					 * Don't take channel into account if we
					 * already grab all its packets.
					 */
				tot_size += reg_chan->subbuf_size * reg_chan->stream_count;

	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct lttng_ht_iter chan_iter;

			ua_sess = lookup_session_by_app(usess, app);
				/* Session not associated with this app. */

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
					/*
					 * Don't take channel into account if we
					 * already grab all its packets.
					 */
				tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
/*
 * Fetch runtime statistics (lost packets or discarded events) for one channel
 * of a per-UID buffer session.
 *
 * Resolves the consumer channel key from the per-UID registry list, then —
 * per the visible calls — queries lost packets or discarded events; the
 * selection presumably depends on `overwrite` (overwrite mode loses packets,
 * discard mode discards events) — TODO confirm against upstream branch.
 *
 * NOTE(review): extract gaps — `int ret;`, error checks after each call, the
 * if/else around the two consumer queries and the return are missing.
 */
int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
		struct cds_list_head *buffer_reg_uid_list,
		struct consumer_output *consumer, uint64_t uchan_id,
		int overwrite, uint64_t *discarded, uint64_t *lost)
	uint64_t consumer_chan_key;

	ret = buffer_reg_uid_consumer_channel_key(
			buffer_reg_uid_list, ust_session_id,
			uchan_id, &consumer_chan_key);

	ret = consumer_get_lost_packets(ust_session_id,
			consumer_chan_key, consumer, lost);
	ret = consumer_get_discarded_events(ust_session_id,
			consumer_chan_key, consumer, discarded);
/*
 * Fetch runtime statistics (lost packets or discarded events) for a channel
 * of a per-PID buffer session, aggregated across all registered applications
 * tracing the session.
 *
 * NOTE(review): extract gaps — `int ret;`, `*discarded = *lost = 0;`
 * initialization, RCU lock/unlock, the overwrite/discard branch structure,
 * `continue` in the no-session case and the return are missing; the
 * accumulation of `_discarded` into `*discarded` is visible, the symmetric
 * lost-packet accumulation is not.
 */
int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan,
		struct consumer_output *consumer, int overwrite,
		uint64_t *discarded, uint64_t *lost)
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	/*
	 * Iterate over every registered applications. Sum counters for
	 * all applications containing requested session and channel.
	 */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;

		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {

		lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session is found for the app, the channel must be there. */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		ret = consumer_get_lost_packets(usess->id, ua_chan->key,

		/* Use a local accumulator so failures don't corrupt the total. */
		uint64_t _discarded;

		ret = consumer_get_discarded_events(usess->id,
				ua_chan->key, consumer, &_discarded);
		(*discarded) += _discarded;
/*
 * Ask one application to regenerate its statedump (metadata) for the given
 * UST session.
 *
 * Return 0 on success or else a negative value (the visible ustctl call is
 * the only ret producer — TODO confirm error mapping upstream).
 *
 * NOTE(review): extract gaps — `int ret;`, RCU lock/unlock, the early-return
 * body for the NULL/deleted session cases, `goto end`/labels and the final
 * return are missing; restore from upstream.
 */
int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
		struct ust_app *app)
	struct ust_app_session *ua_sess;

	DBG("Regenerating the metadata for ust app pid %d", app->pid);

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */

	/* Hold the session lock across the whole statedump request. */
	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {

	/* App socket is shared; serialize the ustctl command on sock_lock. */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();
6397 * Regenerate the statedump for each app in the session.
6399 int ust_app_regenerate_statedump_all(struct ltt_ust_session
*usess
)
6402 struct lttng_ht_iter iter
;
6403 struct ust_app
*app
;
6405 DBG("Regenerating the metadata for all UST apps");
6409 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
6410 if (!app
->compatible
) {
6414 ret
= ust_app_regenerate_statedump(usess
, app
);
6416 /* Continue to the next app even on error */