CUSTOM: liver timer: immediate liver timer control on data pending and destroy
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _LGPL_SOURCE
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <pthread.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <unistd.h>
29 #include <urcu/compiler.h>
30 #include <lttng/ust-error.h>
31 #include <signal.h>
32
33 #include <common/common.h>
34 #include <common/sessiond-comm/sessiond-comm.h>
35
36 #include "consumer.h"
37 #include "buffer-registry.h"
38 #include "fd-limit.h"
39 #include "health-sessiond.h"
40 #include "ust-app.h"
41 #include "ust-consumer.h"
42 #include "ust-ctl.h"
43 #include "utils.h"
44 #include "session.h"
45
46 static
47 int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
48
49 /* Next available channel key. Access under next_channel_key_lock. */
50 static uint64_t _next_channel_key;
51 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
52
53 /* Next available session ID. Access under next_session_id_lock. */
54 static uint64_t _next_session_id;
55 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
56
57 /*
58 * Return the incremented value of next_channel_key.
59 */
60 static uint64_t get_next_channel_key(void)
61 {
62 uint64_t ret;
63
64 pthread_mutex_lock(&next_channel_key_lock);
65 ret = ++_next_channel_key;
66 pthread_mutex_unlock(&next_channel_key_lock);
67 return ret;
68 }
69
70 /*
71 * Return the atomically incremented value of next_session_id.
72 */
73 static uint64_t get_next_session_id(void)
74 {
75 uint64_t ret;
76
77 pthread_mutex_lock(&next_session_id_lock);
78 ret = ++_next_session_id;
79 pthread_mutex_unlock(&next_session_id_lock);
80 return ret;
81 }
82
/*
 * Copy channel attributes from the command-level representation to the
 * ustctl consumer representation, field by field since the two struct
 * layouts differ. (Note: these are channel attributes, not event
 * attributes as a previous comment suggested.)
 */
static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
}
95
/*
 * Match function for the hash table lookup of UST app events.
 *
 * Compares the candidate node against the lookup key on four attributes:
 * event name, loglevel, filter bytecode and exclusion list. Returns 1 on
 * a full match and 0 otherwise (cds_lfht match-function convention).
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name: bounded comparison against the fixed-size name field. */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/*
	 * Event loglevel. NOTE(review): key->loglevel_type carries a loglevel
	 * *value* here (it is compared against event->attr.loglevel).
	 */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* Exactly one of the filters is NULL: cannot match. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exist: compare length, then the bytecode itself. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* Exactly one of the exclusions is NULL: cannot match. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exist: compare count, then the name array. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}


	/* Match. */
	return 1;

no_match:
	return 0;
}
173
174 /*
175 * Unique add of an ust app event in the given ht. This uses the custom
176 * ht_match_ust_app_event match function and the event name as hash.
177 */
178 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
179 struct ust_app_event *event)
180 {
181 struct cds_lfht_node *node_ptr;
182 struct ust_app_ht_key key;
183 struct lttng_ht *ht;
184
185 assert(ua_chan);
186 assert(ua_chan->events);
187 assert(event);
188
189 ht = ua_chan->events;
190 key.name = event->attr.name;
191 key.filter = event->filter;
192 key.loglevel_type = event->attr.loglevel;
193 key.exclusion = event->exclusion;
194
195 node_ptr = cds_lfht_add_unique(ht->ht,
196 ht->hash_fct(event->node.key, lttng_ht_seed),
197 ht_match_ust_app_event, &key, &event->node.node);
198 assert(node_ptr == &event->node.node);
199 }
200
201 /*
202 * Close the notify socket from the given RCU head object. This MUST be called
203 * through a call_rcu().
204 */
205 static void close_notify_sock_rcu(struct rcu_head *head)
206 {
207 int ret;
208 struct ust_app_notify_sock_obj *obj =
209 caa_container_of(head, struct ust_app_notify_sock_obj, head);
210
211 /* Must have a valid fd here. */
212 assert(obj->fd >= 0);
213
214 ret = close(obj->fd);
215 if (ret) {
216 ERR("close notify sock %d RCU", obj->fd);
217 }
218 lttng_fd_put(LTTNG_FD_APPS, 1);
219
220 free(obj);
221 }
222
223 /*
224 * Return the session registry according to the buffer type of the given
225 * session.
226 *
227 * A registry per UID object MUST exists before calling this function or else
228 * it assert() if not found. RCU read side lock must be acquired.
229 */
230 static struct ust_registry_session *get_session_registry(
231 struct ust_app_session *ua_sess)
232 {
233 struct ust_registry_session *registry = NULL;
234
235 assert(ua_sess);
236
237 switch (ua_sess->buffer_type) {
238 case LTTNG_BUFFER_PER_PID:
239 {
240 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
241 if (!reg_pid) {
242 goto error;
243 }
244 registry = reg_pid->registry->reg.ust;
245 break;
246 }
247 case LTTNG_BUFFER_PER_UID:
248 {
249 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
250 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
251 if (!reg_uid) {
252 goto error;
253 }
254 registry = reg_uid->registry->reg.ust;
255 break;
256 }
257 default:
258 assert(0);
259 };
260
261 error:
262 return registry;
263 }
264
265 /*
266 * Delete ust context safely. RCU read lock must be held before calling
267 * this function.
268 */
269 static
270 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
271 struct ust_app *app)
272 {
273 int ret;
274
275 assert(ua_ctx);
276
277 if (ua_ctx->obj) {
278 pthread_mutex_lock(&app->sock_lock);
279 ret = ustctl_release_object(sock, ua_ctx->obj);
280 pthread_mutex_unlock(&app->sock_lock);
281 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
282 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
283 sock, ua_ctx->obj->handle, ret);
284 }
285 free(ua_ctx->obj);
286 }
287 free(ua_ctx);
288 }
289
290 /*
291 * Delete ust app event safely. RCU read lock must be held before calling
292 * this function.
293 */
294 static
295 void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
296 struct ust_app *app)
297 {
298 int ret;
299
300 assert(ua_event);
301
302 free(ua_event->filter);
303 if (ua_event->exclusion != NULL)
304 free(ua_event->exclusion);
305 if (ua_event->obj != NULL) {
306 pthread_mutex_lock(&app->sock_lock);
307 ret = ustctl_release_object(sock, ua_event->obj);
308 pthread_mutex_unlock(&app->sock_lock);
309 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
310 ERR("UST app sock %d release event obj failed with ret %d",
311 sock, ret);
312 }
313 free(ua_event->obj);
314 }
315 free(ua_event);
316 }
317
318 /*
319 * Release ust data object of the given stream.
320 *
321 * Return 0 on success or else a negative value.
322 */
323 static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
324 struct ust_app *app)
325 {
326 int ret = 0;
327
328 assert(stream);
329
330 if (stream->obj) {
331 pthread_mutex_lock(&app->sock_lock);
332 ret = ustctl_release_object(sock, stream->obj);
333 pthread_mutex_unlock(&app->sock_lock);
334 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
335 ERR("UST app sock %d release stream obj failed with ret %d",
336 sock, ret);
337 }
338 lttng_fd_put(LTTNG_FD_APPS, 2);
339 free(stream->obj);
340 }
341
342 return ret;
343 }
344
/*
 * Delete a UST app stream safely. RCU read lock must be held before
 * calling this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	/* Best-effort release of the tracer object; freed regardless. */
	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}
358
359 /*
360 * We need to execute ht_destroy outside of RCU read-side critical
361 * section and outside of call_rcu thread, so we postpone its execution
362 * using ht_cleanup_push. It is simpler than to change the semantic of
363 * the many callers of delete_ust_app_session().
364 */
365 static
366 void delete_ust_app_channel_rcu(struct rcu_head *head)
367 {
368 struct ust_app_channel *ua_chan =
369 caa_container_of(head, struct ust_app_channel, rcu_head);
370
371 ht_cleanup_push(ua_chan->ctx);
372 ht_cleanup_push(ua_chan->events);
373 free(ua_chan);
374 }
375
/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * Only applies to per-CPU channels; metadata channels are skipped.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	/* Overwrite-mode channels lose packets; discard-mode drop events. */
	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	/* Accumulate into the parent channel for later reporting. */
	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
}
438
/*
 * Delete ust app channel safely: tear down its streams, contexts and
 * events, update per-PID accounting/registry state, release the
 * tracer-side object and defer the final free past an RCU grace period.
 *
 * RCU read lock must be held before calling this function.
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
		/*
		 * Fold this channel's lost/discarded counters into the parent
		 * channel before the per-PID channel disappears.
		 */
		save_per_pid_lost_discarded_counters(ua_chan);
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}

	/* Defer free and ht destruction until after the RCU grace period. */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
509
510 int ust_app_register_done(struct ust_app *app)
511 {
512 int ret;
513
514 pthread_mutex_lock(&app->sock_lock);
515 ret = ustctl_register_done(app->sock);
516 pthread_mutex_unlock(&app->sock_lock);
517 return ret;
518 }
519
520 int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
521 {
522 int ret, sock;
523
524 if (app) {
525 pthread_mutex_lock(&app->sock_lock);
526 sock = app->sock;
527 } else {
528 sock = -1;
529 }
530 ret = ustctl_release_object(sock, data);
531 if (app) {
532 pthread_mutex_unlock(&app->sock_lock);
533 }
534 return ret;
535 }
536
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existance of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held; note that this function
 * temporarily RELEASES the registry lock around the consumer push (see
 * below) and re-acquires it before returning.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	/* Snapshot the key while the registry lock is held by the caller. */
	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happens if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	/* Compute the not-yet-sent byte range [offset, offset + len). */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectionnal communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
				new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
672
/*
 * For a given application and session, push metadata to consumer.
 * Either sock or consumer is required : if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existance of registry. It also ensures existance
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		/* A previous push failed or a close was already issued. */
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	/*
	 * Note: ust_app_push_metadata temporarily drops and re-acquires
	 * registry->lock internally while talking to the consumer.
	 */
	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}
723
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);

	/* Nothing to do if no metadata was ever assigned or already closed. */
	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

error:
	/*
	 * NOTE: the success path deliberately falls through this label too.
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be emit
	 * for this registry.
	 */
	registry->metadata_closed = 1;
end:
	pthread_mutex_unlock(&registry->lock);
	rcu_read_unlock();
	return ret;
}
775
776 /*
777 * We need to execute ht_destroy outside of RCU read-side critical
778 * section and outside of call_rcu thread, so we postpone its execution
779 * using ht_cleanup_push. It is simpler than to change the semantic of
780 * the many callers of delete_ust_app_session().
781 */
782 static
783 void delete_ust_app_session_rcu(struct rcu_head *head)
784 {
785 struct ust_app_session *ua_sess =
786 caa_container_of(head, struct ust_app_session, rcu_head);
787
788 ht_cleanup_push(ua_sess->channels);
789 free(ua_sess);
790 }
791
/*
 * Delete ust app session safely: push any pending metadata, tear down all
 * channels, drop per-PID buffer registries, release the tracer-side
 * session handle and defer the final free past an RCU grace period.
 *
 * RCU read lock must be held before calling this function.
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/* A session must only ever be deleted once. */
	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	/* Drop this session's reference on its consumer output. */
	consumer_output_put(ua_sess->consumer);

	/* Defer free and ht destruction until after the RCU grace period. */
	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
872
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	/* Invalidate the app's socket before closing it below. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Defer hash table destruction to the ht-cleanup thread. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}
930
931 /*
932 * URCU intermediate call to delete an UST app.
933 */
934 static
935 void delete_ust_app_rcu(struct rcu_head *head)
936 {
937 struct lttng_ht_node_ulong *node =
938 caa_container_of(head, struct lttng_ht_node_ulong, head);
939 struct ust_app *app =
940 caa_container_of(node, struct ust_app, pid_n);
941
942 DBG3("Call RCU deleting app PID %d", app->pid);
943 delete_ust_app(app);
944 }
945
946 /*
947 * Delete the session from the application ht and delete the data structure by
948 * freeing every object inside and releasing them.
949 *
950 * The session list lock must be held by the caller.
951 */
952 static void destroy_app_session(struct ust_app *app,
953 struct ust_app_session *ua_sess)
954 {
955 int ret;
956 struct lttng_ht_iter iter;
957
958 assert(app);
959 assert(ua_sess);
960
961 iter.iter.node = &ua_sess->node.node;
962 ret = lttng_ht_del(app->sessions, &iter);
963 if (ret) {
964 /* Already scheduled for teardown. */
965 goto end;
966 }
967
968 /* Once deleted, free the data structure. */
969 delete_ust_app_session(app->sock, ua_sess, app);
970
971 end:
972 return;
973 }
974
975 /*
976 * Alloc new UST app session.
977 */
978 static
979 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
980 {
981 struct ust_app_session *ua_sess;
982
983 /* Init most of the default value by allocating and zeroing */
984 ua_sess = zmalloc(sizeof(struct ust_app_session));
985 if (ua_sess == NULL) {
986 PERROR("malloc");
987 goto error_free;
988 }
989
990 ua_sess->handle = -1;
991 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
992 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
993 pthread_mutex_init(&ua_sess->lock, NULL);
994
995 return ua_sess;
996
997 error_free:
998 return NULL;
999 }
1000
1001 /*
1002 * Alloc new UST app channel.
1003 */
1004 static
1005 struct ust_app_channel *alloc_ust_app_channel(char *name,
1006 struct ust_app_session *ua_sess,
1007 struct lttng_ust_channel_attr *attr)
1008 {
1009 struct ust_app_channel *ua_chan;
1010
1011 /* Init most of the default value by allocating and zeroing */
1012 ua_chan = zmalloc(sizeof(struct ust_app_channel));
1013 if (ua_chan == NULL) {
1014 PERROR("malloc");
1015 goto error;
1016 }
1017
1018 /* Setup channel name */
1019 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1020 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1021
1022 ua_chan->enabled = 1;
1023 ua_chan->handle = -1;
1024 ua_chan->session = ua_sess;
1025 ua_chan->key = get_next_channel_key();
1026 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1027 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1028 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
1029
1030 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
1031 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
1032
1033 /* Copy attributes */
1034 if (attr) {
1035 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
1036 ua_chan->attr.subbuf_size = attr->subbuf_size;
1037 ua_chan->attr.num_subbuf = attr->num_subbuf;
1038 ua_chan->attr.overwrite = attr->overwrite;
1039 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1040 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1041 ua_chan->attr.output = attr->output;
1042 }
1043 /* By default, the channel is a per cpu channel. */
1044 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1045
1046 DBG3("UST app channel %s allocated", ua_chan->name);
1047
1048 return ua_chan;
1049
1050 error:
1051 return NULL;
1052 }
1053
1054 /*
1055 * Allocate and initialize a UST app stream.
1056 *
1057 * Return newly allocated stream pointer or NULL on error.
1058 */
1059 struct ust_app_stream *ust_app_alloc_stream(void)
1060 {
1061 struct ust_app_stream *stream = NULL;
1062
1063 stream = zmalloc(sizeof(*stream));
1064 if (stream == NULL) {
1065 PERROR("zmalloc ust app stream");
1066 goto error;
1067 }
1068
1069 /* Zero could be a valid value for a handle so flag it to -1. */
1070 stream->handle = -1;
1071
1072 error:
1073 return stream;
1074 }
1075
1076 /*
1077 * Alloc new UST app event.
1078 */
1079 static
1080 struct ust_app_event *alloc_ust_app_event(char *name,
1081 struct lttng_ust_event *attr)
1082 {
1083 struct ust_app_event *ua_event;
1084
1085 /* Init most of the default value by allocating and zeroing */
1086 ua_event = zmalloc(sizeof(struct ust_app_event));
1087 if (ua_event == NULL) {
1088 PERROR("malloc");
1089 goto error;
1090 }
1091
1092 ua_event->enabled = 1;
1093 strncpy(ua_event->name, name, sizeof(ua_event->name));
1094 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1095 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
1096
1097 /* Copy attributes */
1098 if (attr) {
1099 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1100 }
1101
1102 DBG3("UST app event %s allocated", ua_event->name);
1103
1104 return ua_event;
1105
1106 error:
1107 return NULL;
1108 }
1109
1110 /*
1111 * Alloc new UST app context.
1112 */
1113 static
1114 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
1115 {
1116 struct ust_app_ctx *ua_ctx;
1117
1118 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
1119 if (ua_ctx == NULL) {
1120 goto error;
1121 }
1122
1123 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1124
1125 if (uctx) {
1126 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
1127 if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
1128 char *provider_name = NULL, *ctx_name = NULL;
1129
1130 provider_name = strdup(uctx->u.app_ctx.provider_name);
1131 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1132 if (!provider_name || !ctx_name) {
1133 free(provider_name);
1134 free(ctx_name);
1135 goto error;
1136 }
1137
1138 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1139 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1140 }
1141 }
1142
1143 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
1144 return ua_ctx;
1145 error:
1146 free(ua_ctx);
1147 return NULL;
1148 }
1149
1150 /*
1151 * Allocate a filter and copy the given original filter.
1152 *
1153 * Return allocated filter or NULL on error.
1154 */
1155 static struct lttng_filter_bytecode *copy_filter_bytecode(
1156 struct lttng_filter_bytecode *orig_f)
1157 {
1158 struct lttng_filter_bytecode *filter = NULL;
1159
1160 /* Copy filter bytecode */
1161 filter = zmalloc(sizeof(*filter) + orig_f->len);
1162 if (!filter) {
1163 PERROR("zmalloc alloc filter bytecode");
1164 goto error;
1165 }
1166
1167 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1168
1169 error:
1170 return filter;
1171 }
1172
1173 /*
1174 * Create a liblttng-ust filter bytecode from given bytecode.
1175 *
1176 * Return allocated filter or NULL on error.
1177 */
1178 static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
1179 struct lttng_filter_bytecode *orig_f)
1180 {
1181 struct lttng_ust_filter_bytecode *filter = NULL;
1182
1183 /* Copy filter bytecode */
1184 filter = zmalloc(sizeof(*filter) + orig_f->len);
1185 if (!filter) {
1186 PERROR("zmalloc alloc ust filter bytecode");
1187 goto error;
1188 }
1189
1190 assert(sizeof(struct lttng_filter_bytecode) ==
1191 sizeof(struct lttng_ust_filter_bytecode));
1192 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1193 error:
1194 return filter;
1195 }
1196
1197 /*
1198 * Find an ust_app using the sock and return it. RCU read side lock must be
1199 * held before calling this helper function.
1200 */
1201 struct ust_app *ust_app_find_by_sock(int sock)
1202 {
1203 struct lttng_ht_node_ulong *node;
1204 struct lttng_ht_iter iter;
1205
1206 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1207 node = lttng_ht_iter_get_node_ulong(&iter);
1208 if (node == NULL) {
1209 DBG2("UST app find by sock %d not found", sock);
1210 goto error;
1211 }
1212
1213 return caa_container_of(node, struct ust_app, sock_n);
1214
1215 error:
1216 return NULL;
1217 }
1218
1219 /*
1220 * Find an ust_app using the notify sock and return it. RCU read side lock must
1221 * be held before calling this helper function.
1222 */
1223 static struct ust_app *find_app_by_notify_sock(int sock)
1224 {
1225 struct lttng_ht_node_ulong *node;
1226 struct lttng_ht_iter iter;
1227
1228 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1229 &iter);
1230 node = lttng_ht_iter_get_node_ulong(&iter);
1231 if (node == NULL) {
1232 DBG2("UST app find by notify sock %d not found", sock);
1233 goto error;
1234 }
1235
1236 return caa_container_of(node, struct ust_app, notify_sock_n);
1237
1238 error:
1239 return NULL;
1240 }
1241
1242 /*
1243 * Lookup for an ust app event based on event name, filter bytecode and the
1244 * event loglevel.
1245 *
1246 * Return an ust_app_event object or NULL on error.
1247 */
1248 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1249 char *name, struct lttng_filter_bytecode *filter,
1250 int loglevel_value,
1251 const struct lttng_event_exclusion *exclusion)
1252 {
1253 struct lttng_ht_iter iter;
1254 struct lttng_ht_node_str *node;
1255 struct ust_app_event *event = NULL;
1256 struct ust_app_ht_key key;
1257
1258 assert(name);
1259 assert(ht);
1260
1261 /* Setup key for event lookup. */
1262 key.name = name;
1263 key.filter = filter;
1264 key.loglevel_type = loglevel_value;
1265 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1266 key.exclusion = exclusion;
1267
1268 /* Lookup using the event name as hash and a custom match fct. */
1269 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1270 ht_match_ust_app_event, &key, &iter.iter);
1271 node = lttng_ht_iter_get_node_str(&iter);
1272 if (node == NULL) {
1273 goto end;
1274 }
1275
1276 event = caa_container_of(node, struct ust_app_event, node);
1277
1278 end:
1279 return event;
1280 }
1281
1282 /*
1283 * Create the channel context on the tracer.
1284 *
1285 * Called with UST app session lock held.
1286 */
1287 static
1288 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1289 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1290 {
1291 int ret;
1292
1293 health_code_update();
1294
1295 pthread_mutex_lock(&app->sock_lock);
1296 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1297 ua_chan->obj, &ua_ctx->obj);
1298 pthread_mutex_unlock(&app->sock_lock);
1299 if (ret < 0) {
1300 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1301 ERR("UST app create channel context failed for app (pid: %d) "
1302 "with ret %d", app->pid, ret);
1303 } else {
1304 /*
1305 * This is normal behavior, an application can die during the
1306 * creation process. Don't report an error so the execution can
1307 * continue normally.
1308 */
1309 ret = 0;
1310 DBG3("UST app disable event failed. Application is dead.");
1311 }
1312 goto error;
1313 }
1314
1315 ua_ctx->handle = ua_ctx->obj->handle;
1316
1317 DBG2("UST app context handle %d created successfully for channel %s",
1318 ua_ctx->handle, ua_chan->name);
1319
1320 error:
1321 health_code_update();
1322 return ret;
1323 }
1324
1325 /*
1326 * Set the filter on the tracer.
1327 */
1328 static
1329 int set_ust_event_filter(struct ust_app_event *ua_event,
1330 struct ust_app *app)
1331 {
1332 int ret;
1333 struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
1334
1335 health_code_update();
1336
1337 if (!ua_event->filter) {
1338 ret = 0;
1339 goto error;
1340 }
1341
1342 ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
1343 if (!ust_bytecode) {
1344 ret = -LTTNG_ERR_NOMEM;
1345 goto error;
1346 }
1347 pthread_mutex_lock(&app->sock_lock);
1348 ret = ustctl_set_filter(app->sock, ust_bytecode,
1349 ua_event->obj);
1350 pthread_mutex_unlock(&app->sock_lock);
1351 if (ret < 0) {
1352 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1353 ERR("UST app event %s filter failed for app (pid: %d) "
1354 "with ret %d", ua_event->attr.name, app->pid, ret);
1355 } else {
1356 /*
1357 * This is normal behavior, an application can die during the
1358 * creation process. Don't report an error so the execution can
1359 * continue normally.
1360 */
1361 ret = 0;
1362 DBG3("UST app filter event failed. Application is dead.");
1363 }
1364 goto error;
1365 }
1366
1367 DBG2("UST filter set successfully for event %s", ua_event->name);
1368
1369 error:
1370 health_code_update();
1371 free(ust_bytecode);
1372 return ret;
1373 }
1374
1375 static
1376 struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
1377 struct lttng_event_exclusion *exclusion)
1378 {
1379 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1380 size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1381 LTTNG_UST_SYM_NAME_LEN * exclusion->count;
1382
1383 ust_exclusion = zmalloc(exclusion_alloc_size);
1384 if (!ust_exclusion) {
1385 PERROR("malloc");
1386 goto end;
1387 }
1388
1389 assert(sizeof(struct lttng_event_exclusion) ==
1390 sizeof(struct lttng_ust_event_exclusion));
1391 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1392 end:
1393 return ust_exclusion;
1394 }
1395
1396 /*
1397 * Set event exclusions on the tracer.
1398 */
1399 static
1400 int set_ust_event_exclusion(struct ust_app_event *ua_event,
1401 struct ust_app *app)
1402 {
1403 int ret;
1404 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1405
1406 health_code_update();
1407
1408 if (!ua_event->exclusion || !ua_event->exclusion->count) {
1409 ret = 0;
1410 goto error;
1411 }
1412
1413 ust_exclusion = create_ust_exclusion_from_exclusion(
1414 ua_event->exclusion);
1415 if (!ust_exclusion) {
1416 ret = -LTTNG_ERR_NOMEM;
1417 goto error;
1418 }
1419 pthread_mutex_lock(&app->sock_lock);
1420 ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
1421 pthread_mutex_unlock(&app->sock_lock);
1422 if (ret < 0) {
1423 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1424 ERR("UST app event %s exclusions failed for app (pid: %d) "
1425 "with ret %d", ua_event->attr.name, app->pid, ret);
1426 } else {
1427 /*
1428 * This is normal behavior, an application can die during the
1429 * creation process. Don't report an error so the execution can
1430 * continue normally.
1431 */
1432 ret = 0;
1433 DBG3("UST app event exclusion failed. Application is dead.");
1434 }
1435 goto error;
1436 }
1437
1438 DBG2("UST exclusion set successfully for event %s", ua_event->name);
1439
1440 error:
1441 health_code_update();
1442 free(ust_exclusion);
1443 return ret;
1444 }
1445
1446 /*
1447 * Disable the specified event on to UST tracer for the UST session.
1448 */
1449 static int disable_ust_event(struct ust_app *app,
1450 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1451 {
1452 int ret;
1453
1454 health_code_update();
1455
1456 pthread_mutex_lock(&app->sock_lock);
1457 ret = ustctl_disable(app->sock, ua_event->obj);
1458 pthread_mutex_unlock(&app->sock_lock);
1459 if (ret < 0) {
1460 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1461 ERR("UST app event %s disable failed for app (pid: %d) "
1462 "and session handle %d with ret %d",
1463 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1464 } else {
1465 /*
1466 * This is normal behavior, an application can die during the
1467 * creation process. Don't report an error so the execution can
1468 * continue normally.
1469 */
1470 ret = 0;
1471 DBG3("UST app disable event failed. Application is dead.");
1472 }
1473 goto error;
1474 }
1475
1476 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1477 ua_event->attr.name, app->pid);
1478
1479 error:
1480 health_code_update();
1481 return ret;
1482 }
1483
1484 /*
1485 * Disable the specified channel on to UST tracer for the UST session.
1486 */
1487 static int disable_ust_channel(struct ust_app *app,
1488 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1489 {
1490 int ret;
1491
1492 health_code_update();
1493
1494 pthread_mutex_lock(&app->sock_lock);
1495 ret = ustctl_disable(app->sock, ua_chan->obj);
1496 pthread_mutex_unlock(&app->sock_lock);
1497 if (ret < 0) {
1498 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1499 ERR("UST app channel %s disable failed for app (pid: %d) "
1500 "and session handle %d with ret %d",
1501 ua_chan->name, app->pid, ua_sess->handle, ret);
1502 } else {
1503 /*
1504 * This is normal behavior, an application can die during the
1505 * creation process. Don't report an error so the execution can
1506 * continue normally.
1507 */
1508 ret = 0;
1509 DBG3("UST app disable channel failed. Application is dead.");
1510 }
1511 goto error;
1512 }
1513
1514 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1515 ua_chan->name, app->pid);
1516
1517 error:
1518 health_code_update();
1519 return ret;
1520 }
1521
1522 /*
1523 * Enable the specified channel on to UST tracer for the UST session.
1524 */
1525 static int enable_ust_channel(struct ust_app *app,
1526 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1527 {
1528 int ret;
1529
1530 health_code_update();
1531
1532 pthread_mutex_lock(&app->sock_lock);
1533 ret = ustctl_enable(app->sock, ua_chan->obj);
1534 pthread_mutex_unlock(&app->sock_lock);
1535 if (ret < 0) {
1536 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1537 ERR("UST app channel %s enable failed for app (pid: %d) "
1538 "and session handle %d with ret %d",
1539 ua_chan->name, app->pid, ua_sess->handle, ret);
1540 } else {
1541 /*
1542 * This is normal behavior, an application can die during the
1543 * creation process. Don't report an error so the execution can
1544 * continue normally.
1545 */
1546 ret = 0;
1547 DBG3("UST app enable channel failed. Application is dead.");
1548 }
1549 goto error;
1550 }
1551
1552 ua_chan->enabled = 1;
1553
1554 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1555 ua_chan->name, app->pid);
1556
1557 error:
1558 health_code_update();
1559 return ret;
1560 }
1561
1562 /*
1563 * Enable the specified event on to UST tracer for the UST session.
1564 */
1565 static int enable_ust_event(struct ust_app *app,
1566 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1567 {
1568 int ret;
1569
1570 health_code_update();
1571
1572 pthread_mutex_lock(&app->sock_lock);
1573 ret = ustctl_enable(app->sock, ua_event->obj);
1574 pthread_mutex_unlock(&app->sock_lock);
1575 if (ret < 0) {
1576 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1577 ERR("UST app event %s enable failed for app (pid: %d) "
1578 "and session handle %d with ret %d",
1579 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1580 } else {
1581 /*
1582 * This is normal behavior, an application can die during the
1583 * creation process. Don't report an error so the execution can
1584 * continue normally.
1585 */
1586 ret = 0;
1587 DBG3("UST app enable event failed. Application is dead.");
1588 }
1589 goto error;
1590 }
1591
1592 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1593 ua_event->attr.name, app->pid);
1594
1595 error:
1596 health_code_update();
1597 return ret;
1598 }
1599
1600 /*
1601 * Send channel and stream buffer to application.
1602 *
1603 * Return 0 on success. On error, a negative value is returned.
1604 */
1605 static int send_channel_pid_to_ust(struct ust_app *app,
1606 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1607 {
1608 int ret;
1609 struct ust_app_stream *stream, *stmp;
1610
1611 assert(app);
1612 assert(ua_sess);
1613 assert(ua_chan);
1614
1615 health_code_update();
1616
1617 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1618 app->sock);
1619
1620 /* Send channel to the application. */
1621 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1622 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1623 ret = -ENOTCONN; /* Caused by app exiting. */
1624 goto error;
1625 } else if (ret < 0) {
1626 goto error;
1627 }
1628
1629 health_code_update();
1630
1631 /* Send all streams to application. */
1632 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1633 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1634 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1635 ret = -ENOTCONN; /* Caused by app exiting. */
1636 goto error;
1637 } else if (ret < 0) {
1638 goto error;
1639 }
1640 /* We don't need the stream anymore once sent to the tracer. */
1641 cds_list_del(&stream->list);
1642 delete_ust_app_stream(-1, stream, app);
1643 }
1644 /* Flag the channel that it is sent to the application. */
1645 ua_chan->is_sent = 1;
1646
1647 error:
1648 health_code_update();
1649 return ret;
1650 }
1651
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Creates the event object on the tracer, then pushes its filter and
 * exclusions (when present) and finally enables it when the shadow copy
 * marks it enabled, since events are created disabled on the tracer side.
 *
 * Should be called with session mutex held.
 *
 * Return 0 on success (including when the application exited during the
 * operation) or a negative error code.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_event_exclusion(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/*
	 * The tracer creates events disabled, so explicitly enable the event
	 * when the shadow copy flags it as enabled. (A previous comment here
	 * said the opposite; it was stale.)
	 */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
				/* Fallthrough — only reachable when NDEBUG disables assert. */
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
1741
1742 /*
1743 * Copy data between an UST app event and a LTT event.
1744 */
1745 static void shadow_copy_event(struct ust_app_event *ua_event,
1746 struct ltt_ust_event *uevent)
1747 {
1748 size_t exclusion_alloc_size;
1749
1750 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1751 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1752
1753 ua_event->enabled = uevent->enabled;
1754
1755 /* Copy event attributes */
1756 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1757
1758 /* Copy filter bytecode */
1759 if (uevent->filter) {
1760 ua_event->filter = copy_filter_bytecode(uevent->filter);
1761 /* Filter might be NULL here in case of ENONEM. */
1762 }
1763
1764 /* Copy exclusion data */
1765 if (uevent->exclusion) {
1766 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
1767 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
1768 ua_event->exclusion = zmalloc(exclusion_alloc_size);
1769 if (ua_event->exclusion == NULL) {
1770 PERROR("malloc");
1771 } else {
1772 memcpy(ua_event->exclusion, uevent->exclusion,
1773 exclusion_alloc_size);
1774 }
1775 }
1776 }
1777
1778 /*
1779 * Copy data between an UST app channel and a LTT channel.
1780 */
1781 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
1782 struct ltt_ust_channel *uchan)
1783 {
1784 struct lttng_ht_iter iter;
1785 struct ltt_ust_event *uevent;
1786 struct ltt_ust_context *uctx;
1787 struct ust_app_event *ua_event;
1788
1789 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
1790
1791 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
1792 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1793
1794 ua_chan->tracefile_size = uchan->tracefile_size;
1795 ua_chan->tracefile_count = uchan->tracefile_count;
1796
1797 /* Copy event attributes since the layout is different. */
1798 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
1799 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
1800 ua_chan->attr.overwrite = uchan->attr.overwrite;
1801 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
1802 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
1803 ua_chan->attr.output = uchan->attr.output;
1804 /*
1805 * Note that the attribute channel type is not set since the channel on the
1806 * tracing registry side does not have this information.
1807 */
1808
1809 ua_chan->enabled = uchan->enabled;
1810 ua_chan->tracing_channel_id = uchan->id;
1811
1812 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
1813 struct ust_app_ctx *ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
1814
1815 if (ua_ctx == NULL) {
1816 continue;
1817 }
1818 lttng_ht_node_init_ulong(&ua_ctx->node,
1819 (unsigned long) ua_ctx->ctx.ctx);
1820 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
1821 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
1822 }
1823
1824 /* Copy all events from ltt ust channel to ust app channel */
1825 cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
1826 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
1827 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
1828 if (ua_event == NULL) {
1829 DBG2("UST event %s not found on shadow copy channel",
1830 uevent->attr.name);
1831 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
1832 if (ua_event == NULL) {
1833 continue;
1834 }
1835 shadow_copy_event(ua_event, uevent);
1836 add_unique_ust_app_event(ua_chan, ua_event);
1837 }
1838 }
1839
1840 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
1841 }
1842
/*
 * Copy data between a UST app session and a regular LTT session.
 *
 * Initializes the per-application session (ids, credentials, buffer type,
 * consumer output reference, trace and shm paths) from the tracing session,
 * then replicates every channel of the global domain that the app session
 * does not already contain.
 *
 * On error, the reference taken on usess->consumer is released and the
 * channels are not copied.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter;
	struct ltt_ust_channel *uchan;
	struct ust_app_channel *ua_chan;
	time_t rawtime;
	struct tm *timeinfo;
	char datetime[16];
	int ret;
	char tmp_shm_path[PATH_MAX];

	/* Get date and time for unique app path */
	time(&rawtime);
	timeinfo = localtime(&rawtime);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	/* Real credentials come from the traced application... */
	ua_sess->uid = app->uid;
	ua_sess->gid = app->gid;
	/* ...effective credentials from the tracing session owner. */
	ua_sess->euid = usess->uid;
	ua_sess->egid = usess->gid;
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	/* The trace subdirectory depends on the buffer ownership model. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
			DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
			datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
			DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
		break;
	default:
		assert(0);
		goto error;
	}
	if (ret < 0) {
		PERROR("asprintf UST shadow copy session");
		assert(0);
		goto error;
	}

	strncpy(ua_sess->root_shm_path, usess->root_shm_path,
		sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path,
		sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
		/* Append the per-PID/per-UID suffix to the explicit shm path. */
		switch (ua_sess->buffer_type) {
		case LTTNG_BUFFER_PER_PID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
				app->name, app->pid, datetime);
			break;
		case LTTNG_BUFFER_PER_UID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
				DEFAULT_UST_TRACE_UID_PATH,
				app->uid, app->bits_per_long);
			break;
		default:
			assert(0);
			goto error;
		}
		if (ret < 0) {
			PERROR("sprintf UST shadow copy session");
			assert(0);
			goto error;
		}
		/* strncat bound leaves room for the terminating NUL. */
		strncat(ua_sess->shm_path, tmp_shm_path,
			sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
		ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	}

	/* Iterate over all channels in global domain. */
	cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
			uchan, node.node) {
		struct lttng_ht_iter uiter;

		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node != NULL) {
			/* Channel already exists on the app session. Continue. */
			continue;
		}

		DBG2("Channel %s not found on shadow session copy, creating it",
				uchan->name);
		ua_chan = alloc_ust_app_channel(uchan->name, ua_sess,
				&uchan->attr);
		if (ua_chan == NULL) {
			/* malloc failed FIXME: Might want to do handle ENOMEM .. */
			continue;
		}
		shadow_copy_channel(ua_chan, uchan);
		/*
		 * The concept of metadata channel does not exist on the tracing
		 * registry side of the session daemon so this can only be a per CPU
		 * channel and not metadata.
		 */
		ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

		lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
	}
	return;

error:
	/* Drop the reference taken on the consumer output above. */
	consumer_output_put(ua_sess->consumer);
}
1971
/*
 * Lookup session wrapper.
 *
 * Look up the UST app session matching the given tracing session in the
 * application's session hash table (keyed by tracing session id). The
 * result is returned through *iter.
 *
 * NOTE(review): the "__" name prefix is technically reserved for the
 * implementation; kept as-is since other call sites may depend on it.
 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
1982
1983 /*
1984 * Return ust app session from the app session hashtable using the UST session
1985 * id.
1986 */
1987 static struct ust_app_session *lookup_session_by_app(
1988 struct ltt_ust_session *usess, struct ust_app *app)
1989 {
1990 struct lttng_ht_iter iter;
1991 struct lttng_ht_node_u64 *node;
1992
1993 __lookup_session_by_app(usess, app, &iter);
1994 node = lttng_ht_iter_get_node_u64(&iter);
1995 if (node == NULL) {
1996 goto error;
1997 }
1998
1999 return caa_container_of(node, struct ust_app_session, node);
2000
2001 error:
2002 return NULL;
2003 }
2004
2005 /*
2006 * Setup buffer registry per PID for the given session and application. If none
2007 * is found, a new one is created, added to the global registry and
2008 * initialized. If regp is valid, it's set with the newly created object.
2009 *
2010 * Return 0 on success or else a negative value.
2011 */
2012 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2013 struct ust_app *app, struct buffer_reg_pid **regp)
2014 {
2015 int ret = 0;
2016 struct buffer_reg_pid *reg_pid;
2017
2018 assert(ua_sess);
2019 assert(app);
2020
2021 rcu_read_lock();
2022
2023 reg_pid = buffer_reg_pid_find(ua_sess->id);
2024 if (!reg_pid) {
2025 /*
2026 * This is the create channel path meaning that if there is NO
2027 * registry available, we have to create one for this session.
2028 */
2029 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
2030 ua_sess->root_shm_path, ua_sess->shm_path);
2031 if (ret < 0) {
2032 goto error;
2033 }
2034 } else {
2035 goto end;
2036 }
2037
2038 /* Initialize registry. */
2039 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2040 app->bits_per_long, app->uint8_t_alignment,
2041 app->uint16_t_alignment, app->uint32_t_alignment,
2042 app->uint64_t_alignment, app->long_alignment,
2043 app->byte_order, app->version.major,
2044 app->version.minor, reg_pid->root_shm_path,
2045 reg_pid->shm_path,
2046 ua_sess->euid, ua_sess->egid);
2047 if (ret < 0) {
2048 /*
2049 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2050 * destroy the buffer registry, because it is always expected
2051 * that if the buffer registry can be found, its ust registry is
2052 * non-NULL.
2053 */
2054 buffer_reg_pid_destroy(reg_pid);
2055 goto error;
2056 }
2057
2058 buffer_reg_pid_add(reg_pid);
2059
2060 DBG3("UST app buffer registry per PID created successfully");
2061
2062 end:
2063 if (regp) {
2064 *regp = reg_pid;
2065 }
2066 error:
2067 rcu_read_unlock();
2068 return ret;
2069 }
2070
2071 /*
2072 * Setup buffer registry per UID for the given session and application. If none
2073 * is found, a new one is created, added to the global registry and
2074 * initialized. If regp is valid, it's set with the newly created object.
2075 *
2076 * Return 0 on success or else a negative value.
2077 */
2078 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
2079 struct ust_app_session *ua_sess,
2080 struct ust_app *app, struct buffer_reg_uid **regp)
2081 {
2082 int ret = 0;
2083 struct buffer_reg_uid *reg_uid;
2084
2085 assert(usess);
2086 assert(app);
2087
2088 rcu_read_lock();
2089
2090 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2091 if (!reg_uid) {
2092 /*
2093 * This is the create channel path meaning that if there is NO
2094 * registry available, we have to create one for this session.
2095 */
2096 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
2097 LTTNG_DOMAIN_UST, &reg_uid,
2098 ua_sess->root_shm_path, ua_sess->shm_path);
2099 if (ret < 0) {
2100 goto error;
2101 }
2102 } else {
2103 goto end;
2104 }
2105
2106 /* Initialize registry. */
2107 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
2108 app->bits_per_long, app->uint8_t_alignment,
2109 app->uint16_t_alignment, app->uint32_t_alignment,
2110 app->uint64_t_alignment, app->long_alignment,
2111 app->byte_order, app->version.major,
2112 app->version.minor, reg_uid->root_shm_path,
2113 reg_uid->shm_path, usess->uid, usess->gid);
2114 if (ret < 0) {
2115 /*
2116 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2117 * destroy the buffer registry, because it is always expected
2118 * that if the buffer registry can be found, its ust registry is
2119 * non-NULL.
2120 */
2121 buffer_reg_uid_destroy(reg_uid, NULL);
2122 goto error;
2123 }
2124 /* Add node to teardown list of the session. */
2125 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2126
2127 buffer_reg_uid_add(reg_uid);
2128
2129 DBG3("UST app buffer registry per UID created successfully");
2130 end:
2131 if (regp) {
2132 *regp = reg_uid;
2133 }
2134 error:
2135 rcu_read_unlock();
2136 return ret;
2137 }
2138
2139 /*
2140 * Create a session on the tracer side for the given app.
2141 *
2142 * On success, ua_sess_ptr is populated with the session pointer or else left
2143 * untouched. If the session was created, is_created is set to 1. On error,
2144 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2145 * be NULL.
2146 *
2147 * Returns 0 on success or else a negative code which is either -ENOMEM or
2148 * -ENOTCONN which is the default code if the ustctl_create_session fails.
2149 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse the existing per-app session when one was already created. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Set up the buffer registry matching the session's buffer ownership. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* A handle of -1 means the session was never created on the tracer. */
	if (ua_sess->handle == -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			/* Map every failure except -ENOMEM to -ENOTCONN for the caller. */
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* The tracer returns the session handle on success. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
			ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
		lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
		lttng_ht_add_unique_ulong(app->ust_sessions_objd,
				&ua_sess->ust_objd_node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
2256
2257 /*
2258 * Match function for a hash table lookup of ust_app_ctx.
2259 *
2260 * It matches an ust app context based on the context type and, in the case
2261 * of perf counters, their name.
2262 */
2263 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2264 {
2265 struct ust_app_ctx *ctx;
2266 const struct lttng_ust_context_attr *key;
2267
2268 assert(node);
2269 assert(_key);
2270
2271 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2272 key = _key;
2273
2274 /* Context type */
2275 if (ctx->ctx.ctx != key->ctx) {
2276 goto no_match;
2277 }
2278
2279 switch(key->ctx) {
2280 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
2281 if (strncmp(key->u.perf_counter.name,
2282 ctx->ctx.u.perf_counter.name,
2283 sizeof(key->u.perf_counter.name))) {
2284 goto no_match;
2285 }
2286 break;
2287 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2288 if (strcmp(key->u.app_ctx.provider_name,
2289 ctx->ctx.u.app_ctx.provider_name) ||
2290 strcmp(key->u.app_ctx.ctx_name,
2291 ctx->ctx.u.app_ctx.ctx_name)) {
2292 goto no_match;
2293 }
2294 break;
2295 default:
2296 break;
2297 }
2298
2299 /* Match. */
2300 return 1;
2301
2302 no_match:
2303 return 0;
2304 }
2305
2306 /*
2307 * Lookup for an ust app context from an lttng_ust_context.
2308 *
2309 * Must be called while holding RCU read side lock.
2310 * Return an ust_app_ctx object or NULL on error.
2311 */
2312 static
2313 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2314 struct lttng_ust_context_attr *uctx)
2315 {
2316 struct lttng_ht_iter iter;
2317 struct lttng_ht_node_ulong *node;
2318 struct ust_app_ctx *app_ctx = NULL;
2319
2320 assert(uctx);
2321 assert(ht);
2322
2323 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2324 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2325 ht_match_ust_app_ctx, uctx, &iter.iter);
2326 node = lttng_ht_iter_get_node_ulong(&iter);
2327 if (!node) {
2328 goto end;
2329 }
2330
2331 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2332
2333 end:
2334 return app_ctx;
2335 }
2336
2337 /*
2338 * Create a context for the channel on the tracer.
2339 *
2340 * Called with UST app session lock held and a RCU read side lock.
2341 */
static
int create_ust_app_channel_context(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan,
		struct lttng_ust_context_attr *uctx,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app adding context to channel %s", ua_chan->name);

	/* Reject duplicates: the same context cannot be added twice. */
	ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
	if (ua_ctx) {
		ret = -EEXIST;
		goto error;
	}

	ua_ctx = alloc_ust_app_ctx(uctx);
	if (ua_ctx == NULL) {
		/* malloc failed */
		ret = -1;
		goto error;
	}

	/* Index the context by type and keep it on the channel's ordered list. */
	lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
	lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
	cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);

	/* Create the context on the tracer side. */
	ret = create_ust_channel_context(ua_chan, ua_ctx, app);
	if (ret < 0) {
		/*
		 * NOTE(review): on tracer-side failure the context stays linked in
		 * the channel's HT and list; presumably reclaimed when the channel
		 * is deleted — confirm against delete_ust_app_channel.
		 */
		goto error;
	}

error:
	return ret;
}
2378
2379 /*
2380 * Enable on the tracer side a ust app event for the session and channel.
2381 *
2382 * Called with UST app session lock held.
2383 */
2384 static
2385 int enable_ust_app_event(struct ust_app_session *ua_sess,
2386 struct ust_app_event *ua_event, struct ust_app *app)
2387 {
2388 int ret;
2389
2390 ret = enable_ust_event(app, ua_sess, ua_event);
2391 if (ret < 0) {
2392 goto error;
2393 }
2394
2395 ua_event->enabled = 1;
2396
2397 error:
2398 return ret;
2399 }
2400
2401 /*
2402 * Disable on the tracer side a ust app event for the session and channel.
2403 */
2404 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2405 struct ust_app_event *ua_event, struct ust_app *app)
2406 {
2407 int ret;
2408
2409 ret = disable_ust_event(app, ua_sess, ua_event);
2410 if (ret < 0) {
2411 goto error;
2412 }
2413
2414 ua_event->enabled = 0;
2415
2416 error:
2417 return ret;
2418 }
2419
2420 /*
2421 * Lookup ust app channel for session and disable it on the tracer side.
2422 */
2423 static
2424 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2425 struct ust_app_channel *ua_chan, struct ust_app *app)
2426 {
2427 int ret;
2428
2429 ret = disable_ust_channel(app, ua_sess, ua_chan);
2430 if (ret < 0) {
2431 goto error;
2432 }
2433
2434 ua_chan->enabled = 0;
2435
2436 error:
2437 return ret;
2438 }
2439
2440 /*
2441 * Lookup ust app channel for session and enable it on the tracer side. This
2442 * MUST be called with a RCU read side lock acquired.
2443 */
2444 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2445 struct ltt_ust_channel *uchan, struct ust_app *app)
2446 {
2447 int ret = 0;
2448 struct lttng_ht_iter iter;
2449 struct lttng_ht_node_str *ua_chan_node;
2450 struct ust_app_channel *ua_chan;
2451
2452 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2453 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2454 if (ua_chan_node == NULL) {
2455 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2456 uchan->name, ua_sess->tracing_id);
2457 goto error;
2458 }
2459
2460 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2461
2462 ret = enable_ust_channel(app, ua_sess, ua_chan);
2463 if (ret < 0) {
2464 goto error;
2465 }
2466
2467 error:
2468 return ret;
2469 }
2470
2471 /*
2472 * Ask the consumer to create a channel and get it if successful.
2473 *
2474 * Called with UST app session lock held.
2475 *
2476 * Return 0 on success or else a negative value.
2477 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the
	 * stream list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

	/* Error labels unwind the fd reservations in reverse order. */
error_destroy:
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2566
2567 /*
2568 * Duplicate the ust data object of the ust app stream and save it in the
2569 * buffer registry stream.
2570 *
2571 * Return 0 on success or else a negative value.
2572 */
2573 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2574 struct ust_app_stream *stream)
2575 {
2576 int ret;
2577
2578 assert(reg_stream);
2579 assert(stream);
2580
2581 /* Reserve the amount of file descriptor we need. */
2582 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2583 if (ret < 0) {
2584 ERR("Exhausted number of available FD upon duplicate stream");
2585 goto error;
2586 }
2587
2588 /* Duplicate object for stream once the original is in the registry. */
2589 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2590 reg_stream->obj.ust);
2591 if (ret < 0) {
2592 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2593 reg_stream->obj.ust, stream->obj, ret);
2594 lttng_fd_put(LTTNG_FD_APPS, 2);
2595 goto error;
2596 }
2597 stream->handle = stream->obj->handle;
2598
2599 error:
2600 return ret;
2601 }
2602
2603 /*
2604 * Duplicate the ust data object of the ust app. channel and save it in the
2605 * buffer registry channel.
2606 *
2607 * Return 0 on success or else a negative value.
2608 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/* Need one fd for the duplicated channel object. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for the channel once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2640
2641 /*
2642 * For a given channel buffer registry, setup all streams of the given ust
2643 * application channel.
2644 *
2645 * Return 0 on success or else a negative value.
2646 */
2647 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2648 struct ust_app_channel *ua_chan,
2649 struct ust_app *app)
2650 {
2651 int ret = 0;
2652 struct ust_app_stream *stream, *stmp;
2653
2654 assert(reg_chan);
2655 assert(ua_chan);
2656
2657 DBG2("UST app setup buffer registry stream");
2658
2659 /* Send all streams to application. */
2660 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2661 struct buffer_reg_stream *reg_stream;
2662
2663 ret = buffer_reg_stream_create(&reg_stream);
2664 if (ret < 0) {
2665 goto error;
2666 }
2667
2668 /*
2669 * Keep original pointer and nullify it in the stream so the delete
2670 * stream call does not release the object.
2671 */
2672 reg_stream->obj.ust = stream->obj;
2673 stream->obj = NULL;
2674 buffer_reg_stream_add(reg_stream, reg_chan);
2675
2676 /* We don't need the streams anymore. */
2677 cds_list_del(&stream->list);
2678 delete_ust_app_stream(-1, stream, app);
2679 }
2680
2681 error:
2682 return ret;
2683 }
2684
2685 /*
2686 * Create a buffer registry channel for the given session registry and
2687 * application channel object. If regp pointer is valid, it's set with the
2688 * created object. Important, the created object is NOT added to the session
2689 * registry hash table.
2690 *
2691 * Return 0 on success else a negative value.
2692 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(reg_chan);
	/* Mirror the consumer key and buffer geometry of the app channel. */
	reg_chan->consumer_key = ua_chan->key;
	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
	reg_chan->num_subbuf = ua_chan->attr.num_subbuf;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
		ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, reg_chan);

	if (regp) {
		*regp = reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
2734
2735 /*
2736 * Setup buffer registry channel for the given session registry and application
2737 * channel object. If regp pointer is valid, it's set with the created object.
2738 *
2739 * Return 0 on success else a negative value.
2740 */
2741 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2742 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
2743 struct ust_app *app)
2744 {
2745 int ret;
2746
2747 assert(reg_sess);
2748 assert(reg_chan);
2749 assert(ua_chan);
2750 assert(ua_chan->obj);
2751
2752 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2753
2754 /* Setup all streams for the registry. */
2755 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
2756 if (ret < 0) {
2757 goto error;
2758 }
2759
2760 reg_chan->obj.ust = ua_chan->obj;
2761 ua_chan->obj = NULL;
2762
2763 return 0;
2764
2765 error:
2766 buffer_reg_channel_remove(reg_sess, reg_chan);
2767 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2768 return ret;
2769 }
2770
2771 /*
2772 * Send buffer registry channel to the application.
2773 *
2774 * Return 0 on success else a negative value.
2775 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	/* Duplicate the registry's channel object for this application. */
	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		struct ust_app_stream stream;

		/* Duplicate a per-app stream object from the registry stream. */
		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream, app);
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				ret = -ENOTCONN; /* Caused by app exiting. */
			}
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream, app);
	}
	/* Mark the channel as fully sent to this application. */
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
2838
2839 /*
2840 * Create and send to the application the created buffers with per UID buffers.
2841 *
2842 * Return 0 on success else a negative value.
2843 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be found, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	/*
	 * If no registry channel exists yet, this is the first application for
	 * this UID/session: create the shared buffers. Otherwise only the send
	 * below is performed.
	 */
	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry,
				ua_chan, reg_chan, app);
		if (ret < 0) {
			ERR("Error setting up UST channel \"%s\"",
				ua_chan->name);
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	return ret;
}
2925
2926 /*
2927 * Create and send to the application the created buffers with per PID buffers.
2928 *
2929 * Called with UST app session lock held.
2930 *
2931 * Return 0 on success else a negative value.
2932 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	/* The UST app session lock is held, registry shall not be null. */
	assert(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
			ua_chan->name);
		goto error;
	}

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
			ua_chan->name);
		/*
		 * NOTE(review): the registry channel added above is not removed on
		 * this error path; presumably reclaimed when the session registry
		 * is destroyed — confirm.
		 */
		goto error;
	}

	/* Send the channel and its streams to the application. */
	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}
2982
2983 /*
2984 * From an already allocated ust app channel, create the channel buffers if
2985 * need and send it to the application. This MUST be called with a RCU read
2986 * side lock acquired.
2987 *
2988 * Called with UST app session lock held.
2989 *
2990 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2991 * the application exited concurrently.
2992 */
2993 static int do_create_channel(struct ust_app *app,
2994 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2995 struct ust_app_channel *ua_chan)
2996 {
2997 int ret;
2998
2999 assert(app);
3000 assert(usess);
3001 assert(ua_sess);
3002 assert(ua_chan);
3003
3004 /* Handle buffer type before sending the channel to the application. */
3005 switch (usess->buffer_type) {
3006 case LTTNG_BUFFER_PER_UID:
3007 {
3008 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3009 if (ret < 0) {
3010 goto error;
3011 }
3012 break;
3013 }
3014 case LTTNG_BUFFER_PER_PID:
3015 {
3016 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3017 if (ret < 0) {
3018 goto error;
3019 }
3020 break;
3021 }
3022 default:
3023 assert(0);
3024 ret = -EINVAL;
3025 goto error;
3026 }
3027
3028 /* Initialize ust objd object using the received handle and add it. */
3029 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3030 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
3031
3032 /* If channel is not enabled, disable it on the tracer */
3033 if (!ua_chan->enabled) {
3034 ret = disable_ust_channel(app, ua_sess, ua_chan);
3035 if (ret < 0) {
3036 goto error;
3037 }
3038 }
3039
3040 error:
3041 return ret;
3042 }
3043
3044 /*
3045 * Create UST app channel and create it on the tracer. Set ua_chanp of the
3046 * newly created channel if not NULL.
3047 *
3048 * Called with UST app session lock and RCU read-side lock held.
3049 *
3050 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3051 * the application exited concurrently.
3052 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		/* Channel already exists for this app session; reuse it. */
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	/* Create buffers and send the channel to the application. */
	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);

end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
3106
3107 /*
3108 * Create UST app event and create it on the tracer side.
3109 *
3110 * Called with ust app session mutex held.
3111 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	/* Get event node */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event != NULL) {
		/* The same event (name/filter/loglevel/exclusion) already exists. */
		ret = -EEXIST;
		goto end;
	}

	/* Does not exist so create one */
	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only malloc can failed so something is really wrong */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/* Not found previously means that it does not exist on the tracer */
		assert(ret != -LTTNG_UST_ERR_EXIST);
		goto error;
	}

	/* Register the event in the channel's event hash table. */
	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event %s for PID %d completed", ua_event->name,
			app->pid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event, app);
	return ret;
}
3158
3159 /*
3160 * Create UST metadata and open it on the tracer side.
3161 *
3162 * Called with UST app session lock held and RCU read side lock.
3163 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	/* The UST app session is held registry shall not be null. */
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * has not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept there. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/*
	 * Reached on success too: the local metadata channel object and its fd
	 * reservation are always released here.
	 */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
	return ret;
}
3257
3258 /*
3259 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3260 * acquired before calling this function.
3261 */
3262 struct ust_app *ust_app_find_by_pid(pid_t pid)
3263 {
3264 struct ust_app *app = NULL;
3265 struct lttng_ht_node_ulong *node;
3266 struct lttng_ht_iter iter;
3267
3268 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3269 node = lttng_ht_iter_get_node_ulong(&iter);
3270 if (node == NULL) {
3271 DBG2("UST app no found with pid %d", pid);
3272 goto error;
3273 }
3274
3275 DBG2("Found UST app by pid %d", pid);
3276
3277 app = caa_container_of(node, struct ust_app, pid_n);
3278
3279 error:
3280 return app;
3281 }
3282
3283 /*
3284 * Allocate and init an UST app object using the registration information and
3285 * the command socket. This is called when the command socket connects to the
3286 * session daemon.
3287 *
3288 * The object is returned on success or else NULL.
3289 */
3290 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
3291 {
3292 struct ust_app *lta = NULL;
3293
3294 assert(msg);
3295 assert(sock >= 0);
3296
3297 DBG3("UST app creating application for socket %d", sock);
3298
3299 if ((msg->bits_per_long == 64 &&
3300 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3301 || (msg->bits_per_long == 32 &&
3302 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
3303 ERR("Registration failed: application \"%s\" (pid: %d) has "
3304 "%d-bit long, but no consumerd for this size is available.\n",
3305 msg->name, msg->pid, msg->bits_per_long);
3306 goto error;
3307 }
3308
3309 lta = zmalloc(sizeof(struct ust_app));
3310 if (lta == NULL) {
3311 PERROR("malloc");
3312 goto error;
3313 }
3314
3315 lta->ppid = msg->ppid;
3316 lta->uid = msg->uid;
3317 lta->gid = msg->gid;
3318
3319 lta->bits_per_long = msg->bits_per_long;
3320 lta->uint8_t_alignment = msg->uint8_t_alignment;
3321 lta->uint16_t_alignment = msg->uint16_t_alignment;
3322 lta->uint32_t_alignment = msg->uint32_t_alignment;
3323 lta->uint64_t_alignment = msg->uint64_t_alignment;
3324 lta->long_alignment = msg->long_alignment;
3325 lta->byte_order = msg->byte_order;
3326
3327 lta->v_major = msg->major;
3328 lta->v_minor = msg->minor;
3329 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3330 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3331 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3332 lta->notify_sock = -1;
3333
3334 /* Copy name and make sure it's NULL terminated. */
3335 strncpy(lta->name, msg->name, sizeof(lta->name));
3336 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3337
3338 /*
3339 * Before this can be called, when receiving the registration information,
3340 * the application compatibility is checked. So, at this point, the
3341 * application can work with this session daemon.
3342 */
3343 lta->compatible = 1;
3344
3345 lta->pid = msg->pid;
3346 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
3347 lta->sock = sock;
3348 pthread_mutex_init(&lta->sock_lock, NULL);
3349 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
3350
3351 CDS_INIT_LIST_HEAD(&lta->teardown_head);
3352 error:
3353 return lta;
3354 }
3355
3356 /*
3357 * For a given application object, add it to every hash table.
3358 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	/* The notify socket must have been received before publishing the app. */
	assert(app->notify_sock >= 0);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
3390
3391 /*
3392 * Set the application version into the object.
3393 *
3394 * Return 0 on success else a negative value either an errno code or a
3395 * LTTng-UST error code.
3396 */
3397 int ust_app_version(struct ust_app *app)
3398 {
3399 int ret;
3400
3401 assert(app);
3402
3403 pthread_mutex_lock(&app->sock_lock);
3404 ret = ustctl_tracer_version(app->sock, &app->version);
3405 pthread_mutex_unlock(&app->sock_lock);
3406 if (ret < 0) {
3407 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3408 ERR("UST app %d version failed with ret %d", app->sock, ret);
3409 } else {
3410 DBG3("UST app %d version failed. Application is dead", app->sock);
3411 }
3412 }
3413
3414 return ret;
3415 }
3416
3417 /*
3418 * Unregister app by removing it from the global traceable app list and freeing
3419 * the data struct.
3420 *
3421 * The socket is already closed at this point so no close to sock.
3422 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter ust_app_sock_iter;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/*
	 * For per-PID buffers, perform "push metadata" and flush all
	 * application streams before removing app from hash tables,
	 * ensuring proper behavior of data_pending check.
	 * Remove sessions so they are not visible during deletion.
	 */
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
			(void) ust_app_flush_app_session(lta, ua_sess);
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		/* Session is already being torn down; skip it. */
		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * NOTE(review): "upon registration" above presumably means "upon
		 * unregistration" — confirm against history before changing.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}
		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);

		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Remove application from PID hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Free memory */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3536
3537 /*
3538 * Fill events array with all events name of all registered apps.
3539 */
int ust_app_list_events(struct lttng_event **events)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event *tmp_event;

	/* Grow-on-demand array; ownership transfers to the caller on success. */
	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app events");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_tracepoint_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/* Hold the sock lock for the whole listing session of this app. */
		pthread_mutex_lock(&app->sock_lock);
		handle = ustctl_tracepoint_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list events getting handle failed for app pid %d",
						app->pid);
			}
			pthread_mutex_unlock(&app->sock_lock);
			continue;
		}

		while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
				&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				int release_ret;

				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list get failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list get failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Continue normal execution.
					 */
					break;
				}
				/* Fatal error: free the array and bail out entirely. */
				free(tmp_event);
				release_ret = ustctl_release_handle(app->sock, handle);
				if (release_ret < 0 &&
						release_ret != -LTTNG_UST_ERR_EXITING &&
						release_ret != -EPIPE) {
					ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
				}
				pthread_mutex_unlock(&app->sock_lock);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event *new_tmp_event;
				size_t new_nbmem;

				/* Double the capacity each time we run out. */
				new_nbmem = nbmem << 1;
				DBG2("Reallocating event list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
					new_nbmem * sizeof(struct lttng_event));
				if (new_tmp_event == NULL) {
					int release_ret;

					PERROR("realloc ust app events");
					free(tmp_event);
					ret = -ENOMEM;
					release_ret = ustctl_release_handle(app->sock, handle);
					if (release_ret < 0 &&
							release_ret != -LTTNG_UST_ERR_EXITING &&
							release_ret != -EPIPE) {
						ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
					}
					pthread_mutex_unlock(&app->sock_lock);
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
					(new_nbmem - nbmem) * sizeof(struct lttng_event));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}
			/* Copy the tracepoint description into the output entry. */
			memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
			tmp_event[count].loglevel = uiter.loglevel;
			tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
			tmp_event[count].pid = app->pid;
			tmp_event[count].enabled = -1;
			count++;
		}
		ret = ustctl_release_handle(app->sock, handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
		}
	}

	/* On success, return the number of events and hand the array over. */
	ret = count;
	*events = tmp_event;

	DBG2("UST app list events done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
3667
3668 /*
3669 * Fill events array with all events name of all registered apps.
3670 */
3671 int ust_app_list_event_fields(struct lttng_event_field **fields)
3672 {
3673 int ret, handle;
3674 size_t nbmem, count = 0;
3675 struct lttng_ht_iter iter;
3676 struct ust_app *app;
3677 struct lttng_event_field *tmp_event;
3678
3679 nbmem = UST_APP_EVENT_LIST_SIZE;
3680 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3681 if (tmp_event == NULL) {
3682 PERROR("zmalloc ust app event fields");
3683 ret = -ENOMEM;
3684 goto error;
3685 }
3686
3687 rcu_read_lock();
3688
3689 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3690 struct lttng_ust_field_iter uiter;
3691
3692 health_code_update();
3693
3694 if (!app->compatible) {
3695 /*
3696 * TODO: In time, we should notice the caller of this error by
3697 * telling him that this is a version error.
3698 */
3699 continue;
3700 }
3701 pthread_mutex_lock(&app->sock_lock);
3702 handle = ustctl_tracepoint_field_list(app->sock);
3703 if (handle < 0) {
3704 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3705 ERR("UST app list field getting handle failed for app pid %d",
3706 app->pid);
3707 }
3708 pthread_mutex_unlock(&app->sock_lock);
3709 continue;
3710 }
3711
3712 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3713 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3714 /* Handle ustctl error. */
3715 if (ret < 0) {
3716 int release_ret;
3717
3718 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3719 ERR("UST app tp list field failed for app %d with ret %d",
3720 app->sock, ret);
3721 } else {
3722 DBG3("UST app tp list field failed. Application is dead");
3723 /*
3724 * This is normal behavior, an application can die during the
3725 * creation process. Don't report an error so the execution can
3726 * continue normally. Reset list and count for next app.
3727 */
3728 break;
3729 }
3730 free(tmp_event);
3731 release_ret = ustctl_release_handle(app->sock, handle);
3732 pthread_mutex_unlock(&app->sock_lock);
3733 if (release_ret < 0 &&
3734 release_ret != -LTTNG_UST_ERR_EXITING &&
3735 release_ret != -EPIPE) {
3736 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3737 }
3738 goto rcu_error;
3739 }
3740
3741 health_code_update();
3742 if (count >= nbmem) {
3743 /* In case the realloc fails, we free the memory */
3744 struct lttng_event_field *new_tmp_event;
3745 size_t new_nbmem;
3746
3747 new_nbmem = nbmem << 1;
3748 DBG2("Reallocating event field list from %zu to %zu entries",
3749 nbmem, new_nbmem);
3750 new_tmp_event = realloc(tmp_event,
3751 new_nbmem * sizeof(struct lttng_event_field));
3752 if (new_tmp_event == NULL) {
3753 int release_ret;
3754
3755 PERROR("realloc ust app event fields");
3756 free(tmp_event);
3757 ret = -ENOMEM;
3758 release_ret = ustctl_release_handle(app->sock, handle);
3759 pthread_mutex_unlock(&app->sock_lock);
3760 if (release_ret &&
3761 release_ret != -LTTNG_UST_ERR_EXITING &&
3762 release_ret != -EPIPE) {
3763 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3764 }
3765 goto rcu_error;
3766 }
3767 /* Zero the new memory */
3768 memset(new_tmp_event + nbmem, 0,
3769 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
3770 nbmem = new_nbmem;
3771 tmp_event = new_tmp_event;
3772 }
3773
3774 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3775 /* Mapping between these enums matches 1 to 1. */
3776 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
3777 tmp_event[count].nowrite = uiter.nowrite;
3778
3779 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3780 tmp_event[count].event.loglevel = uiter.loglevel;
3781 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
3782 tmp_event[count].event.pid = app->pid;
3783 tmp_event[count].event.enabled = -1;
3784 count++;
3785 }
3786 ret = ustctl_release_handle(app->sock, handle);
3787 pthread_mutex_unlock(&app->sock_lock);
3788 if (ret < 0 &&
3789 ret != -LTTNG_UST_ERR_EXITING &&
3790 ret != -EPIPE) {
3791 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3792 }
3793 }
3794
3795 ret = count;
3796 *fields = tmp_event;
3797
3798 DBG2("UST app list event fields done (%zu events)", count);
3799
3800 rcu_error:
3801 rcu_read_unlock();
3802 error:
3803 health_code_update();
3804 return ret;
3805 }
3806
3807 /*
3808 * Free and clean all traceable apps of the global list.
3809 *
3810 * Should _NOT_ be called with RCU read-side lock held.
3811 */
3812 void ust_app_clean_list(void)
3813 {
3814 int ret;
3815 struct ust_app *app;
3816 struct lttng_ht_iter iter;
3817
3818 DBG2("UST app cleaning registered apps hash table");
3819
3820 rcu_read_lock();
3821
3822 if (ust_app_ht) {
3823 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3824 ret = lttng_ht_del(ust_app_ht, &iter);
3825 assert(!ret);
3826 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3827 }
3828 }
3829
3830 /* Cleanup socket hash table */
3831 if (ust_app_ht_by_sock) {
3832 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3833 sock_n.node) {
3834 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3835 assert(!ret);
3836 }
3837 }
3838
3839 /* Cleanup notify socket hash table */
3840 if (ust_app_ht_by_notify_sock) {
3841 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3842 notify_sock_n.node) {
3843 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3844 assert(!ret);
3845 }
3846 }
3847 rcu_read_unlock();
3848
3849 /* Destroy is done only when the ht is empty */
3850 if (ust_app_ht) {
3851 ht_cleanup_push(ust_app_ht);
3852 }
3853 if (ust_app_ht_by_sock) {
3854 ht_cleanup_push(ust_app_ht_by_sock);
3855 }
3856 if (ust_app_ht_by_notify_sock) {
3857 ht_cleanup_push(ust_app_ht_by_notify_sock);
3858 }
3859 }
3860
3861 /*
3862 * Init UST app hash table.
3863 */
3864 int ust_app_ht_alloc(void)
3865 {
3866 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3867 if (!ust_app_ht) {
3868 return -1;
3869 }
3870 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3871 if (!ust_app_ht_by_sock) {
3872 return -1;
3873 }
3874 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3875 if (!ust_app_ht_by_notify_sock) {
3876 return -1;
3877 }
3878 return 0;
3879 }
3880
3881 /*
3882 * For a specific UST session, disable the channel for all registered apps.
3883 */
3884 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3885 struct ltt_ust_channel *uchan)
3886 {
3887 int ret = 0;
3888 struct lttng_ht_iter iter;
3889 struct lttng_ht_node_str *ua_chan_node;
3890 struct ust_app *app;
3891 struct ust_app_session *ua_sess;
3892 struct ust_app_channel *ua_chan;
3893
3894 if (usess == NULL || uchan == NULL) {
3895 ERR("Disabling UST global channel with NULL values");
3896 ret = -1;
3897 goto error;
3898 }
3899
3900 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
3901 uchan->name, usess->id);
3902
3903 rcu_read_lock();
3904
3905 /* For every registered applications */
3906 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3907 struct lttng_ht_iter uiter;
3908 if (!app->compatible) {
3909 /*
3910 * TODO: In time, we should notice the caller of this error by
3911 * telling him that this is a version error.
3912 */
3913 continue;
3914 }
3915 ua_sess = lookup_session_by_app(usess, app);
3916 if (ua_sess == NULL) {
3917 continue;
3918 }
3919
3920 /* Get channel */
3921 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3922 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3923 /* If the session if found for the app, the channel must be there */
3924 assert(ua_chan_node);
3925
3926 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3927 /* The channel must not be already disabled */
3928 assert(ua_chan->enabled == 1);
3929
3930 /* Disable channel onto application */
3931 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3932 if (ret < 0) {
3933 /* XXX: We might want to report this error at some point... */
3934 continue;
3935 }
3936 }
3937
3938 rcu_read_unlock();
3939
3940 error:
3941 return ret;
3942 }
3943
3944 /*
3945 * For a specific UST session, enable the channel for all registered apps.
3946 */
3947 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3948 struct ltt_ust_channel *uchan)
3949 {
3950 int ret = 0;
3951 struct lttng_ht_iter iter;
3952 struct ust_app *app;
3953 struct ust_app_session *ua_sess;
3954
3955 if (usess == NULL || uchan == NULL) {
3956 ERR("Adding UST global channel to NULL values");
3957 ret = -1;
3958 goto error;
3959 }
3960
3961 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3962 uchan->name, usess->id);
3963
3964 rcu_read_lock();
3965
3966 /* For every registered applications */
3967 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3968 if (!app->compatible) {
3969 /*
3970 * TODO: In time, we should notice the caller of this error by
3971 * telling him that this is a version error.
3972 */
3973 continue;
3974 }
3975 ua_sess = lookup_session_by_app(usess, app);
3976 if (ua_sess == NULL) {
3977 continue;
3978 }
3979
3980 /* Enable channel onto application */
3981 ret = enable_ust_app_channel(ua_sess, uchan, app);
3982 if (ret < 0) {
3983 /* XXX: We might want to report this error at some point... */
3984 continue;
3985 }
3986 }
3987
3988 rcu_read_unlock();
3989
3990 error:
3991 return ret;
3992 }
3993
3994 /*
3995 * Disable an event in a channel and for a specific session.
3996 */
3997 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3998 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3999 {
4000 int ret = 0;
4001 struct lttng_ht_iter iter, uiter;
4002 struct lttng_ht_node_str *ua_chan_node;
4003 struct ust_app *app;
4004 struct ust_app_session *ua_sess;
4005 struct ust_app_channel *ua_chan;
4006 struct ust_app_event *ua_event;
4007
4008 DBG("UST app disabling event %s for all apps in channel "
4009 "%s for session id %" PRIu64,
4010 uevent->attr.name, uchan->name, usess->id);
4011
4012 rcu_read_lock();
4013
4014 /* For all registered applications */
4015 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4016 if (!app->compatible) {
4017 /*
4018 * TODO: In time, we should notice the caller of this error by
4019 * telling him that this is a version error.
4020 */
4021 continue;
4022 }
4023 ua_sess = lookup_session_by_app(usess, app);
4024 if (ua_sess == NULL) {
4025 /* Next app */
4026 continue;
4027 }
4028
4029 /* Lookup channel in the ust app session */
4030 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4031 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4032 if (ua_chan_node == NULL) {
4033 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
4034 "Skipping", uchan->name, usess->id, app->pid);
4035 continue;
4036 }
4037 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4038
4039 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4040 uevent->filter, uevent->attr.loglevel,
4041 uevent->exclusion);
4042 if (ua_event == NULL) {
4043 DBG2("Event %s not found in channel %s for app pid %d."
4044 "Skipping", uevent->attr.name, uchan->name, app->pid);
4045 continue;
4046 }
4047
4048 ret = disable_ust_app_event(ua_sess, ua_event, app);
4049 if (ret < 0) {
4050 /* XXX: Report error someday... */
4051 continue;
4052 }
4053 }
4054
4055 rcu_read_unlock();
4056
4057 return ret;
4058 }
4059
4060 /*
4061 * For a specific UST session, create the channel for all registered apps.
4062 */
int ust_app_create_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0, created;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;

	/* Very wrong code flow */
	assert(usess);
	assert(uchan);

	DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/* Skip apps excluded by any of the PID/UID/GID trackers. */
		if (!(trace_ust_id_tracker_lookup(LTTNG_TRACKER_VPID, usess, app->pid)
				&& trace_ust_id_tracker_lookup(LTTNG_TRACKER_VUID, usess, app->uid)
				&& trace_ust_id_tracker_lookup(LTTNG_TRACKER_VGID, usess, app->gid))) {
			/* Skip. */
			continue;
		}

		/*
		 * Create session on the tracer side and add it to app session HT. Note
		 * that if session exist, it will simply return a pointer to the ust
		 * app session.
		 */
		ret = create_ust_app_session(usess, app, &ua_sess, &created);
		if (ret < 0) {
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0; /* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
		assert(ua_sess);

		pthread_mutex_lock(&ua_sess->lock);

		/* Session torn down concurrently; skip this app. */
		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
				sizeof(uchan->name))) {
			/* Metadata channel: only record its attributes for later. */
			copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
			ret = 0;
		} else {
			/* Create channel onto application. We don't need the chan ref. */
			ret = create_ust_app_channel(ua_sess, uchan, app,
					LTTNG_UST_CHAN_PER_CPU, usess, NULL);
		}
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			/* Cleanup the created session if it's the case. */
			if (created) {
				destroy_app_session(app, ua_sess);
			}
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0; /* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
	}

error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
4161
4162 /*
4163 * Enable event for a specific session and channel on the tracer.
4164 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps. and enabled on the
	 * tracer also.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		/* Session torn down concurrently; skip this app. */
		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/*
		 * It is possible that the channel cannot be found is
		 * the channel/event creation occurs concurrently with
		 * an application exit.
		 */
		if (!ua_chan_node) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d."
					"Skipping app", uevent->attr.name, app->pid);
			goto next_app;
		}

		/* Any failure here aborts the whole loop with an error. */
		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
4246
/*
 * For a specific existing UST session and UST channel, creates the event for
 * all registered apps.
 *
 * Returns 0 on success. Returns a negative tracer error code (e.g. -ENOMEM)
 * if event creation fails for an app with anything other than
 * -LTTNG_UST_ERR_EXIST, in which case the iteration stops early.
 */
int ust_app_create_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	DBG("UST app creating event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		/* Serialize against concurrent app session modifications. */
		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			if (ret != -LTTNG_UST_ERR_EXIST) {
				/* Possible value at this point: -ENOMEM. If so, we stop! */
				break;
			}
			/* Already-existing event is benign; move to the next app. */
			DBG2("UST app event %s already exist on app PID %d",
					uevent->attr.name, app->pid);
			continue;
		}
	}

	rcu_read_unlock();

	return ret;
}
4313
/*
 * Start tracing for a specific UST session and app.
 *
 * Called with UST app session lock held.
 *
 * On a first start, the local trace directory (when the consumer is local)
 * and the application metadata are set up before the session is started;
 * on a restart (ua_sess->started already set) that setup is skipped.
 *
 * Returns 0 on success or when the application died mid-operation (this is
 * expected and not reported as an error); -1 on setup or start failure.
 */
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	/* Create directories if consumer is LOCAL and has a path defined. */
	if (usess->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(usess->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
		if (ret < 0) {
			/* A pre-existing directory is not an error. */
			if (errno != EEXIST) {
				ERR("Trace directory creation error");
				goto error_unlock;
			}
		}
	}

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
	if (ret < 0) {
		goto error_unlock;
	}

	health_code_update();

skip_setup:
	/* This start the UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_start_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error starting tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app start session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4425
/*
 * Stop tracing for a specific UST session and app.
 *
 * Returns 0 on success or when the application died mid-operation; -1 if
 * the session was never started, or if the tracer returned an unexpected
 * error. On a successful stop the session registry metadata is pushed to
 * the consumer.
 */
static
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end_no_session;
	}

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_stop_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error stopping tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app stop session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			goto end_unlock;
		}
		goto error_rcu_unlock;
	}

	health_code_update();

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

	health_code_update();

	registry = get_session_registry(ua_sess);

	/* The UST app session is held registry shall not be null. */
	assert(registry);

	/* Push metadata for application before freeing the application. */
	(void) push_metadata(registry, ua_sess->consumer);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4522
4523 static
4524 int ust_app_flush_app_session(struct ust_app *app,
4525 struct ust_app_session *ua_sess)
4526 {
4527 int ret, retval = 0;
4528 struct lttng_ht_iter iter;
4529 struct ust_app_channel *ua_chan;
4530 struct consumer_socket *socket;
4531
4532 DBG("Flushing app session buffers for ust app pid %d", app->pid);
4533
4534 rcu_read_lock();
4535
4536 if (!app->compatible) {
4537 goto end_not_compatible;
4538 }
4539
4540 pthread_mutex_lock(&ua_sess->lock);
4541
4542 if (ua_sess->deleted) {
4543 goto end_deleted;
4544 }
4545
4546 health_code_update();
4547
4548 /* Flushing buffers */
4549 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4550 ua_sess->consumer);
4551
4552 /* Flush buffers and push metadata. */
4553 switch (ua_sess->buffer_type) {
4554 case LTTNG_BUFFER_PER_PID:
4555 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4556 node.node) {
4557 health_code_update();
4558 ret = consumer_flush_channel(socket, ua_chan->key);
4559 if (ret) {
4560 ERR("Error flushing consumer channel");
4561 retval = -1;
4562 continue;
4563 }
4564 }
4565 break;
4566 case LTTNG_BUFFER_PER_UID:
4567 default:
4568 assert(0);
4569 break;
4570 }
4571
4572 health_code_update();
4573
4574 end_deleted:
4575 pthread_mutex_unlock(&ua_sess->lock);
4576
4577 end_not_compatible:
4578 rcu_read_unlock();
4579 health_code_update();
4580 return retval;
4581 }
4582
/*
 * Flush buffers for all applications for a specific UST session.
 * Called with UST session lock held.
 *
 * Per-UID sessions are flushed through the buffer registry channels and
 * their metadata is pushed; per-PID sessions are flushed app by app via
 * ust_app_flush_app_session(). Returns 0 on success.
 */
static
int ust_app_flush_session(struct ltt_ust_session *usess)

{
	int ret = 0;

	DBG("Flushing session buffers for all ust apps");

	rcu_read_lock();

	/* Flush buffers and push metadata. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		struct lttng_ht_iter iter;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			/* Push metadata. */
			(void) push_metadata(ust_session_reg, usess->consumer);
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		/* Flush each registered application's session individually. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			(void) ust_app_flush_app_session(app, ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
4659
/*
 * Clear the quiescent state of every stream of one application session so
 * that a subsequent stop or destroy grabs a valid timestamp_end even when
 * the packet is empty.
 *
 * Only valid for per-PID buffering. Returns 0 on success, -1 on error
 * (missing consumer socket or a channel-level failure).
 */
static
int ust_app_clear_quiescent_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Clearing stream quiescent state for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_unlock;
	}

	health_code_update();

	socket = consumer_find_socket_by_bitness(app->bits_per_long,
			ua_sess->consumer);
	if (!socket) {
		ERR("Failed to find consumer (%" PRIu32 ") socket",
				app->bits_per_long);
		ret = -1;
		goto end_unlock;
	}

	/* Clear quiescent state. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
				ua_chan, node.node) {
			health_code_update();
			ret = consumer_clear_quiescent_channel(socket,
					ua_chan->key);
			if (ret) {
				/* Keep processing the remaining channels on error. */
				ERR("Error clearing quiescent state for consumer channel");
				ret = -1;
				continue;
			}
		}
		break;
	case LTTNG_BUFFER_PER_UID:
	default:
		/* Per-UID sessions must never reach this per-app path. */
		assert(0);
		ret = -1;
		break;
	}

	health_code_update();

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end_not_compatible:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
4726
/*
 * Clear quiescent state in each stream for all applications for a
 * specific UST session.
 * Called with UST session lock held.
 *
 * Per-UID sessions are handled through the buffer registry channels;
 * per-PID sessions are handled app by app via
 * ust_app_clear_quiescent_app_session(). Returns 0 on success.
 */
static
int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)

{
	int ret = 0;

	DBG("Clearing stream quiescent state for all ust apps");

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct lttng_ht_iter iter;
		struct buffer_reg_uid *reg;

		/*
		 * Clear quiescent for all per UID buffers associated to
		 * that session.
		 */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct consumer_socket *socket;
			struct buffer_reg_channel *reg_chan;

			/* Get associated consumer socket.*/
			socket = consumer_find_socket_by_bitness(
					reg->bits_per_long, usess->consumer);
			if (!socket) {
				/*
				 * Ignore request if no consumer is found for
				 * the session.
				 */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht,
					&iter.iter, reg_chan, node.node) {
				/*
				 * The following call will print error values so
				 * the return code is of little importance
				 * because whatever happens, we have to try them
				 * all.
				 */
				(void) consumer_clear_quiescent_channel(socket,
						reg_chan->consumer_key);
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		/* Handle each registered application's session individually. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
				pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			(void) ust_app_clear_quiescent_app_session(app,
					ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
4808
/*
 * Destroy a specific UST session in apps.
 *
 * Always returns 0: a missing per-app session (already being torn down)
 * and an application that died during the quiescent wait are both
 * expected, non-error conditions.
 */
static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret;
	struct ust_app_session *ua_sess;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	DBG("Destroy tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	__lookup_session_by_app(usess, app, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		/* Session is being or is deleted. */
		goto end;
	}
	ua_sess = caa_container_of(node, struct ust_app_session, node);

	health_code_update();
	destroy_app_session(app, ua_sess);

	health_code_update();

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}
end:
	rcu_read_unlock();
	health_code_update();
	return 0;
}
4853
4854 /*
4855 * Start tracing for the UST session.
4856 */
4857 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4858 {
4859 int ret = 0;
4860 struct lttng_ht_iter iter;
4861 struct ust_app *app;
4862
4863 DBG("Starting all UST traces");
4864
4865 rcu_read_lock();
4866
4867 /*
4868 * In a start-stop-start use-case, we need to clear the quiescent state
4869 * of each channel set by the prior stop command, thus ensuring that a
4870 * following stop or destroy is sure to grab a timestamp_end near those
4871 * operations, even if the packet is empty.
4872 */
4873 (void) ust_app_clear_quiescent_session(usess);
4874
4875 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4876 ret = ust_app_start_trace(usess, app);
4877 if (ret < 0) {
4878 /* Continue to next apps even on error */
4879 continue;
4880 }
4881 }
4882
4883 rcu_read_unlock();
4884
4885 return 0;
4886 }
4887
/*
 * Stop tracing for the UST session.
 * Called with UST session lock held.
 *
 * Always returns 0; per-application stop errors are skipped so that every
 * application gets a stop attempt, after which all session buffers are
 * flushed.
 */
int ust_app_stop_trace_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Stopping all UST traces");

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = ust_app_stop_trace(usess, app);
		if (ret < 0) {
			/* Continue to next apps even on error */
			continue;
		}
	}

	/* Flush once all applications were stopped. */
	(void) ust_app_flush_session(usess);

	rcu_read_unlock();

	return 0;
}
4916
4917 /*
4918 * Force stop live timers associated with the ust session.
4919 */
4920 int ust_force_stop_live_timer(struct ltt_ust_session *usess)
4921 {
4922 int ret = 0;
4923
4924 if (usess->live_timer_interval == 0) {
4925 goto skip;
4926 }
4927
4928 DBG("Stop all live timer associated with UST session %p.", usess);
4929
4930 rcu_read_lock();
4931
4932 switch (usess->buffer_type) {
4933 case LTTNG_BUFFER_PER_UID:
4934 {
4935 struct buffer_reg_uid *reg;
4936 struct lttng_ht_iter iter;
4937
4938 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4939 struct ust_registry_session *ust_session_reg;
4940 struct buffer_reg_channel *reg_chan;
4941 struct consumer_socket *socket;
4942
4943 /* Get consumer socket to use */
4944 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
4945 usess->consumer);
4946 if (!socket) {
4947 /* Ignore request if no consumer is found for the session. */
4948 continue;
4949 }
4950
4951 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
4952 reg_chan, node.node) {
4953 ret = consumer_channel_stop_live_timer(socket, reg_chan->consumer_key);
4954 if (ret) {
4955 ERR("Error stopping live timer for channel %" PRIu64, reg_chan->consumer_key);
4956 }
4957 }
4958 }
4959 break;
4960 }
4961 case LTTNG_BUFFER_PER_PID:
4962 {
4963 struct lttng_ht_iter iter_i;
4964 struct ust_app *app;
4965 uint64_t chan_reg_key;
4966
4967 cds_lfht_for_each_entry(ust_app_ht->ht, &iter_i.iter, app,
4968 pid_n.node) {
4969 int ret;
4970 struct ust_app_session *ua_sess;
4971 struct lttng_ht_iter iter_j, iter_k;
4972 struct lttng_ht_node_u64 *node;
4973 struct ust_app_channel *ua_chan;
4974
4975 DBG("Stopping live timer associated with ust app pid "
4976 "%d",
4977 app->pid);
4978
4979 if (!app->compatible) {
4980 goto end;
4981 }
4982
4983 __lookup_session_by_app(usess, app, &iter_j);
4984 node = lttng_ht_iter_get_node_u64(&iter_j);
4985 if (node == NULL) {
4986 /* Session is being or is deleted. */
4987 goto end;
4988 }
4989 ua_sess = caa_container_of(node, struct ust_app_session,
4990 node);
4991
4992 health_code_update();
4993
4994 cds_lfht_for_each_entry(ua_sess->channels->ht,
4995 &iter_k.iter, ua_chan,
4996 node.node) {
4997 struct consumer_socket *consumer_socket;
4998
4999 /* Stop live timer immediately if any */
5000 consumer_socket =
5001 consumer_find_socket_by_bitness(
5002 app->bits_per_long,
5003 ua_chan->session->consumer);
5004 ret = consumer_channel_stop_live_timer(
5005 consumer_socket, ua_chan->key);
5006 if (ret) {
5007 ERR("Error stopping live timer");
5008 }
5009 }
5010 break;
5011 }
5012 break;
5013 }
5014 default:
5015 break;
5016 }
5017
5018 end:
5019 rcu_read_unlock();
5020 health_code_update();
5021 skip:
5022 return ret;
5023 }
5024
5025 /*
5026 * Force start live timers associated with the ust session.
5027 */
5028 int ust_force_start_live_timer(struct ltt_ust_session *usess)
5029 {
5030 int ret = 0;
5031
5032 if (usess->live_timer_interval == 0) {
5033 goto skip;
5034 }
5035
5036 DBG("Start all live timer associated with UST session %p", usess);
5037
5038 rcu_read_lock();
5039
5040 switch (usess->buffer_type) {
5041 case LTTNG_BUFFER_PER_UID:
5042 {
5043 struct buffer_reg_uid *reg;
5044 struct lttng_ht_iter iter;
5045
5046 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5047 struct ust_registry_session *ust_session_reg;
5048 struct buffer_reg_channel *reg_chan;
5049 struct consumer_socket *socket;
5050
5051 /* Get consumer socket to use */
5052 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5053 usess->consumer);
5054 if (!socket) {
5055 /* Ignore request if no consumer is found for the session. */
5056 continue;
5057 }
5058
5059 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5060 reg_chan, node.node) {
5061 ret = consumer_channel_start_live_timer(socket, reg_chan->consumer_key);
5062 if (ret) {
5063 ERR("Error stopping live timer for channel %" PRIu64, reg_chan->consumer_key);
5064 }
5065 }
5066 }
5067 break;
5068 }
5069 case LTTNG_BUFFER_PER_PID:
5070 {
5071 struct lttng_ht_iter iter_i;
5072 struct ust_app *app;
5073 uint64_t chan_reg_key;
5074
5075 cds_lfht_for_each_entry(ust_app_ht->ht, &iter_i.iter, app,
5076 pid_n.node) {
5077 int ret;
5078 struct ust_app_session *ua_sess;
5079 struct lttng_ht_iter iter_j, iter_k;
5080 struct lttng_ht_node_u64 *node;
5081 struct ust_app_channel *ua_chan;
5082
5083 DBG("Stopping live timer associated with ust app pid "
5084 "%d",
5085 app->pid);
5086
5087 if (!app->compatible) {
5088 goto end;
5089 }
5090
5091 __lookup_session_by_app(usess, app, &iter_j);
5092 node = lttng_ht_iter_get_node_u64(&iter_j);
5093 if (node == NULL) {
5094 /* Session is being or is deleted. */
5095 goto end;
5096 }
5097 ua_sess = caa_container_of(node, struct ust_app_session,
5098 node);
5099
5100 health_code_update();
5101
5102 cds_lfht_for_each_entry(ua_sess->channels->ht,
5103 &iter_k.iter, ua_chan,
5104 node.node) {
5105 struct consumer_socket *consumer_socket;
5106
5107 /* Stop live timer immediately if any */
5108 consumer_socket =
5109 consumer_find_socket_by_bitness(
5110 app->bits_per_long,
5111 ua_chan->session->consumer);
5112 ret = consumer_channel_start_live_timer(
5113 consumer_socket, ua_chan->key);
5114 if (ret) {
5115 ERR("Error stopping live timer");
5116 }
5117 }
5118 break;
5119 }
5120 break;
5121 }
5122 default:
5123 break;
5124 }
5125
5126 end:
5127 rcu_read_unlock();
5128 health_code_update();
5129 skip:
5130 return ret;
5131 }
5132
5133 /*
5134 * Destroy app UST session.
5135 */
5136 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
5137 {
5138 int ret = 0;
5139 struct lttng_ht_iter iter;
5140 struct ust_app *app;
5141
5142 DBG("Destroy all UST traces");
5143
5144 rcu_read_lock();
5145
5146 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5147 ret = destroy_trace(usess, app);
5148 if (ret < 0) {
5149 /* Continue to next apps even on error */
5150 continue;
5151 }
5152 }
5153
5154 rcu_read_unlock();
5155
5156 return 0;
5157 }
5158
/*
 * Create the per-application shadow of the UST global domain for one
 * registered application: app session, channels, contexts and events.
 * If the session is active, tracing is started for the app right away.
 *
 * On any unrecoverable error the (partially created) app session is
 * destroyed; if the app session already existed, nothing is done.
 */
static
void ust_app_global_create(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_session *ua_sess = NULL;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	int is_created = 0;

	ret = create_ust_app_session(usess, app, &ua_sess, &is_created);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	if (!is_created) {
		/* App session already created. */
		goto end;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/*
	 * We can iterate safely here over all UST app session since the create ust
	 * app session above made a shadow copy of the UST global domain from the
	 * ltt ust session.
	 */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = do_create_channel(app, usess, ua_sess, ua_chan);
		if (ret < 0 && ret != -ENOTCONN) {
			/*
			 * Stop everything. On error, the application
			 * failed, no more file descriptor are available
			 * or ENOMEM so stopping here is the only thing
			 * we can do for now. The only exception is
			 * -ENOTCONN, which indicates that the application
			 * has exit.
			 */
			goto error_unlock;
		}

		/*
		 * Add context using the list so they are enabled in the same order the
		 * user added them.
		 */
		cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
			ret = create_ust_channel_context(ua_chan, ua_ctx, app);
			if (ret < 0) {
				goto error_unlock;
			}
		}


		/* For each events */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
			if (ret < 0) {
				goto error_unlock;
			}
		}
	}

	pthread_mutex_unlock(&ua_sess->lock);

	if (usess->active) {
		/* Session is already active; have the new app start tracing now. */
		ret = ust_app_start_trace(usess, app);
		if (ret < 0) {
			goto error;
		}

		DBG2("UST trace started for app pid %d", app->pid);
	}
end:
	/* Everything went well at this point. */
	return;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
error:
	/* Tear down whatever part of the app session was created. */
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
	return;
}
5252
5253 static
5254 void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
5255 {
5256 struct ust_app_session *ua_sess;
5257
5258 ua_sess = lookup_session_by_app(usess, app);
5259 if (ua_sess == NULL) {
5260 return;
5261 }
5262 destroy_app_session(app, ua_sess);
5263 }
5264
/*
 * Add channels/events from UST global domain to registered apps at sock.
 *
 * Depending on the VPID/VUID/VGID trackers, either (re)creates the app's
 * shadow of the global domain, or destroys it when the application is no
 * longer tracked.
 *
 * Called with session lock held.
 * Called with RCU read-side lock held.
 */
void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
{
	assert(usess);

	DBG2("UST app global update for app sock %d for session id %" PRIu64,
			app->sock, usess->id);

	if (!app->compatible) {
		return;
	}

	/* The app is traced only if all three trackers accept it. */
	if (trace_ust_id_tracker_lookup(LTTNG_TRACKER_VPID, usess, app->pid)
			&& trace_ust_id_tracker_lookup(LTTNG_TRACKER_VUID, usess, app->uid)
			&& trace_ust_id_tracker_lookup(LTTNG_TRACKER_VGID, usess, app->gid)) {
		ust_app_global_create(usess, app);
	} else {
		ust_app_global_destroy(usess, app);
	}
}
5290
5291 /*
5292 * Called with session lock held.
5293 */
5294 void ust_app_global_update_all(struct ltt_ust_session *usess)
5295 {
5296 struct lttng_ht_iter iter;
5297 struct ust_app *app;
5298
5299 rcu_read_lock();
5300 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5301 ust_app_global_update(usess, app);
5302 }
5303 rcu_read_unlock();
5304 }
5305
/*
 * Add context to a specific channel for global UST domain.
 *
 * Returns 0 on success, or the error of the last failing context creation.
 * Applications that are incompatible, without a session, or without the
 * target channel are silently skipped.
 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
{
	int ret = 0;
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = NULL;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			/* Channel not created for this app; nothing to do. */
			goto next_app;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
				node);
		ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
		if (ret < 0) {
			/* Error already logged; move on to the next app. */
			goto next_app;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

	rcu_read_unlock();
	return ret;
}
5360
/*
 * Enable event for a channel from a UST session for a specific PID.
 *
 * Creates the event for the app if it does not exist yet, otherwise
 * re-enables the existing one. Returns 0 on success (including when the
 * app is incompatible or has no session), -1 if the PID is unknown, or a
 * negative tracer error on create/enable failure.
 */
int ust_app_enable_event_pid(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);

	rcu_read_lock();

	app = ust_app_find_by_pid(pid);
	if (app == NULL) {
		ERR("UST app enable event per PID %d not found", pid);
		ret = -1;
		goto end;
	}

	if (!app->compatible) {
		ret = 0;
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (!ua_sess) {
		/* The application has problem or is probably dead. */
		ret = 0;
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		ret = 0;
		goto end_unlock;
	}

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	/* If the channel is not found, there is a code flow error */
	assert(ua_chan_node);

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	/* Match on name, filter, loglevel and exclusions. */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event == NULL) {
		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		if (ret < 0) {
			goto end_unlock;
		}
	} else {
		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			goto end_unlock;
		}
	}

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end:
	rcu_read_unlock();
	return ret;
}
5433
/*
 * Receive registration and populate the given msg structure.
 *
 * Fills in the tracer version, ABI layout information, name and the
 * application's pid/ppid/uid/gid credentials.
 *
 * On success return 0 else a negative value returned by the ustctl call.
 */
int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
{
	int ret;
	uint32_t pid, ppid, uid, gid;

	assert(msg);

	ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
			&pid, &ppid, &uid, &gid,
			&msg->bits_per_long,
			&msg->uint8_t_alignment,
			&msg->uint16_t_alignment,
			&msg->uint32_t_alignment,
			&msg->uint64_t_alignment,
			&msg->long_alignment,
			&msg->byte_order,
			msg->name);
	if (ret < 0) {
		switch (-ret) {
		case EPIPE:
		case ECONNRESET:
		case LTTNG_UST_ERR_EXITING:
			/* Expected when the application dies during registration. */
			DBG3("UST app recv reg message failed. Application died");
			break;
		case LTTNG_UST_ERR_UNSUP_MAJOR:
			ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
					msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
					LTTNG_UST_ABI_MINOR_VERSION);
			break;
		default:
			ERR("UST app recv reg message failed with ret %d", ret);
			break;
		}
		goto error;
	}
	/* Convert the wire-format uint32_t credentials to the system types. */
	msg->pid = (pid_t) pid;
	msg->ppid = (pid_t) ppid;
	msg->uid = (uid_t) uid;
	msg->gid = (gid_t) gid;

error:
	return ret;
}
5482
5483 /*
5484 * Return a ust app session object using the application object and the
5485 * session object descriptor has a key. If not found, NULL is returned.
5486 * A RCU read side lock MUST be acquired when calling this function.
5487 */
5488 static struct ust_app_session *find_session_by_objd(struct ust_app *app,
5489 int objd)
5490 {
5491 struct lttng_ht_node_ulong *node;
5492 struct lttng_ht_iter iter;
5493 struct ust_app_session *ua_sess = NULL;
5494
5495 assert(app);
5496
5497 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
5498 node = lttng_ht_iter_get_node_ulong(&iter);
5499 if (node == NULL) {
5500 DBG2("UST app session find by objd %d not found", objd);
5501 goto error;
5502 }
5503
5504 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
5505
5506 error:
5507 return ua_sess;
5508 }
5509
5510 /*
5511 * Return a ust app channel object using the application object and the channel
5512 * object descriptor has a key. If not found, NULL is returned. A RCU read side
5513 * lock MUST be acquired before calling this function.
5514 */
5515 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
5516 int objd)
5517 {
5518 struct lttng_ht_node_ulong *node;
5519 struct lttng_ht_iter iter;
5520 struct ust_app_channel *ua_chan = NULL;
5521
5522 assert(app);
5523
5524 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
5525 node = lttng_ht_iter_get_node_ulong(&iter);
5526 if (node == NULL) {
5527 DBG2("UST app channel find by objd %d not found", objd);
5528 goto error;
5529 }
5530
5531 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
5532
5533 error:
5534 return ua_chan;
5535 }
5536
5537 /*
5538 * Reply to a register channel notification from an application on the notify
5539 * socket. The channel metadata is also created.
5540 *
5541 * The session UST registry lock is acquired in this function.
5542 *
5543 * On success 0 is returned else a negative value.
5544 */
5545 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
5546 size_t nr_fields, struct ustctl_field *fields)
5547 {
5548 int ret, ret_code = 0;
5549 uint32_t chan_id, reg_count;
5550 uint64_t chan_reg_key;
5551 enum ustctl_channel_header type;
5552 struct ust_app *app;
5553 struct ust_app_channel *ua_chan;
5554 struct ust_app_session *ua_sess;
5555 struct ust_registry_session *registry;
5556 struct ust_registry_channel *chan_reg;
5557
5558 rcu_read_lock();
5559
5560 /* Lookup application. If not found, there is a code flow error. */
5561 app = find_app_by_notify_sock(sock);
5562 if (!app) {
5563 DBG("Application socket %d is being torn down. Abort event notify",
5564 sock);
5565 ret = 0;
5566 goto error_rcu_unlock;
5567 }
5568
5569 /* Lookup channel by UST object descriptor. */
5570 ua_chan = find_channel_by_objd(app, cobjd);
5571 if (!ua_chan) {
5572 DBG("Application channel is being torn down. Abort event notify");
5573 ret = 0;
5574 goto error_rcu_unlock;
5575 }
5576
5577 assert(ua_chan->session);
5578 ua_sess = ua_chan->session;
5579
5580 /* Get right session registry depending on the session buffer type. */
5581 registry = get_session_registry(ua_sess);
5582 if (!registry) {
5583 DBG("Application session is being torn down. Abort event notify");
5584 ret = 0;
5585 goto error_rcu_unlock;
5586 };
5587
5588 /* Depending on the buffer type, a different channel key is used. */
5589 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5590 chan_reg_key = ua_chan->tracing_channel_id;
5591 } else {
5592 chan_reg_key = ua_chan->key;
5593 }
5594
5595 pthread_mutex_lock(&registry->lock);
5596
5597 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
5598 assert(chan_reg);
5599
5600 if (!chan_reg->register_done) {
5601 reg_count = ust_registry_get_event_count(chan_reg);
5602 if (reg_count < 31) {
5603 type = USTCTL_CHANNEL_HEADER_COMPACT;
5604 } else {
5605 type = USTCTL_CHANNEL_HEADER_LARGE;
5606 }
5607
5608 chan_reg->nr_ctx_fields = nr_fields;
5609 chan_reg->ctx_fields = fields;
5610 fields = NULL;
5611 chan_reg->header_type = type;
5612 } else {
5613 /* Get current already assigned values. */
5614 type = chan_reg->header_type;
5615 }
5616 /* Channel id is set during the object creation. */
5617 chan_id = chan_reg->chan_id;
5618
5619 /* Append to metadata */
5620 if (!chan_reg->metadata_dumped) {
5621 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
5622 if (ret_code) {
5623 ERR("Error appending channel metadata (errno = %d)", ret_code);
5624 goto reply;
5625 }
5626 }
5627
5628 reply:
5629 DBG3("UST app replying to register channel key %" PRIu64
5630 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
5631 ret_code);
5632
5633 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
5634 if (ret < 0) {
5635 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5636 ERR("UST app reply channel failed with ret %d", ret);
5637 } else {
5638 DBG3("UST app reply channel failed. Application died");
5639 }
5640 goto error;
5641 }
5642
5643 /* This channel registry registration is completed. */
5644 chan_reg->register_done = 1;
5645
5646 error:
5647 pthread_mutex_unlock(&registry->lock);
5648 error_rcu_unlock:
5649 rcu_read_unlock();
5650 free(fields);
5651 return ret;
5652 }
5653
5654 /*
5655 * Add event to the UST channel registry. When the event is added to the
5656 * registry, the metadata is also created. Once done, this replies to the
5657 * application with the appropriate error code.
5658 *
5659 * The session UST registry lock is acquired in the function.
5660 *
5661 * On success 0 is returned else a negative value.
5662 */
5663 static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
5664 char *sig, size_t nr_fields, struct ustctl_field *fields,
5665 int loglevel_value, char *model_emf_uri)
5666 {
5667 int ret, ret_code;
5668 uint32_t event_id = 0;
5669 uint64_t chan_reg_key;
5670 struct ust_app *app;
5671 struct ust_app_channel *ua_chan;
5672 struct ust_app_session *ua_sess;
5673 struct ust_registry_session *registry;
5674
5675 rcu_read_lock();
5676
5677 /* Lookup application. If not found, there is a code flow error. */
5678 app = find_app_by_notify_sock(sock);
5679 if (!app) {
5680 DBG("Application socket %d is being torn down. Abort event notify",
5681 sock);
5682 ret = 0;
5683 goto error_rcu_unlock;
5684 }
5685
5686 /* Lookup channel by UST object descriptor. */
5687 ua_chan = find_channel_by_objd(app, cobjd);
5688 if (!ua_chan) {
5689 DBG("Application channel is being torn down. Abort event notify");
5690 ret = 0;
5691 goto error_rcu_unlock;
5692 }
5693
5694 assert(ua_chan->session);
5695 ua_sess = ua_chan->session;
5696
5697 registry = get_session_registry(ua_sess);
5698 if (!registry) {
5699 DBG("Application session is being torn down. Abort event notify");
5700 ret = 0;
5701 goto error_rcu_unlock;
5702 }
5703
5704 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5705 chan_reg_key = ua_chan->tracing_channel_id;
5706 } else {
5707 chan_reg_key = ua_chan->key;
5708 }
5709
5710 pthread_mutex_lock(&registry->lock);
5711
5712 /*
5713 * From this point on, this call acquires the ownership of the sig, fields
5714 * and model_emf_uri meaning any free are done inside it if needed. These
5715 * three variables MUST NOT be read/write after this.
5716 */
5717 ret_code = ust_registry_create_event(registry, chan_reg_key,
5718 sobjd, cobjd, name, sig, nr_fields, fields,
5719 loglevel_value, model_emf_uri, ua_sess->buffer_type,
5720 &event_id, app);
5721 sig = NULL;
5722 fields = NULL;
5723 model_emf_uri = NULL;
5724
5725 /*
5726 * The return value is returned to ustctl so in case of an error, the
5727 * application can be notified. In case of an error, it's important not to
5728 * return a negative error or else the application will get closed.
5729 */
5730 ret = ustctl_reply_register_event(sock, event_id, ret_code);
5731 if (ret < 0) {
5732 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5733 ERR("UST app reply event failed with ret %d", ret);
5734 } else {
5735 DBG3("UST app reply event failed. Application died");
5736 }
5737 /*
5738 * No need to wipe the create event since the application socket will
5739 * get close on error hence cleaning up everything by itself.
5740 */
5741 goto error;
5742 }
5743
5744 DBG3("UST registry event %s with id %" PRId32 " added successfully",
5745 name, event_id);
5746
5747 error:
5748 pthread_mutex_unlock(&registry->lock);
5749 error_rcu_unlock:
5750 rcu_read_unlock();
5751 free(sig);
5752 free(fields);
5753 free(model_emf_uri);
5754 return ret;
5755 }
5756
5757 /*
5758 * Add enum to the UST session registry. Once done, this replies to the
5759 * application with the appropriate error code.
5760 *
5761 * The session UST registry lock is acquired within this function.
5762 *
5763 * On success 0 is returned else a negative value.
5764 */
5765 static int add_enum_ust_registry(int sock, int sobjd, char *name,
5766 struct ustctl_enum_entry *entries, size_t nr_entries)
5767 {
5768 int ret = 0, ret_code;
5769 struct ust_app *app;
5770 struct ust_app_session *ua_sess;
5771 struct ust_registry_session *registry;
5772 uint64_t enum_id = -1ULL;
5773
5774 rcu_read_lock();
5775
5776 /* Lookup application. If not found, there is a code flow error. */
5777 app = find_app_by_notify_sock(sock);
5778 if (!app) {
5779 /* Return an error since this is not an error */
5780 DBG("Application socket %d is being torn down. Aborting enum registration",
5781 sock);
5782 free(entries);
5783 goto error_rcu_unlock;
5784 }
5785
5786 /* Lookup session by UST object descriptor. */
5787 ua_sess = find_session_by_objd(app, sobjd);
5788 if (!ua_sess) {
5789 /* Return an error since this is not an error */
5790 DBG("Application session is being torn down (session not found). Aborting enum registration.");
5791 free(entries);
5792 goto error_rcu_unlock;
5793 }
5794
5795 registry = get_session_registry(ua_sess);
5796 if (!registry) {
5797 DBG("Application session is being torn down (registry not found). Aborting enum registration.");
5798 free(entries);
5799 goto error_rcu_unlock;
5800 }
5801
5802 pthread_mutex_lock(&registry->lock);
5803
5804 /*
5805 * From this point on, the callee acquires the ownership of
5806 * entries. The variable entries MUST NOT be read/written after
5807 * call.
5808 */
5809 ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
5810 entries, nr_entries, &enum_id);
5811 entries = NULL;
5812
5813 /*
5814 * The return value is returned to ustctl so in case of an error, the
5815 * application can be notified. In case of an error, it's important not to
5816 * return a negative error or else the application will get closed.
5817 */
5818 ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
5819 if (ret < 0) {
5820 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5821 ERR("UST app reply enum failed with ret %d", ret);
5822 } else {
5823 DBG3("UST app reply enum failed. Application died");
5824 }
5825 /*
5826 * No need to wipe the create enum since the application socket will
5827 * get close on error hence cleaning up everything by itself.
5828 */
5829 goto error;
5830 }
5831
5832 DBG3("UST registry enum %s added successfully or already found", name);
5833
5834 error:
5835 pthread_mutex_unlock(&registry->lock);
5836 error_rcu_unlock:
5837 rcu_read_unlock();
5838 return ret;
5839 }
5840
5841 /*
5842 * Handle application notification through the given notify socket.
5843 *
5844 * Return 0 on success or else a negative value.
5845 */
5846 int ust_app_recv_notify(int sock)
5847 {
5848 int ret;
5849 enum ustctl_notify_cmd cmd;
5850
5851 DBG3("UST app receiving notify from sock %d", sock);
5852
5853 ret = ustctl_recv_notify(sock, &cmd);
5854 if (ret < 0) {
5855 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5856 ERR("UST app recv notify failed with ret %d", ret);
5857 } else {
5858 DBG3("UST app recv notify failed. Application died");
5859 }
5860 goto error;
5861 }
5862
5863 switch (cmd) {
5864 case USTCTL_NOTIFY_CMD_EVENT:
5865 {
5866 int sobjd, cobjd, loglevel_value;
5867 char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
5868 size_t nr_fields;
5869 struct ustctl_field *fields;
5870
5871 DBG2("UST app ustctl register event received");
5872
5873 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
5874 &loglevel_value, &sig, &nr_fields, &fields,
5875 &model_emf_uri);
5876 if (ret < 0) {
5877 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5878 ERR("UST app recv event failed with ret %d", ret);
5879 } else {
5880 DBG3("UST app recv event failed. Application died");
5881 }
5882 goto error;
5883 }
5884
5885 /*
5886 * Add event to the UST registry coming from the notify socket. This
5887 * call will free if needed the sig, fields and model_emf_uri. This
5888 * code path loses the ownsership of these variables and transfer them
5889 * to the this function.
5890 */
5891 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
5892 fields, loglevel_value, model_emf_uri);
5893 if (ret < 0) {
5894 goto error;
5895 }
5896
5897 break;
5898 }
5899 case USTCTL_NOTIFY_CMD_CHANNEL:
5900 {
5901 int sobjd, cobjd;
5902 size_t nr_fields;
5903 struct ustctl_field *fields;
5904
5905 DBG2("UST app ustctl register channel received");
5906
5907 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
5908 &fields);
5909 if (ret < 0) {
5910 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5911 ERR("UST app recv channel failed with ret %d", ret);
5912 } else {
5913 DBG3("UST app recv channel failed. Application died");
5914 }
5915 goto error;
5916 }
5917
5918 /*
5919 * The fields ownership are transfered to this function call meaning
5920 * that if needed it will be freed. After this, it's invalid to access
5921 * fields or clean it up.
5922 */
5923 ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
5924 fields);
5925 if (ret < 0) {
5926 goto error;
5927 }
5928
5929 break;
5930 }
5931 case USTCTL_NOTIFY_CMD_ENUM:
5932 {
5933 int sobjd;
5934 char name[LTTNG_UST_SYM_NAME_LEN];
5935 size_t nr_entries;
5936 struct ustctl_enum_entry *entries;
5937
5938 DBG2("UST app ustctl register enum received");
5939
5940 ret = ustctl_recv_register_enum(sock, &sobjd, name,
5941 &entries, &nr_entries);
5942 if (ret < 0) {
5943 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5944 ERR("UST app recv enum failed with ret %d", ret);
5945 } else {
5946 DBG3("UST app recv enum failed. Application died");
5947 }
5948 goto error;
5949 }
5950
5951 /* Callee assumes ownership of entries */
5952 ret = add_enum_ust_registry(sock, sobjd, name,
5953 entries, nr_entries);
5954 if (ret < 0) {
5955 goto error;
5956 }
5957
5958 break;
5959 }
5960 default:
5961 /* Should NEVER happen. */
5962 assert(0);
5963 }
5964
5965 error:
5966 return ret;
5967 }
5968
5969 /*
5970 * Once the notify socket hangs up, this is called. First, it tries to find the
5971 * corresponding application. On failure, the call_rcu to close the socket is
5972 * executed. If an application is found, it tries to delete it from the notify
5973 * socket hash table. Whathever the result, it proceeds to the call_rcu.
5974 *
5975 * Note that an object needs to be allocated here so on ENOMEM failure, the
5976 * call RCU is not done but the rest of the cleanup is.
5977 */
5978 void ust_app_notify_sock_unregister(int sock)
5979 {
5980 int err_enomem = 0;
5981 struct lttng_ht_iter iter;
5982 struct ust_app *app;
5983 struct ust_app_notify_sock_obj *obj;
5984
5985 assert(sock >= 0);
5986
5987 rcu_read_lock();
5988
5989 obj = zmalloc(sizeof(*obj));
5990 if (!obj) {
5991 /*
5992 * An ENOMEM is kind of uncool. If this strikes we continue the
5993 * procedure but the call_rcu will not be called. In this case, we
5994 * accept the fd leak rather than possibly creating an unsynchronized
5995 * state between threads.
5996 *
5997 * TODO: The notify object should be created once the notify socket is
5998 * registered and stored independantely from the ust app object. The
5999 * tricky part is to synchronize the teardown of the application and
6000 * this notify object. Let's keep that in mind so we can avoid this
6001 * kind of shenanigans with ENOMEM in the teardown path.
6002 */
6003 err_enomem = 1;
6004 } else {
6005 obj->fd = sock;
6006 }
6007
6008 DBG("UST app notify socket unregister %d", sock);
6009
6010 /*
6011 * Lookup application by notify socket. If this fails, this means that the
6012 * hash table delete has already been done by the application
6013 * unregistration process so we can safely close the notify socket in a
6014 * call RCU.
6015 */
6016 app = find_app_by_notify_sock(sock);
6017 if (!app) {
6018 goto close_socket;
6019 }
6020
6021 iter.iter.node = &app->notify_sock_n.node;
6022
6023 /*
6024 * Whatever happens here either we fail or succeed, in both cases we have
6025 * to close the socket after a grace period to continue to the call RCU
6026 * here. If the deletion is successful, the application is not visible
6027 * anymore by other threads and is it fails it means that it was already
6028 * deleted from the hash table so either way we just have to close the
6029 * socket.
6030 */
6031 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
6032
6033 close_socket:
6034 rcu_read_unlock();
6035
6036 /*
6037 * Close socket after a grace period to avoid for the socket to be reused
6038 * before the application object is freed creating potential race between
6039 * threads trying to add unique in the global hash table.
6040 */
6041 if (!err_enomem) {
6042 call_rcu(&obj->head, close_notify_sock_rcu);
6043 }
6044 }
6045
6046 /*
6047 * Destroy a ust app data structure and free its memory.
6048 */
6049 void ust_app_destroy(struct ust_app *app)
6050 {
6051 if (!app) {
6052 return;
6053 }
6054
6055 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
6056 }
6057
6058 /*
6059 * Take a snapshot for a given UST session. The snapshot is sent to the given
6060 * output.
6061 *
6062 * Return 0 on success or else a negative value.
6063 */
6064 int ust_app_snapshot_record(struct ltt_ust_session *usess,
6065 struct snapshot_output *output, int wait,
6066 uint64_t nb_packets_per_stream)
6067 {
6068 int ret = 0;
6069 struct lttng_ht_iter iter;
6070 struct ust_app *app;
6071 char pathname[PATH_MAX];
6072
6073 assert(usess);
6074 assert(output);
6075
6076 rcu_read_lock();
6077
6078 switch (usess->buffer_type) {
6079 case LTTNG_BUFFER_PER_UID:
6080 {
6081 struct buffer_reg_uid *reg;
6082
6083 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6084 struct buffer_reg_channel *reg_chan;
6085 struct consumer_socket *socket;
6086
6087 if (!reg->registry->reg.ust->metadata_key) {
6088 /* Skip since no metadata is present */
6089 continue;
6090 }
6091
6092 /* Get consumer socket to use to push the metadata.*/
6093 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6094 usess->consumer);
6095 if (!socket) {
6096 ret = -EINVAL;
6097 goto error;
6098 }
6099
6100 memset(pathname, 0, sizeof(pathname));
6101 ret = snprintf(pathname, sizeof(pathname),
6102 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
6103 reg->uid, reg->bits_per_long);
6104 if (ret < 0) {
6105 PERROR("snprintf snapshot path");
6106 goto error;
6107 }
6108
6109 /* Add the UST default trace dir to path. */
6110 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6111 reg_chan, node.node) {
6112 ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
6113 output, 0, usess->uid, usess->gid, pathname, wait,
6114 nb_packets_per_stream);
6115 if (ret < 0) {
6116 goto error;
6117 }
6118 }
6119 ret = consumer_snapshot_channel(socket,
6120 reg->registry->reg.ust->metadata_key, output, 1,
6121 usess->uid, usess->gid, pathname, wait, 0);
6122 if (ret < 0) {
6123 goto error;
6124 }
6125 }
6126 break;
6127 }
6128 case LTTNG_BUFFER_PER_PID:
6129 {
6130 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6131 struct consumer_socket *socket;
6132 struct lttng_ht_iter chan_iter;
6133 struct ust_app_channel *ua_chan;
6134 struct ust_app_session *ua_sess;
6135 struct ust_registry_session *registry;
6136
6137 ua_sess = lookup_session_by_app(usess, app);
6138 if (!ua_sess) {
6139 /* Session not associated with this app. */
6140 continue;
6141 }
6142
6143 /* Get the right consumer socket for the application. */
6144 socket = consumer_find_socket_by_bitness(app->bits_per_long,
6145 output->consumer);
6146 if (!socket) {
6147 ret = -EINVAL;
6148 goto error;
6149 }
6150
6151 /* Add the UST default trace dir to path. */
6152 memset(pathname, 0, sizeof(pathname));
6153 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
6154 ua_sess->path);
6155 if (ret < 0) {
6156 PERROR("snprintf snapshot path");
6157 goto error;
6158 }
6159
6160 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6161 ua_chan, node.node) {
6162 ret = consumer_snapshot_channel(socket, ua_chan->key, output,
6163 0, ua_sess->euid, ua_sess->egid, pathname, wait,
6164 nb_packets_per_stream);
6165 if (ret < 0) {
6166 goto error;
6167 }
6168 }
6169
6170 registry = get_session_registry(ua_sess);
6171 if (!registry) {
6172 DBG("Application session is being torn down. Abort snapshot record.");
6173 ret = -1;
6174 goto error;
6175 }
6176 ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
6177 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
6178 if (ret < 0) {
6179 goto error;
6180 }
6181 }
6182 break;
6183 }
6184 default:
6185 assert(0);
6186 break;
6187 }
6188
6189 error:
6190 rcu_read_unlock();
6191 return ret;
6192 }
6193
6194 /*
6195 * Return the size taken by one more packet per stream.
6196 */
6197 uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
6198 uint64_t cur_nr_packets)
6199 {
6200 uint64_t tot_size = 0;
6201 struct ust_app *app;
6202 struct lttng_ht_iter iter;
6203
6204 assert(usess);
6205
6206 switch (usess->buffer_type) {
6207 case LTTNG_BUFFER_PER_UID:
6208 {
6209 struct buffer_reg_uid *reg;
6210
6211 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6212 struct buffer_reg_channel *reg_chan;
6213
6214 rcu_read_lock();
6215 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6216 reg_chan, node.node) {
6217 if (cur_nr_packets >= reg_chan->num_subbuf) {
6218 /*
6219 * Don't take channel into account if we
6220 * already grab all its packets.
6221 */
6222 continue;
6223 }
6224 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
6225 }
6226 rcu_read_unlock();
6227 }
6228 break;
6229 }
6230 case LTTNG_BUFFER_PER_PID:
6231 {
6232 rcu_read_lock();
6233 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6234 struct ust_app_channel *ua_chan;
6235 struct ust_app_session *ua_sess;
6236 struct lttng_ht_iter chan_iter;
6237
6238 ua_sess = lookup_session_by_app(usess, app);
6239 if (!ua_sess) {
6240 /* Session not associated with this app. */
6241 continue;
6242 }
6243
6244 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6245 ua_chan, node.node) {
6246 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
6247 /*
6248 * Don't take channel into account if we
6249 * already grab all its packets.
6250 */
6251 continue;
6252 }
6253 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
6254 }
6255 }
6256 rcu_read_unlock();
6257 break;
6258 }
6259 default:
6260 assert(0);
6261 break;
6262 }
6263
6264 return tot_size;
6265 }
6266
/*
 * Fetch the runtime statistics (lost packets or discarded events,
 * depending on the channel mode) of a per-UID channel.
 *
 * Return 0 on success (counters may stay 0 when the channel key is not
 * found) or else a negative value.
 */
int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
		struct cds_list_head *buffer_reg_uid_list,
		struct consumer_output *consumer, uint64_t uchan_id,
		int overwrite, uint64_t *discarded, uint64_t *lost)
{
	uint64_t chan_key;
	int ret;

	*discarded = 0;
	*lost = 0;

	ret = buffer_reg_uid_consumer_channel_key(buffer_reg_uid_list,
			ust_session_id, uchan_id, &chan_key);
	if (ret < 0) {
		/* Key not found: not an error, counters stay zeroed. */
		ret = 0;
		goto end;
	}

	if (overwrite) {
		/* Overwrite mode tracks lost packets. */
		ret = consumer_get_lost_packets(ust_session_id, chan_key,
				consumer, lost);
	} else {
		/* Discard mode tracks discarded events. */
		ret = consumer_get_discarded_events(ust_session_id, chan_key,
				consumer, discarded);
	}

end:
	return ret;
}
6298
6299 int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
6300 struct ltt_ust_channel *uchan,
6301 struct consumer_output *consumer, int overwrite,
6302 uint64_t *discarded, uint64_t *lost)
6303 {
6304 int ret = 0;
6305 struct lttng_ht_iter iter;
6306 struct lttng_ht_node_str *ua_chan_node;
6307 struct ust_app *app;
6308 struct ust_app_session *ua_sess;
6309 struct ust_app_channel *ua_chan;
6310
6311 *discarded = 0;
6312 *lost = 0;
6313
6314 rcu_read_lock();
6315 /*
6316 * Iterate over every registered applications. Sum counters for
6317 * all applications containing requested session and channel.
6318 */
6319 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6320 struct lttng_ht_iter uiter;
6321
6322 ua_sess = lookup_session_by_app(usess, app);
6323 if (ua_sess == NULL) {
6324 continue;
6325 }
6326
6327 /* Get channel */
6328 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
6329 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6330 /* If the session is found for the app, the channel must be there */
6331 assert(ua_chan_node);
6332
6333 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
6334
6335 if (overwrite) {
6336 uint64_t _lost;
6337
6338 ret = consumer_get_lost_packets(usess->id, ua_chan->key,
6339 consumer, &_lost);
6340 if (ret < 0) {
6341 break;
6342 }
6343 (*lost) += _lost;
6344 } else {
6345 uint64_t _discarded;
6346
6347 ret = consumer_get_discarded_events(usess->id,
6348 ua_chan->key, consumer, &_discarded);
6349 if (ret < 0) {
6350 break;
6351 }
6352 (*discarded) += _discarded;
6353 }
6354 }
6355
6356 rcu_read_unlock();
6357 return ret;
6358 }
6359
/*
 * Ask one application to regenerate its statedump for the given UST
 * session.
 *
 * Lock ordering: ua_sess->lock is taken before app->sock_lock; the RCU
 * read side lock is held across the whole operation.
 *
 * Return 0 on success (or when the session is being torn down) else a
 * negative value.
 */
static
int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Regenerating the metadata for ust app pid %d", app->pid);

	rcu_read_lock();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	/* Skip sessions flagged for deletion while we were acquiring the lock. */
	if (ua_sess->deleted) {
		goto end_unlock;
	}

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
6395
6396 /*
6397 * Regenerate the statedump for each app in the session.
6398 */
6399 int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
6400 {
6401 int ret = 0;
6402 struct lttng_ht_iter iter;
6403 struct ust_app *app;
6404
6405 DBG("Regenerating the metadata for all UST apps");
6406
6407 rcu_read_lock();
6408
6409 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6410 if (!app->compatible) {
6411 continue;
6412 }
6413
6414 ret = ust_app_regenerate_statedump(usess, app);
6415 if (ret < 0) {
6416 /* Continue to the next app even on error */
6417 continue;
6418 }
6419 }
6420
6421 rcu_read_unlock();
6422
6423 return 0;
6424 }
/* (removed web-scrape footer: "This page took ... git commands to generate.") */