Fix: build failure with -fno-common
lttng-tools.git: src/bin/lttng-sessiond/ust-app.c
/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _LGPL_SOURCE
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <signal.h>

#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "ust-app.h"
#include "ust-consumer.h"
#include "lttng-ust-ctl.h"
#include "lttng-ust-error.h"
#include "utils.h"
#include "session.h"
#include "lttng-sessiond.h"
#include "notification-thread-commands.h"
#include "rotate.h"

struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;
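
/*
 * Editor's note (assumption): these three tables are deliberately
 * *defined* in this translation unit and only *declared* (extern) in
 * ust-app.h. With -fno-common, the default since GCC 10, a tentative
 * definition repeated across object files no longer links as a single
 * merged symbol, so keeping the one definition here is what fixes the
 * build failure named in the commit title.
 */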

static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return the incremented value of next_channel_key.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}

/*
 * Return the atomically incremented value of next_session_id.
 */
static uint64_t get_next_session_id(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}
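
/*
 * Editor's note: both counters are pre-incremented, so the first value
 * handed out is 1 and the value 0 never occurs. Code below relies on 0
 * as an "unset" sentinel (e.g. a registry metadata_key of 0 means no
 * metadata was ever assigned).
 */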

static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	/* Copy channel attributes field by field since the layouts differ. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
	attr->blocking_timeout = uattr->u.s.blocking_timeout;
}
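
/*
 * Editor's note (assumption): blocking_timeout is read through
 * uattr->u.s because, on the lttng-ust side, the field sits inside a
 * padded union that reserves room for future ABI extensions, whereas
 * the consumer-side structure stores it flat.
 */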

/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes: the event name,
 * the filter bytecode, the loglevel and the exclusions.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exist, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exist, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}

/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	assert(node_ptr == &event->node.node);
}
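
/*
 * Editor's note: only the event name is hashed (event->node.key); the
 * filter, loglevel and exclusions are compared by the match callback.
 * Two events sharing a name but differing in any of the other three
 * attributes therefore land in the same bucket yet remain distinct
 * entries.
 */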

/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}
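
/*
 * Editor's note: a caller owning a struct ust_app_notify_sock_obj would
 * hand it off as, e.g.:
 *
 *	call_rcu(&obj->head, close_notify_sock_rcu);
 *
 * which guarantees the fd is closed only after a grace period, once no
 * RCU reader can still observe the socket.
 */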

/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exist before calling this function;
 * NULL is returned if the registry cannot be found. RCU read side lock
 * must be acquired.
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long,
				ua_sess->real_credentials.uid);
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		assert(0);
	}

error:
	return registry;
}

/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
		struct ust_app *app)
{
	int ret;

	assert(ua_ctx);

	if (ua_ctx->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
					sock, ua_ctx->obj->handle, ret);
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}

/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	assert(ua_event);

	free(ua_event->filter);
	free(ua_event->exclusion);
	if (ua_event->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_event->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release event obj failed with ret %d",
					sock, ret);
		}
		free(ua_event->obj);
	}
	free(ua_event);
}

/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}
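
/*
 * Editor's note (assumption): the fd accounting puts back 2 here, while
 * channels and notify sockets put back 1, presumably because each stream
 * object carries two file descriptors (its shared-memory fd and its
 * wakeup fd).
 */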

/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_channel().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}

/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from `lttng list` and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 * ust app session after receiving a destroy command or
		 * during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 * unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
}

/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key,
					sock >= 0);
		}
		/*
		 * A negative socket can be used by the caller when
		 * cleaning-up a ua_chan in an error path. Skip the
		 * accounting in this case.
		 */
		if (sock >= 0) {
			save_per_pid_lost_discarded_counters(ua_chan);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}

int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	return ret;
}

int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		sock = -1;
	}
	ret = ustctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}
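
/*
 * Editor's note (assumption): passing sock = -1 when no app is given
 * mirrors the delete_ust_app_stream(-1, ...) cleanup calls elsewhere in
 * this file; with an invalid socket, ustctl_release_object() can only
 * release the local object data, no command reaches a tracer.
 */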

/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of the socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happen if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is set up, to avoid the consumer
		 * asking for metadata that could possibly not be found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
				new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
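
/*
 * Editor's note: the expected caller pattern is the one used by
 * push_metadata() just below, i.e. the registry lock is taken around the
 * call and ust_app_push_metadata() itself drops and re-takes it only
 * for the duration of consumer_push_metadata():
 *
 *	pthread_mutex_lock(&registry->lock);
 *	ret = ust_app_push_metadata(registry, socket, 0);
 *	pthread_mutex_unlock(&registry->lock);
 */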

/*
 * For a given application and session, push metadata to consumer.
 * The consumer socket used to send the metadata is looked up from the
 * given consumer output, based on the bitness of the registry.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of the registry. It also ensures the
 * existence of the socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}

/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Do not hold the registry lock while communicating with the consumerd, because
 * doing so causes inter-process deadlocks between consumerd and sessiond with
 * the metadata request notification.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;
	uint64_t metadata_key;
	bool registry_was_already_closed;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);
	metadata_key = registry->metadata_key;
	registry_was_already_closed = registry->metadata_closed;
	if (metadata_key != 0) {
		/*
		 * Mark the metadata as closed. Even if the close command
		 * below fails, the consumer is either not responding or not
		 * found, so either way a second close should NOT be emitted
		 * for this registry.
		 */
		registry->metadata_closed = 1;
	}
	pthread_mutex_unlock(&registry->lock);

	if (metadata_key == 0 || registry_was_already_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto end;
	}

	ret = consumer_close_metadata(socket, metadata_key);
	if (ret < 0) {
		goto end;
	}

end:
	rcu_read_unlock();
	return ret;
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}

/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. In
		 * that case, close metadata only when the trace session is
		 * destroyed. Also, the previous push metadata could have
		 * flagged the metadata registry as closed, so don't send a
		 * close command if it is already closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}

/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}

/*
 * URCU intermediate call to delete an UST app.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}

/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 *
 * The session list lock must be held by the caller.
 */
static void destroy_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(app);
	assert(ua_sess);

	iter.iter.node = &ua_sess->node.node;
	ret = lttng_ht_del(app->sessions, &iter);
	if (ret) {
		/* Already scheduled for teardown. */
		goto end;
	}

	/* Once deleted, free the data structure. */
	delete_ust_app_session(app->sock, ua_sess, app);

end:
	return;
}

/*
 * Alloc new UST app session.
 */
static
struct ust_app_session *alloc_ust_app_session(void)
{
	struct ust_app_session *ua_sess;

	/* Init most of the default values by allocating and zeroing */
	ua_sess = zmalloc(sizeof(struct ust_app_session));
	if (ua_sess == NULL) {
		PERROR("malloc");
		goto error_free;
	}

	ua_sess->handle = -1;
	ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
	pthread_mutex_init(&ua_sess->lock, NULL);

	return ua_sess;

error_free:
	return NULL;
}

/*
 * Alloc new UST app channel.
 */
static
struct ust_app_channel *alloc_ust_app_channel(char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Init most of the default values by allocating and zeroing */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = 1;
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel_attr to ustctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = attr->output;
		ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
	}
	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return NULL;
}

/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
{
	struct ust_app_stream *stream = NULL;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("zmalloc ust app stream");
		goto error;
	}

	/* Zero could be a valid value for a handle so initialize it to -1. */
	stream->handle = -1;

error:
	return stream;
}

/*
 * Alloc new UST app event.
 */
static
struct ust_app_event *alloc_ust_app_event(char *name,
		struct lttng_ust_event *attr)
{
	struct ust_app_event *ua_event;

	/* Init most of the default values by allocating and zeroing */
	ua_event = zmalloc(sizeof(struct ust_app_event));
	if (ua_event == NULL) {
		PERROR("Failed to allocate ust_app_event structure");
		goto error;
	}

	ua_event->enabled = 1;
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	if (attr) {
		memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
	}

	DBG3("UST app event %s allocated", ua_event->name);

	return ua_event;

error:
	return NULL;
}

/*
 * Alloc new UST app context.
 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
		if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *ctx_name = NULL;

			provider_name = strdup(uctx->u.app_ctx.provider_name);
			ctx_name = strdup(uctx->u.app_ctx.ctx_name);
			if (!provider_name || !ctx_name) {
				free(provider_name);
				free(ctx_name);
				goto error;
			}

			ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
			ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
		}
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
	return ua_ctx;
error:
	free(ua_ctx);
	return NULL;
}

/*
 * Allocate a filter and copy the given original filter.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_filter_bytecode *copy_filter_bytecode(
		struct lttng_filter_bytecode *orig_f)
{
	struct lttng_filter_bytecode *filter = NULL;

	/* Copy filter bytecode */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("zmalloc alloc filter bytecode");
		goto error;
	}

	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);

error:
	return filter;
}
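
/*
 * Editor's note: lttng_filter_bytecode ends in a flexible array member,
 * so sizeof(*filter) covers only the header; the extra orig_f->len bytes
 * in both the allocation and the memcpy carry the bytecode payload
 * itself. The same pattern is used by the UST variant just below.
 */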

/*
 * Create a liblttng-ust filter bytecode from given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
		struct lttng_filter_bytecode *orig_f)
{
	struct lttng_ust_filter_bytecode *filter = NULL;

	/* Copy filter bytecode */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("zmalloc alloc ust filter bytecode");
		goto error;
	}

	assert(sizeof(struct lttng_filter_bytecode) ==
			sizeof(struct lttng_ust_filter_bytecode));
	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
error:
	return filter;
}

/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, sock_n);

error:
	return NULL;
}

/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by notify sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, notify_sock_n);

error:
	return NULL;
}

/*
 * Lookup for an ust app event based on event name, filter bytecode, event
 * loglevel and exclusions.
 *
 * Return an ust_app_event object or NULL if not found.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		const char *name, const struct lttng_filter_bytecode *filter,
		int loglevel_value,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel_type = loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}

/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app create channel context failed for app (pid: %d) "
					"with ret %d", app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app add context failed. Application is dead.");
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}
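
/*
 * Editor's note: the -EPIPE / -LTTNG_UST_ERR_EXITING handling seen here
 * recurs in every ustctl call site below. An application exiting in the
 * middle of an operation is an expected race, so those two codes are
 * downgraded to success (ret = 0) rather than reported as errors.
 */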

/*
 * Set the filter on the tracer.
 */
static
int set_ust_event_filter(struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;
	struct lttng_ust_filter_bytecode *ust_bytecode = NULL;

	health_code_update();

	if (!ua_event->filter) {
		ret = 0;
		goto error;
	}

	ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_filter(app->sock, ust_bytecode,
			ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s filter failed for app (pid: %d) "
					"with ret %d", ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app filter event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter set successfully for event %s", ua_event->name);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}
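
/*
 * Editor's note (assumption): ust_bytecode is freed on every path,
 * success included, because ustctl_set_filter() transmits the bytecode
 * to the application over the command socket; the sessiond-side copy
 * has no further use once the call returns.
 */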

static
struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
		struct lttng_event_exclusion *exclusion)
{
	struct lttng_ust_event_exclusion *ust_exclusion = NULL;
	size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
			LTTNG_UST_SYM_NAME_LEN * exclusion->count;

	ust_exclusion = zmalloc(exclusion_alloc_size);
	if (!ust_exclusion) {
		PERROR("malloc");
		goto end;
	}

	assert(sizeof(struct lttng_event_exclusion) ==
			sizeof(struct lttng_ust_event_exclusion));
	memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
end:
	return ust_exclusion;
}

/*
 * Set event exclusions on the tracer.
 */
static
int set_ust_event_exclusion(struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;
	struct lttng_ust_event_exclusion *ust_exclusion = NULL;

	health_code_update();

	if (!ua_event->exclusion || !ua_event->exclusion->count) {
		ret = 0;
		goto error;
	}

	ust_exclusion = create_ust_exclusion_from_exclusion(
			ua_event->exclusion);
	if (!ust_exclusion) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s exclusions failed for app (pid: %d) "
					"with ret %d", ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app event exclusion failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST exclusion set successfully for event %s", ua_event->name);

error:
	health_code_update();
	free(ust_exclusion);
	return ret;
}

/*
 * Disable the specified event on the UST tracer for the UST session.
 */
static int disable_ust_event(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_event->attr.name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app event %s disabled successfully for app (pid: %d)",
			ua_event->attr.name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Disable the specified channel on the UST tracer for the UST session.
 */
static int disable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable channel failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app channel %s disabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified channel on the UST tracer for the UST session.
 */
static int enable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable channel failed. Application is dead.");
		}
		goto error;
	}

	ua_chan->enabled = 1;

	DBG2("UST app channel %s enabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified event on the UST tracer for the UST session.
 */
static int enable_ust_event(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_event->attr.name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app event %s enabled successfully for app (pid: %d)",
			ua_event->attr.name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Send channel and stream buffer to application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN; /* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
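
/*
 * Editor's note: unlike the ustctl wrappers above, this function maps
 * the app-died codes to -ENOTCONN instead of swallowing them, so its
 * callers can tell "the application went away" apart from a genuine
 * send failure and react accordingly.
 */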

/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
			abort();
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_event_exclusion(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/*
	 * Events are created disabled on the tracer, so if this event is
	 * enabled we must explicitly enable it now.
	 */
	if (ua_event->enabled) {
		ret = enable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}

/*
 * Copy data between an UST app event and a LTT event.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = copy_filter_bytecode(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
				LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}

/*
 * Copy data between an UST app channel and a LTT channel.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy channel attributes field by field since the layouts differ. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;

	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1841
1842 /*
1843 * Copy data between a UST app session and a regular LTT session.
1844 */
1845 static void shadow_copy_session(struct ust_app_session *ua_sess,
1846 struct ltt_ust_session *usess, struct ust_app *app)
1847 {
1848 struct tm *timeinfo;
1849 char datetime[16];
1850 int ret;
1851 char tmp_shm_path[PATH_MAX];
1852
1853 timeinfo = localtime(&app->registration_time);
1854 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1855
1856 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1857
1858 ua_sess->tracing_id = usess->id;
1859 ua_sess->id = get_next_session_id();
1860 ua_sess->real_credentials.uid = app->uid;
1861 ua_sess->real_credentials.gid = app->gid;
1862 ua_sess->effective_credentials.uid = usess->uid;
1863 ua_sess->effective_credentials.gid = usess->gid;
1864 ua_sess->buffer_type = usess->buffer_type;
1865 ua_sess->bits_per_long = app->bits_per_long;
1866
1867 /* There is only one consumer object per session possible. */
1868 consumer_output_get(usess->consumer);
1869 ua_sess->consumer = usess->consumer;
1870
1871 ua_sess->output_traces = usess->output_traces;
1872 ua_sess->live_timer_interval = usess->live_timer_interval;
1873 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
1874 &usess->metadata_attr);
1875
1876 switch (ua_sess->buffer_type) {
1877 case LTTNG_BUFFER_PER_PID:
1878 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1879 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1880 datetime);
1881 break;
1882 case LTTNG_BUFFER_PER_UID:
1883 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1884 DEFAULT_UST_TRACE_UID_PATH,
1885 ua_sess->real_credentials.uid,
1886 app->bits_per_long);
1887 break;
1888 default:
1889 assert(0);
1890 goto error;
1891 }
1892 if (ret < 0) {
1893 PERROR("asprintf UST shadow copy session");
1894 assert(0);
1895 goto error;
1896 }
1897
1898 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
1899 sizeof(ua_sess->root_shm_path));
1900 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
1901 strncpy(ua_sess->shm_path, usess->shm_path,
1902 sizeof(ua_sess->shm_path));
1903 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1904 if (ua_sess->shm_path[0]) {
1905 switch (ua_sess->buffer_type) {
1906 case LTTNG_BUFFER_PER_PID:
1907 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1908 "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
1909 app->name, app->pid, datetime);
1910 break;
1911 case LTTNG_BUFFER_PER_UID:
1912 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1913 "/" DEFAULT_UST_TRACE_UID_PATH,
1914 app->uid, app->bits_per_long);
1915 break;
1916 default:
1917 assert(0);
1918 goto error;
1919 }
1920 if (ret < 0) {
1921 			PERROR("snprintf UST shadow copy session");
1922 assert(0);
1923 goto error;
1924 }
1925 strncat(ua_sess->shm_path, tmp_shm_path,
1926 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
1927 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1928 }
1929 return;
1930
1931 error:
1932 consumer_output_put(ua_sess->consumer);
1933 }
1934
1935 /*
1936  * Lookup session wrapper.
1937 */
1938 static
1939 void __lookup_session_by_app(const struct ltt_ust_session *usess,
1940 struct ust_app *app, struct lttng_ht_iter *iter)
1941 {
1942 /* Get right UST app session from app */
1943 lttng_ht_lookup(app->sessions, &usess->id, iter);
1944 }
1945
1946 /*
1947 * Return ust app session from the app session hashtable using the UST session
1948 * id.
1949 */
1950 static struct ust_app_session *lookup_session_by_app(
1951 const struct ltt_ust_session *usess, struct ust_app *app)
1952 {
1953 struct lttng_ht_iter iter;
1954 struct lttng_ht_node_u64 *node;
1955
1956 __lookup_session_by_app(usess, app, &iter);
1957 node = lttng_ht_iter_get_node_u64(&iter);
1958 if (node == NULL) {
1959 goto error;
1960 }
1961
1962 return caa_container_of(node, struct ust_app_session, node);
1963
1964 error:
1965 return NULL;
1966 }
1967
1968 /*
1969 * Setup buffer registry per PID for the given session and application. If none
1970 * is found, a new one is created, added to the global registry and
1971  * initialized. If regp is valid, it's set with the found or created object.
1972 *
1973 * Return 0 on success or else a negative value.
1974 */
1975 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
1976 struct ust_app *app, struct buffer_reg_pid **regp)
1977 {
1978 int ret = 0;
1979 struct buffer_reg_pid *reg_pid;
1980
1981 assert(ua_sess);
1982 assert(app);
1983
1984 rcu_read_lock();
1985
1986 reg_pid = buffer_reg_pid_find(ua_sess->id);
1987 if (!reg_pid) {
1988 /*
1989 		 * This is the create channel path, meaning that if there is NO
1990 * registry available, we have to create one for this session.
1991 */
1992 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
1993 ua_sess->root_shm_path, ua_sess->shm_path);
1994 if (ret < 0) {
1995 goto error;
1996 }
1997 } else {
1998 goto end;
1999 }
2000
2001 /* Initialize registry. */
2002 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2003 app->bits_per_long, app->uint8_t_alignment,
2004 app->uint16_t_alignment, app->uint32_t_alignment,
2005 app->uint64_t_alignment, app->long_alignment,
2006 app->byte_order, app->version.major, app->version.minor,
2007 reg_pid->root_shm_path, reg_pid->shm_path,
2008 ua_sess->effective_credentials.uid,
2009 ua_sess->effective_credentials.gid, ua_sess->tracing_id,
2010 app->uid);
2011 if (ret < 0) {
2012 /*
2013 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2014 * destroy the buffer registry, because it is always expected
2015 * that if the buffer registry can be found, its ust registry is
2016 * non-NULL.
2017 */
2018 buffer_reg_pid_destroy(reg_pid);
2019 goto error;
2020 }
2021
2022 buffer_reg_pid_add(reg_pid);
2023
2024 DBG3("UST app buffer registry per PID created successfully");
2025
2026 end:
2027 if (regp) {
2028 *regp = reg_pid;
2029 }
2030 error:
2031 rcu_read_unlock();
2032 return ret;
2033 }
2034
2035 /*
2036 * Setup buffer registry per UID for the given session and application. If none
2037 * is found, a new one is created, added to the global registry and
2038  * initialized. If regp is valid, it's set with the found or created object.
2039 *
2040 * Return 0 on success or else a negative value.
2041 */
2042 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
2043 struct ust_app_session *ua_sess,
2044 struct ust_app *app, struct buffer_reg_uid **regp)
2045 {
2046 int ret = 0;
2047 struct buffer_reg_uid *reg_uid;
2048
2049 assert(usess);
2050 assert(app);
2051
2052 rcu_read_lock();
2053
2054 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2055 if (!reg_uid) {
2056 /*
2057 		 * This is the create channel path, meaning that if there is NO
2058 * registry available, we have to create one for this session.
2059 */
2060 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
2061 LTTNG_DOMAIN_UST, &reg_uid,
2062 ua_sess->root_shm_path, ua_sess->shm_path);
2063 if (ret < 0) {
2064 goto error;
2065 }
2066 } else {
2067 goto end;
2068 }
2069
2070 /* Initialize registry. */
2071 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
2072 app->bits_per_long, app->uint8_t_alignment,
2073 app->uint16_t_alignment, app->uint32_t_alignment,
2074 app->uint64_t_alignment, app->long_alignment,
2075 app->byte_order, app->version.major,
2076 app->version.minor, reg_uid->root_shm_path,
2077 reg_uid->shm_path, usess->uid, usess->gid,
2078 ua_sess->tracing_id, app->uid);
2079 if (ret < 0) {
2080 /*
2081 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2082 * destroy the buffer registry, because it is always expected
2083 * that if the buffer registry can be found, its ust registry is
2084 * non-NULL.
2085 */
2086 buffer_reg_uid_destroy(reg_uid, NULL);
2087 goto error;
2088 }
2089 /* Add node to teardown list of the session. */
2090 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2091
2092 buffer_reg_uid_add(reg_uid);
2093
2094 DBG3("UST app buffer registry per UID created successfully");
2095 end:
2096 if (regp) {
2097 *regp = reg_uid;
2098 }
2099 error:
2100 rcu_read_unlock();
2101 return ret;
2102 }
2103
2104 /*
2105 * Create a session on the tracer side for the given app.
2106 *
2107  * On success, ua_sess_ptr is populated with the session pointer; on error, it
2108  * is left untouched. If the session was created, is_created is set to 1; on
2109  * error, it is left untouched. Note that ua_sess_ptr is mandatory while
2110  * is_created can be NULL.
2111  *
2112  * Returns 0 on success or else a negative code: -ENOMEM on allocation
2113  * failure, or -ENOTCONN, the default code when ustctl_create_session fails.
2114 */
2115 static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
2116 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2117 int *is_created)
2118 {
2119 int ret, created = 0;
2120 struct ust_app_session *ua_sess;
2121
2122 assert(usess);
2123 assert(app);
2124 assert(ua_sess_ptr);
2125
2126 health_code_update();
2127
2128 ua_sess = lookup_session_by_app(usess, app);
2129 if (ua_sess == NULL) {
2130 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
2131 app->pid, usess->id);
2132 ua_sess = alloc_ust_app_session();
2133 if (ua_sess == NULL) {
2134 			/* Only malloc can fail, so something is really wrong. */
2135 ret = -ENOMEM;
2136 goto error;
2137 }
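		/*
		 * Fill the new app session from the tracing session: ids,
		 * credentials, buffer type, consumer output and trace paths.
		 */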
2138 shadow_copy_session(ua_sess, usess, app);
2139 created = 1;
2140 }
2141
2142 switch (usess->buffer_type) {
2143 case LTTNG_BUFFER_PER_PID:
2144 /* Init local registry. */
2145 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
2146 if (ret < 0) {
2147 delete_ust_app_session(-1, ua_sess, app);
2148 goto error;
2149 }
2150 break;
2151 case LTTNG_BUFFER_PER_UID:
2152 /* Look for a global registry. If none exists, create one. */
2153 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
2154 if (ret < 0) {
2155 delete_ust_app_session(-1, ua_sess, app);
2156 goto error;
2157 }
2158 break;
2159 default:
2160 assert(0);
2161 ret = -EINVAL;
2162 goto error;
2163 }
2164
2165 health_code_update();
2166
2167 if (ua_sess->handle == -1) {
2168 pthread_mutex_lock(&app->sock_lock);
2169 ret = ustctl_create_session(app->sock);
2170 pthread_mutex_unlock(&app->sock_lock);
2171 if (ret < 0) {
2172 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2173 ERR("Creating session for app pid %d with ret %d",
2174 app->pid, ret);
2175 } else {
2176 DBG("UST app creating session failed. Application is dead");
2177 /*
2178 * This is normal behavior, an application can die during the
2179 * creation process. Don't report an error so the execution can
2180 * continue normally. This will get flagged ENOTCONN and the
2181 * caller will handle it.
2182 */
2183 ret = 0;
2184 }
2185 delete_ust_app_session(-1, ua_sess, app);
2186 if (ret != -ENOMEM) {
2187 /*
2188 			 * Tracer is probably gone or hit an internal error, so behave
2189 			 * as if it will soon unregister or become unusable.
2190 */
2191 ret = -ENOTCONN;
2192 }
2193 goto error;
2194 }
2195
2196 ua_sess->handle = ret;
2197
2198 /* Add ust app session to app's HT */
2199 lttng_ht_node_init_u64(&ua_sess->node,
2200 ua_sess->tracing_id);
2201 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
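		/*
		 * Also index the session by its ustctl object descriptor
		 * (handle) so it can later be looked up by handle.
		 */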
2202 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2203 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2204 &ua_sess->ust_objd_node);
2205
2206 DBG2("UST app session created successfully with handle %d", ret);
2207 }
2208
2209 *ua_sess_ptr = ua_sess;
2210 if (is_created) {
2211 *is_created = created;
2212 }
2213
2214 /* Everything went well. */
2215 ret = 0;
2216
2217 error:
2218 health_code_update();
2219 return ret;
2220 }
2221
2222 /*
2223 * Match function for a hash table lookup of ust_app_ctx.
2224 *
2225 * It matches an ust app context based on the context type and, in the case
2226 * of perf counters, their name.
2227 */
2228 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2229 {
2230 struct ust_app_ctx *ctx;
2231 const struct lttng_ust_context_attr *key;
2232
2233 assert(node);
2234 assert(_key);
2235
2236 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2237 key = _key;
2238
2239 /* Context type */
2240 if (ctx->ctx.ctx != key->ctx) {
2241 goto no_match;
2242 }
2243
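	/* For named context types, the names must match as well. */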
2244 	switch (key->ctx) {
2245 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
2246 if (strncmp(key->u.perf_counter.name,
2247 ctx->ctx.u.perf_counter.name,
2248 sizeof(key->u.perf_counter.name))) {
2249 goto no_match;
2250 }
2251 break;
2252 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2253 if (strcmp(key->u.app_ctx.provider_name,
2254 ctx->ctx.u.app_ctx.provider_name) ||
2255 strcmp(key->u.app_ctx.ctx_name,
2256 ctx->ctx.u.app_ctx.ctx_name)) {
2257 goto no_match;
2258 }
2259 break;
2260 default:
2261 break;
2262 }
2263
2264 /* Match. */
2265 return 1;
2266
2267 no_match:
2268 return 0;
2269 }
2270
2271 /*
2272  * Look up an ust app context from an lttng_ust_context_attr.
2273  *
2274  * Must be called while holding the RCU read side lock.
2275  * Return an ust_app_ctx object or NULL if not found.
2276 */
2277 static
2278 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2279 struct lttng_ust_context_attr *uctx)
2280 {
2281 struct lttng_ht_iter iter;
2282 struct lttng_ht_node_ulong *node;
2283 struct ust_app_ctx *app_ctx = NULL;
2284
2285 assert(uctx);
2286 assert(ht);
2287
2288 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2289 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2290 ht_match_ust_app_ctx, uctx, &iter.iter);
2291 node = lttng_ht_iter_get_node_ulong(&iter);
2292 if (!node) {
2293 goto end;
2294 }
2295
2296 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2297
2298 end:
2299 return app_ctx;
2300 }
2301
2302 /*
2303 * Create a context for the channel on the tracer.
2304 *
2305 * Called with UST app session lock held and a RCU read side lock.
2306 */
2307 static
2308 int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
2309 struct lttng_ust_context_attr *uctx,
2310 struct ust_app *app)
2311 {
2312 int ret = 0;
2313 struct ust_app_ctx *ua_ctx;
2314
2315 DBG2("UST app adding context to channel %s", ua_chan->name);
2316
2317 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2318 if (ua_ctx) {
2319 ret = -EEXIST;
2320 goto error;
2321 }
2322
2323 ua_ctx = alloc_ust_app_ctx(uctx);
2324 if (ua_ctx == NULL) {
2325 /* malloc failed */
2326 ret = -ENOMEM;
2327 goto error;
2328 }
2329
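	/*
	 * Index the context by type for lookups; the list keeps contexts in
	 * creation order.
	 */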
2330 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2331 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2332 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2333
2334 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2335 if (ret < 0) {
2336 goto error;
2337 }
2338
2339 error:
2340 return ret;
2341 }
2342
2343 /*
2344 * Enable on the tracer side a ust app event for the session and channel.
2345 *
2346 * Called with UST app session lock held.
2347 */
2348 static
2349 int enable_ust_app_event(struct ust_app_session *ua_sess,
2350 struct ust_app_event *ua_event, struct ust_app *app)
2351 {
2352 int ret;
2353
2354 ret = enable_ust_event(app, ua_sess, ua_event);
2355 if (ret < 0) {
2356 goto error;
2357 }
2358
2359 ua_event->enabled = 1;
2360
2361 error:
2362 return ret;
2363 }
2364
2365 /*
2366 * Disable on the tracer side a ust app event for the session and channel.
2367 */
2368 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2369 struct ust_app_event *ua_event, struct ust_app *app)
2370 {
2371 int ret;
2372
2373 ret = disable_ust_event(app, ua_sess, ua_event);
2374 if (ret < 0) {
2375 goto error;
2376 }
2377
2378 ua_event->enabled = 0;
2379
2380 error:
2381 return ret;
2382 }
2383
2384 /*
2385  * Disable the given ust app channel for the session on the tracer side.
2386 */
2387 static
2388 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2389 struct ust_app_channel *ua_chan, struct ust_app *app)
2390 {
2391 int ret;
2392
2393 ret = disable_ust_channel(app, ua_sess, ua_chan);
2394 if (ret < 0) {
2395 goto error;
2396 }
2397
2398 ua_chan->enabled = 0;
2399
2400 error:
2401 return ret;
2402 }
2403
2404 /*
2405 * Lookup ust app channel for session and enable it on the tracer side. This
2406 * MUST be called with a RCU read side lock acquired.
2407 */
2408 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2409 struct ltt_ust_channel *uchan, struct ust_app *app)
2410 {
2411 int ret = 0;
2412 struct lttng_ht_iter iter;
2413 struct lttng_ht_node_str *ua_chan_node;
2414 struct ust_app_channel *ua_chan;
2415
2416 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2417 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2418 if (ua_chan_node == NULL) {
2419 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2420 uchan->name, ua_sess->tracing_id);
2421 goto error;
2422 }
2423
2424 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2425
2426 ret = enable_ust_channel(app, ua_sess, ua_chan);
2427 if (ret < 0) {
2428 goto error;
2429 }
2430
2431 error:
2432 return ret;
2433 }
2434
2435 /*
2436 * Ask the consumer to create a channel and get it if successful.
2437 *
2438 * Called with UST app session lock held.
2439 *
2440 * Return 0 on success or else a negative value.
2441 */
2442 static int do_consumer_create_channel(struct ltt_ust_session *usess,
2443 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2444 int bitness, struct ust_registry_session *registry,
2445 uint64_t trace_archive_id)
2446 {
2447 int ret;
2448 unsigned int nb_fd = 0;
2449 struct consumer_socket *socket;
2450
2451 assert(usess);
2452 assert(ua_sess);
2453 assert(ua_chan);
2454 assert(registry);
2455
2456 rcu_read_lock();
2457 health_code_update();
2458
2459 /* Get the right consumer socket for the application. */
2460 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2461 if (!socket) {
2462 ret = -EINVAL;
2463 goto error;
2464 }
2465
2466 health_code_update();
2467
2468 /* Need one fd for the channel. */
2469 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2470 if (ret < 0) {
2471 ERR("Exhausted number of available FD upon create channel");
2472 goto error;
2473 }
2474
2475 /*
2476 	 * Ask consumer to create the channel. The consumer will return the
2477 	 * number of streams we have to expect.
2478 */
2479 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2480 registry, usess->current_trace_chunk);
2481 if (ret < 0) {
2482 goto error_ask;
2483 }
2484
2485 /*
2486 	 * Compute the number of fds needed before receiving them. Two fds are
2487 	 * needed per stream (DEFAULT_UST_STREAM_FD_NUM is 2 here).
2488 */
2489 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2490
2491 	/* Reserve the number of file descriptors we need. */
2492 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2493 if (ret < 0) {
2494 ERR("Exhausted number of available FD upon create channel");
2495 goto error_fd_get_stream;
2496 }
2497
2498 health_code_update();
2499
2500 /*
2501 	 * Now get the channel from the consumer. This call will populate the stream
2502 * list of that channel and set the ust objects.
2503 */
2504 if (usess->consumer->enabled) {
2505 ret = ust_consumer_get_channel(socket, ua_chan);
2506 if (ret < 0) {
2507 goto error_destroy;
2508 }
2509 }
2510
2511 rcu_read_unlock();
2512 return 0;
2513
2514 error_destroy:
2515 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2516 error_fd_get_stream:
2517 /*
2518 * Initiate a destroy channel on the consumer since we had an error
2519 * handling it on our side. The return value is of no importance since we
2520 * already have a ret value set by the previous error that we need to
2521 * return.
2522 */
2523 (void) ust_consumer_destroy_channel(socket, ua_chan);
2524 error_ask:
2525 lttng_fd_put(LTTNG_FD_APPS, 1);
2526 error:
2527 health_code_update();
2528 rcu_read_unlock();
2529 return ret;
2530 }
2531
2532 /*
2533 * Duplicate the ust data object of the ust app stream and save it in the
2534 * buffer registry stream.
2535 *
2536 * Return 0 on success or else a negative value.
2537 */
2538 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2539 struct ust_app_stream *stream)
2540 {
2541 int ret;
2542
2543 assert(reg_stream);
2544 assert(stream);
2545
2546 	/* Reserve the number of file descriptors we need. */
2547 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2548 if (ret < 0) {
2549 ERR("Exhausted number of available FD upon duplicate stream");
2550 goto error;
2551 }
2552
2553 /* Duplicate object for stream once the original is in the registry. */
2554 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2555 reg_stream->obj.ust);
2556 if (ret < 0) {
2557 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2558 reg_stream->obj.ust, stream->obj, ret);
2559 lttng_fd_put(LTTNG_FD_APPS, 2);
2560 goto error;
2561 }
2562 stream->handle = stream->obj->handle;
2563
2564 error:
2565 return ret;
2566 }
2567
2568 /*
2569  * Duplicate the ust data object of the ust app channel and save it in the
2570 * buffer registry channel.
2571 *
2572 * Return 0 on success or else a negative value.
2573 */
2574 static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2575 struct ust_app_channel *ua_chan)
2576 {
2577 int ret;
2578
2579 assert(reg_chan);
2580 assert(ua_chan);
2581
2582 	/* Need one fd for the channel. */
2583 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2584 if (ret < 0) {
2585 ERR("Exhausted number of available FD upon duplicate channel");
2586 goto error_fd_get;
2587 }
2588
2589 /* Duplicate object for stream once the original is in the registry. */
2590 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2591 if (ret < 0) {
2592 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2593 reg_chan->obj.ust, ua_chan->obj, ret);
2594 goto error;
2595 }
2596 ua_chan->handle = ua_chan->obj->handle;
2597
2598 return 0;
2599
2600 error:
2601 lttng_fd_put(LTTNG_FD_APPS, 1);
2602 error_fd_get:
2603 return ret;
2604 }
2605
2606 /*
2607 * For a given channel buffer registry, setup all streams of the given ust
2608 * application channel.
2609 *
2610 * Return 0 on success or else a negative value.
2611 */
2612 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2613 struct ust_app_channel *ua_chan,
2614 struct ust_app *app)
2615 {
2616 int ret = 0;
2617 struct ust_app_stream *stream, *stmp;
2618
2619 assert(reg_chan);
2620 assert(ua_chan);
2621
2622 DBG2("UST app setup buffer registry stream");
2623
2624 	/* Transfer each stream's ust object to the buffer registry. */
2625 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2626 struct buffer_reg_stream *reg_stream;
2627
2628 ret = buffer_reg_stream_create(&reg_stream);
2629 if (ret < 0) {
2630 goto error;
2631 }
2632
2633 /*
2634 * Keep original pointer and nullify it in the stream so the delete
2635 * stream call does not release the object.
2636 */
2637 reg_stream->obj.ust = stream->obj;
2638 stream->obj = NULL;
2639 buffer_reg_stream_add(reg_stream, reg_chan);
2640
2641 /* We don't need the streams anymore. */
2642 cds_list_del(&stream->list);
2643 delete_ust_app_stream(-1, stream, app);
2644 }
2645
2646 error:
2647 return ret;
2648 }
2649
2650 /*
2651 * Create a buffer registry channel for the given session registry and
2652 * application channel object. If regp pointer is valid, it's set with the
2653  * created object. Note that the created object is also added to the session
2654  * registry's channel hash table.
2655 *
2656 * Return 0 on success else a negative value.
2657 */
2658 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2659 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2660 {
2661 int ret;
2662 struct buffer_reg_channel *reg_chan = NULL;
2663
2664 assert(reg_sess);
2665 assert(ua_chan);
2666
2667 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2668
2669 /* Create buffer registry channel. */
2670 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2671 if (ret < 0) {
2672 goto error_create;
2673 }
2674 assert(reg_chan);
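	/* Record the consumer key and buffer geometry of the channel. */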
2675 reg_chan->consumer_key = ua_chan->key;
2676 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
2677 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
2678
2679 /* Create and add a channel registry to session. */
2680 ret = ust_registry_channel_add(reg_sess->reg.ust,
2681 ua_chan->tracing_channel_id);
2682 if (ret < 0) {
2683 goto error;
2684 }
2685 buffer_reg_channel_add(reg_sess, reg_chan);
2686
2687 if (regp) {
2688 *regp = reg_chan;
2689 }
2690
2691 return 0;
2692
2693 error:
2694 /* Safe because the registry channel object was not added to any HT. */
2695 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2696 error_create:
2697 return ret;
2698 }
2699
2700 /*
2701 * Setup buffer registry channel for the given session registry and application
2702 * channel object. If regp pointer is valid, it's set with the created object.
2703 *
2704 * Return 0 on success else a negative value.
2705 */
2706 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2707 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
2708 struct ust_app *app)
2709 {
2710 int ret;
2711
2712 assert(reg_sess);
2713 assert(reg_chan);
2714 assert(ua_chan);
2715 assert(ua_chan->obj);
2716
2717 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2718
2719 /* Setup all streams for the registry. */
2720 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
2721 if (ret < 0) {
2722 goto error;
2723 }
2724
2725 reg_chan->obj.ust = ua_chan->obj;
2726 ua_chan->obj = NULL;
2727
2728 return 0;
2729
2730 error:
2731 buffer_reg_channel_remove(reg_sess, reg_chan);
2732 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2733 return ret;
2734 }
2735
2736 /*
2737 * Send buffer registry channel to the application.
2738 *
2739 * Return 0 on success else a negative value.
2740 */
2741 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2742 struct ust_app *app, struct ust_app_session *ua_sess,
2743 struct ust_app_channel *ua_chan)
2744 {
2745 int ret;
2746 struct buffer_reg_stream *reg_stream;
2747
2748 assert(reg_chan);
2749 assert(app);
2750 assert(ua_sess);
2751 assert(ua_chan);
2752
2753 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2754
2755 ret = duplicate_channel_object(reg_chan, ua_chan);
2756 if (ret < 0) {
2757 goto error;
2758 }
2759
2760 /* Send channel to the application. */
2761 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
2762 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2763 ret = -ENOTCONN; /* Caused by app exiting. */
2764 goto error;
2765 } else if (ret < 0) {
2766 goto error;
2767 }
2768
2769 health_code_update();
2770
2771 /* Send all streams to application. */
2772 pthread_mutex_lock(&reg_chan->stream_list_lock);
2773 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2774 struct ust_app_stream stream;
2775
2776 ret = duplicate_stream_object(reg_stream, &stream);
2777 if (ret < 0) {
2778 goto error_stream_unlock;
2779 }
2780
2781 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2782 if (ret < 0) {
2783 (void) release_ust_app_stream(-1, &stream, app);
2784 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2785 ret = -ENOTCONN; /* Caused by app exiting. */
2786 }
2787 goto error_stream_unlock;
2788 }
2789
2790 /*
2791 * The return value is not important here. This function will output an
2792 * error if needed.
2793 */
2794 (void) release_ust_app_stream(-1, &stream, app);
2795 }
2796 ua_chan->is_sent = 1;
2797
2798 error_stream_unlock:
2799 pthread_mutex_unlock(&reg_chan->stream_list_lock);
2800 error:
2801 return ret;
2802 }
2803
2804 /*
2805  * Create the channel with per-UID buffers and send it to the application.
2806 *
2807 * This MUST be called with a RCU read side lock acquired.
2808 * The session list lock and the session's lock must be acquired.
2809 *
2810 * Return 0 on success else a negative value.
2811 */
2812 static int create_channel_per_uid(struct ust_app *app,
2813 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2814 struct ust_app_channel *ua_chan)
2815 {
2816 int ret;
2817 struct buffer_reg_uid *reg_uid;
2818 struct buffer_reg_channel *reg_chan;
2819 struct ltt_session *session = NULL;
2820 enum lttng_error_code notification_ret;
2821 struct ust_registry_channel *chan_reg;
2822
2823 assert(app);
2824 assert(usess);
2825 assert(ua_sess);
2826 assert(ua_chan);
2827
2828 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
2829
2830 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2831 /*
2832 * The session creation handles the creation of this global registry
2833 	 * object. If none can be found, there is a code flow problem or a
2834 * teardown race.
2835 */
2836 assert(reg_uid);
2837
2838 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
2839 reg_uid);
2840 if (reg_chan) {
2841 goto send_channel;
2842 }
2843
2844 /* Create the buffer registry channel object. */
2845 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
2846 if (ret < 0) {
2847 ERR("Error creating the UST channel \"%s\" registry instance",
2848 ua_chan->name);
2849 goto error;
2850 }
2851
2852 session = session_find_by_id(ua_sess->tracing_id);
2853 assert(session);
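	/*
	 * Both the session list lock and this session's lock must already be
	 * held by the caller: trylock on a held mutex fails, which makes the
	 * asserts below pass.
	 */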
2854 assert(pthread_mutex_trylock(&session->lock));
2855 assert(session_trylock_list());
2856
2857 /*
2858 * Create the buffers on the consumer side. This call populates the
2859 	 * ust app channel object with all streams and data objects.
2860 */
2861 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2862 app->bits_per_long, reg_uid->registry->reg.ust,
2863 session->most_recent_chunk_id.value);
2864 if (ret < 0) {
2865 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2866 ua_chan->name);
2867
2868 /*
2869 * Let's remove the previously created buffer registry channel so
2870 * it's not visible anymore in the session registry.
2871 */
2872 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
2873 ua_chan->tracing_channel_id, false);
2874 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
2875 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2876 goto error;
2877 }
2878
2879 /*
2880 	 * Set up the streams and add them to the session registry.
2881 */
2882 ret = setup_buffer_reg_channel(reg_uid->registry,
2883 ua_chan, reg_chan, app);
2884 if (ret < 0) {
2885 ERR("Error setting up UST channel \"%s\"", ua_chan->name);
2886 goto error;
2887 }
2888
2889 /* Notify the notification subsystem of the channel's creation. */
2890 pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
2891 chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
2892 ua_chan->tracing_channel_id);
2893 assert(chan_reg);
2894 chan_reg->consumer_key = ua_chan->key;
2895 chan_reg = NULL;
2896 pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
2897
2898 notification_ret = notification_thread_command_add_channel(
2899 notification_thread_handle, session->name,
2900 ua_sess->effective_credentials.uid,
2901 ua_sess->effective_credentials.gid, ua_chan->name,
2902 ua_chan->key, LTTNG_DOMAIN_UST,
2903 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
2904 if (notification_ret != LTTNG_OK) {
2905 ret = - (int) notification_ret;
2906 ERR("Failed to add channel to notification thread");
2907 goto error;
2908 }
2909
2910 send_channel:
2911 /* Send buffers to the application. */
2912 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
2913 if (ret < 0) {
2914 if (ret != -ENOTCONN) {
2915 ERR("Error sending channel to application");
2916 }
2917 goto error;
2918 }
2919
2920 error:
2921 if (session) {
2922 session_put(session);
2923 }
2924 return ret;
2925 }
2926
2927 /*
2928  * Create the channel with per-PID buffers and send it to the application.
2929 *
2930 * Called with UST app session lock held.
2931 * The session list lock and the session's lock must be acquired.
2932 *
2933 * Return 0 on success else a negative value.
2934 */
2935 static int create_channel_per_pid(struct ust_app *app,
2936 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2937 struct ust_app_channel *ua_chan)
2938 {
2939 int ret;
2940 struct ust_registry_session *registry;
2941 enum lttng_error_code cmd_ret;
2942 struct ltt_session *session = NULL;
2943 uint64_t chan_reg_key;
2944 struct ust_registry_channel *chan_reg;
2945
2946 assert(app);
2947 assert(usess);
2948 assert(ua_sess);
2949 assert(ua_chan);
2950
2951 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2952
2953 rcu_read_lock();
2954
2955 registry = get_session_registry(ua_sess);
2956 /* The UST app session lock is held, registry shall not be null. */
2957 assert(registry);
2958
2959 /* Create and add a new channel registry to session. */
2960 ret = ust_registry_channel_add(registry, ua_chan->key);
2961 if (ret < 0) {
2962 ERR("Error creating the UST channel \"%s\" registry instance",
2963 ua_chan->name);
2964 goto error;
2965 }
2966
2967 session = session_find_by_id(ua_sess->tracing_id);
2968 assert(session);
2969
2970 assert(pthread_mutex_trylock(&session->lock));
2971 assert(session_trylock_list());
2972
2973 /* Create and get channel on the consumer side. */
2974 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2975 app->bits_per_long, registry,
2976 session->most_recent_chunk_id.value);
2977 if (ret < 0) {
2978 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2979 ua_chan->name);
2980 goto error_remove_from_registry;
2981 }
2982
2983 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2984 if (ret < 0) {
2985 if (ret != -ENOTCONN) {
2986 ERR("Error sending channel to application");
2987 }
2988 goto error_remove_from_registry;
2989 }
2990
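	/*
	 * Publish the consumer key in the channel registry, then notify the
	 * notification subsystem of the channel's creation.
	 */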
2991 chan_reg_key = ua_chan->key;
2992 pthread_mutex_lock(&registry->lock);
2993 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
2994 assert(chan_reg);
2995 chan_reg->consumer_key = ua_chan->key;
2996 pthread_mutex_unlock(&registry->lock);
2997
2998 cmd_ret = notification_thread_command_add_channel(
2999 notification_thread_handle, session->name,
3000 ua_sess->effective_credentials.uid,
3001 ua_sess->effective_credentials.gid, ua_chan->name,
3002 ua_chan->key, LTTNG_DOMAIN_UST,
3003 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3004 if (cmd_ret != LTTNG_OK) {
3005 ret = - (int) cmd_ret;
3006 ERR("Failed to add channel to notification thread");
3007 goto error_remove_from_registry;
3008 }
3009
3010 error_remove_from_registry:
3011 if (ret) {
3012 ust_registry_channel_del_free(registry, ua_chan->key, false);
3013 }
3014 error:
3015 rcu_read_unlock();
3016 if (session) {
3017 session_put(session);
3018 }
3019 return ret;
3020 }
3021
3022 /*
3023 * From an already allocated ust app channel, create the channel buffers if
3024 * needed and send them to the application. This MUST be called with a RCU read
3025 * side lock acquired.
3026 *
3027 * Called with UST app session lock held.
3028 *
3029 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3030 * the application exited concurrently.
3031 */
3032 static int ust_app_channel_send(struct ust_app *app,
3033 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3034 struct ust_app_channel *ua_chan)
3035 {
3036 int ret;
3037
3038 assert(app);
3039 assert(usess);
3040 assert(usess->active);
3041 assert(ua_sess);
3042 assert(ua_chan);
3043
3044 /* Handle buffer type before sending the channel to the application. */
3045 switch (usess->buffer_type) {
3046 case LTTNG_BUFFER_PER_UID:
3047 {
3048 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3049 if (ret < 0) {
3050 goto error;
3051 }
3052 break;
3053 }
3054 case LTTNG_BUFFER_PER_PID:
3055 {
3056 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3057 if (ret < 0) {
3058 goto error;
3059 }
3060 break;
3061 }
3062 default:
3063 assert(0);
3064 ret = -EINVAL;
3065 goto error;
3066 }
3067
3068 /* Initialize ust objd object using the received handle and add it. */
3069 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3070 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
3071
3072 /* If channel is not enabled, disable it on the tracer */
3073 if (!ua_chan->enabled) {
3074 ret = disable_ust_channel(app, ua_sess, ua_chan);
3075 if (ret < 0) {
3076 goto error;
3077 }
3078 }
3079
3080 error:
3081 return ret;
3082 }
3083
3084 /*
3085  * Find or allocate a UST app channel and return it through ua_chanp if not NULL.
3086 *
3087 * Called with UST app session lock and RCU read-side lock held.
3088 *
3089 * Return 0 on success or else a negative value.
3090 */
3091 static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
3092 struct ltt_ust_channel *uchan,
3093 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
3094 struct ust_app_channel **ua_chanp)
3095 {
3096 int ret = 0;
3097 struct lttng_ht_iter iter;
3098 struct lttng_ht_node_str *ua_chan_node;
3099 struct ust_app_channel *ua_chan;
3100
3101 /* Lookup channel in the ust app session */
3102 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3103 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
3104 if (ua_chan_node != NULL) {
3105 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3106 goto end;
3107 }
3108
3109 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
3110 if (ua_chan == NULL) {
3111 /* Only malloc can fail here */
3112 ret = -ENOMEM;
3113 goto error;
3114 }
3115 shadow_copy_channel(ua_chan, uchan);
3116
3117 /* Set channel type. */
3118 ua_chan->attr.type = type;
3119
3120 	/* Add the allocated channel to the session's channel hash table. */
3121 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
3122 end:
3123 if (ua_chanp) {
3124 *ua_chanp = ua_chan;
3125 }
3126
3127 /* Everything went well. */
3128 return 0;
3129
3130 error:
3131 return ret;
3132 }
3133
3134 /*
3135 * Create UST app event and create it on the tracer side.
3136 *
3137 * Called with ust app session mutex held.
3138 */
3139 static
3140 int create_ust_app_event(struct ust_app_session *ua_sess,
3141 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
3142 struct ust_app *app)
3143 {
3144 int ret = 0;
3145 struct ust_app_event *ua_event;
3146
3147 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3148 if (ua_event == NULL) {
3149 /* Only failure mode of alloc_ust_app_event(). */
3150 ret = -ENOMEM;
3151 goto end;
3152 }
3153 shadow_copy_event(ua_event, uevent);
3154
3155 /* Create it on the tracer side */
3156 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
3157 if (ret < 0) {
3158 /*
3159 * Not found previously means that it does not exist on the
3160 * tracer. If the application reports that the event existed,
3161 * it means there is a bug in the sessiond or lttng-ust
3162 * (or corruption, etc.)
3163 */
3164 if (ret == -LTTNG_UST_ERR_EXIST) {
3165 ERR("Tracer for application reported that an event being created already existed: "
3166 "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
3167 uevent->attr.name,
3168 app->pid, app->ppid, app->uid,
3169 app->gid);
3170 }
3171 goto error;
3172 }
3173
3174 add_unique_ust_app_event(ua_chan, ua_event);
3175
3176 DBG2("UST app create event %s for PID %d completed", ua_event->name,
3177 app->pid);
3178
3179 end:
3180 return ret;
3181
3182 error:
3183 /* Valid. Calling here is already in a read side lock */
3184 delete_ust_app_event(-1, ua_event, app);
3185 return ret;
3186 }
3187
3188 /*
3189 * Create UST metadata and open it on the tracer side.
3190 *
3191 * Called with UST app session lock held and RCU read side lock.
3192 */
3193 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
3194 struct ust_app *app, struct consumer_output *consumer)
3195 {
3196 int ret = 0;
3197 struct ust_app_channel *metadata;
3198 struct consumer_socket *socket;
3199 struct ust_registry_session *registry;
3200 struct ltt_session *session = NULL;
3201
3202 assert(ua_sess);
3203 assert(app);
3204 assert(consumer);
3205
3206 registry = get_session_registry(ua_sess);
3207 	/* The UST app session lock is held; registry shall not be null. */
3208 assert(registry);
3209
3210 pthread_mutex_lock(&registry->lock);
3211
3212 /* Metadata already exists for this registry or it was closed previously */
3213 if (registry->metadata_key || registry->metadata_closed) {
3214 ret = 0;
3215 goto error;
3216 }
3217
3218 /* Allocate UST metadata */
3219 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
3220 if (!metadata) {
3221 /* malloc() failed */
3222 ret = -ENOMEM;
3223 goto error;
3224 }
3225
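	/* The metadata channel inherits the session's metadata attributes. */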
3226 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
3227
3228 /* Need one fd for the channel. */
3229 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3230