Fix: channel errors on local stress-tests
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _LGPL_SOURCE
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <pthread.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <unistd.h>
29 #include <urcu/compiler.h>
30 #include <lttng/ust-error.h>
31 #include <signal.h>
32
33 #include <common/common.h>
34 #include <common/sessiond-comm/sessiond-comm.h>
35
36 #include "buffer-registry.h"
37 #include "fd-limit.h"
38 #include "health-sessiond.h"
39 #include "ust-app.h"
40 #include "ust-consumer.h"
41 #include "ust-ctl.h"
42 #include "utils.h"
43 #include "session.h"
44 #include "lttng-sessiond.h"
45 #include "notification-thread-commands.h"
46 #include "rotate.h"
47
48 static
49 int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
50
51 /* Next available channel key. Access under next_channel_key_lock. */
52 static uint64_t _next_channel_key;
53 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
54
55 /* Next available session ID. Access under next_session_id_lock. */
56 static uint64_t _next_session_id;
57 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
58
59 /*
60 * Return the incremented value of next_channel_key.
61 */
62 static uint64_t get_next_channel_key(void)
63 {
64 uint64_t ret;
65
66 pthread_mutex_lock(&next_channel_key_lock);
67 ret = ++_next_channel_key;
68 pthread_mutex_unlock(&next_channel_key_lock);
69 return ret;
70 }
71
72 /*
73 * Return the atomically incremented value of next_session_id.
74 */
75 static uint64_t get_next_session_id(void)
76 {
77 uint64_t ret;
78
79 pthread_mutex_lock(&next_session_id_lock);
80 ret = ++_next_session_id;
81 pthread_mutex_unlock(&next_session_id_lock);
82 return ret;
83 }
84
/*
 * Translate channel attributes from the lttng_ust_channel_attr layout to
 * the ustctl consumer channel attribute layout, field by field.
 */
static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	/* Copy channel attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
	/* The blocking timeout is nested in a union in the UST layout. */
	attr->blocking_timeout = uattr->u.s.blocking_timeout;
}
98
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes which are the event
 * name, the filter bytecode, the loglevel and the exclusions.
 *
 * Return 1 on a full match, 0 otherwise.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exists, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exists, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}


	/* Match. */
	return 1;

no_match:
	return 0;
}
176
/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 *
 * The add is asserted to succeed: callers are expected to have verified
 * beforehand that no identical event already exists in the channel.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	/* Build the 4-part lookup key matched by ht_match_ust_app_event(). */
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	/* A different returned node would mean a duplicate slipped in. */
	assert(node_ptr == &event->node.node);
}
203
/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 *
 * Closes the fd, releases the LTTNG_FD_APPS accounting slot it held, and
 * frees the wrapper object.
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	/* Give back the fd slot reserved when the notify socket was accepted. */
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}
225
/*
 * Return the session registry according to the buffer type of the given
 * session, or NULL if the per-PID/per-UID buffer registry is not found.
 *
 * RCU read side lock must be acquired. Only an unknown buffer type
 * asserts; a missing registry is reported by the NULL return (it can
 * legitimately be absent on error paths during initialization).
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		assert(0);
	};

error:
	return registry;
}
267
/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 *
 * Releases the context object on the application's socket (errors from an
 * exiting/dead application are tolerated), then frees the context.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
		struct ust_app *app)
{
	int ret;

	assert(ua_ctx);

	if (ua_ctx->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		/* EPIPE/EXITING simply mean the application is gone. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
					sock, ua_ctx->obj->handle, ret);
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}
292
293 /*
294 * Delete ust app event safely. RCU read lock must be held before calling
295 * this function.
296 */
297 static
298 void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
299 struct ust_app *app)
300 {
301 int ret;
302
303 assert(ua_event);
304
305 free(ua_event->filter);
306 if (ua_event->exclusion != NULL)
307 free(ua_event->exclusion);
308 if (ua_event->obj != NULL) {
309 pthread_mutex_lock(&app->sock_lock);
310 ret = ustctl_release_object(sock, ua_event->obj);
311 pthread_mutex_unlock(&app->sock_lock);
312 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
313 ERR("UST app sock %d release event obj failed with ret %d",
314 sock, ret);
315 }
316 free(ua_event->obj);
317 }
318 free(ua_event);
319 }
320
/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		/* EPIPE/EXITING simply mean the application is gone. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
		/*
		 * Give back 2 fd slots; presumably the pair reserved per stream
		 * when it was received (shm + wakeup fd) — TODO confirm against
		 * the stream reception path.
		 */
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}
347
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 *
 * Best-effort: the release return value is deliberately ignored since the
 * stream is freed regardless.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}
361
/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_channel().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	/* Defer hash table destruction to the ht-cleanup thread. */
	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}
378
/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * Only applies to per-CPU channels (metadata channels carry no such
 * counters). Overwrite-mode channels lose packets; discard-mode channels
 * discard events — only the relevant counter is fetched.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	/* Accumulate into the parent channel so values survive app teardown. */
	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
}
441
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * Tears down, in order: streams, contexts, events, the per-PID registry
 * bookkeeping, then the channel UST object itself. The actual struct is
 * freed after a grace period through delete_ust_app_channel_rcu.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key,
				sock >= 0);
		}
		/*
		 * A negative sock means the application is already gone; the
		 * counters can only be read while it is still reachable.
		 */
		if (sock >= 0) {
			save_per_pid_lost_discarded_counters(ua_chan);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		/* EPIPE/EXITING simply mean the application is gone. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
514
/*
 * Send the "register done" command to the application, completing its
 * registration handshake. Serialized on the application's sock_lock.
 *
 * Return the ustctl_register_done() return code (0 on success, negative
 * value on error).
 */
int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	return ret;
}
524
/*
 * Release a UST object for the given application.
 *
 * If app is NULL, a socket of -1 is used and no lock is taken; presumably
 * this only tears down the local object state without notifying any
 * application — confirm against ustctl_release_object() semantics.
 *
 * Return the ustctl_release_object() return code.
 */
int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		sock = -1;
	}
	ret = ustctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}
541
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existance of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held; note that the registry lock
 * is temporarily dropped around the actual push (see comment below) and
 * re-acquired before returning.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happens if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	/* Only the not-yet-sent tail of the metadata cache is pushed. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			/* Zero-length push still notifies the consumer. */
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectionnal communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
				new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
677
/*
 * For a given registry and consumer output, push metadata to the consumer
 * using the socket matching the registry's bitness.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existance of registry. It also ensures existance
 * of socket throughout this function.
 *
 * Takes and releases the registry lock around the push; refuses to push
 * when the registry metadata has already been flagged closed.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}
728
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);

	/* Nothing to do if never assigned or already closed. */
	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be emit
	 * for this registry. (Deliberate fallthrough from the success path.)
	 */
	registry->metadata_closed = 1;
end:
	pthread_mutex_unlock(&registry->lock);
	rcu_read_unlock();
	return ret;
}
780
/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	/* Defer channel hash table destruction to the ht-cleanup thread. */
	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}
796
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * Pushes any pending metadata, closes the metadata (per-PID buffers only),
 * deletes all channels, drops the per-PID buffer registry, releases the
 * session handle on the application socket, and finally schedules the
 * struct for reclamation after a grace period.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/* A session must never be deleted twice. */
	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		/* EPIPE/EXITING simply mean the application is gone. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	/* Drop the reference taken on the consumer output at creation. */
	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
877
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	/* Invalidate the stored socket before tearing anything down. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Defer hash table destruction to the ht-cleanup thread. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}
935
/*
 * URCU intermediate call to delete an UST app.
 *
 * Recovers the ust_app from the hash table node embedded in the rcu_head
 * and hands it to delete_ust_app() after the grace period.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}
950
951 /*
952 * Delete the session from the application ht and delete the data structure by
953 * freeing every object inside and releasing them.
954 *
955 * The session list lock must be held by the caller.
956 */
957 static void destroy_app_session(struct ust_app *app,
958 struct ust_app_session *ua_sess)
959 {
960 int ret;
961 struct lttng_ht_iter iter;
962
963 assert(app);
964 assert(ua_sess);
965
966 iter.iter.node = &ua_sess->node.node;
967 ret = lttng_ht_del(app->sessions, &iter);
968 if (ret) {
969 /* Already scheduled for teardown. */
970 goto end;
971 }
972
973 /* Once deleted, free the data structure. */
974 delete_ust_app_session(app->sock, ua_sess, app);
975
976 end:
977 return;
978 }
979
980 /*
981 * Alloc new UST app session.
982 */
983 static
984 struct ust_app_session *alloc_ust_app_session(void)
985 {
986 struct ust_app_session *ua_sess;
987
988 /* Init most of the default value by allocating and zeroing */
989 ua_sess = zmalloc(sizeof(struct ust_app_session));
990 if (ua_sess == NULL) {
991 PERROR("malloc");
992 goto error_free;
993 }
994
995 ua_sess->handle = -1;
996 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
997 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
998 pthread_mutex_init(&ua_sess->lock, NULL);
999
1000 return ua_sess;
1001
1002 error_free:
1003 return NULL;
1004 }
1005
1006 /*
1007 * Alloc new UST app channel.
1008 */
1009 static
1010 struct ust_app_channel *alloc_ust_app_channel(char *name,
1011 struct ust_app_session *ua_sess,
1012 struct lttng_ust_channel_attr *attr)
1013 {
1014 struct ust_app_channel *ua_chan;
1015
1016 /* Init most of the default value by allocating and zeroing */
1017 ua_chan = zmalloc(sizeof(struct ust_app_channel));
1018 if (ua_chan == NULL) {
1019 PERROR("malloc");
1020 goto error;
1021 }
1022
1023 /* Setup channel name */
1024 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1025 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1026
1027 ua_chan->enabled = 1;
1028 ua_chan->handle = -1;
1029 ua_chan->session = ua_sess;
1030 ua_chan->key = get_next_channel_key();
1031 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1032 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1033 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
1034
1035 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
1036 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
1037
1038 /* Copy attributes */
1039 if (attr) {
1040 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
1041 ua_chan->attr.subbuf_size = attr->subbuf_size;
1042 ua_chan->attr.num_subbuf = attr->num_subbuf;
1043 ua_chan->attr.overwrite = attr->overwrite;
1044 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1045 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1046 ua_chan->attr.output = attr->output;
1047 ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
1048 }
1049 /* By default, the channel is a per cpu channel. */
1050 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1051
1052 DBG3("UST app channel %s allocated", ua_chan->name);
1053
1054 return ua_chan;
1055
1056 error:
1057 return NULL;
1058 }
1059
1060 /*
1061 * Allocate and initialize a UST app stream.
1062 *
1063 * Return newly allocated stream pointer or NULL on error.
1064 */
1065 struct ust_app_stream *ust_app_alloc_stream(void)
1066 {
1067 struct ust_app_stream *stream = NULL;
1068
1069 stream = zmalloc(sizeof(*stream));
1070 if (stream == NULL) {
1071 PERROR("zmalloc ust app stream");
1072 goto error;
1073 }
1074
1075 /* Zero could be a valid value for a handle so flag it to -1. */
1076 stream->handle = -1;
1077
1078 error:
1079 return stream;
1080 }
1081
1082 /*
1083 * Alloc new UST app event.
1084 */
1085 static
1086 struct ust_app_event *alloc_ust_app_event(char *name,
1087 struct lttng_ust_event *attr)
1088 {
1089 struct ust_app_event *ua_event;
1090
1091 /* Init most of the default value by allocating and zeroing */
1092 ua_event = zmalloc(sizeof(struct ust_app_event));
1093 if (ua_event == NULL) {
1094 PERROR("malloc");
1095 goto error;
1096 }
1097
1098 ua_event->enabled = 1;
1099 strncpy(ua_event->name, name, sizeof(ua_event->name));
1100 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1101 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
1102
1103 /* Copy attributes */
1104 if (attr) {
1105 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1106 }
1107
1108 DBG3("UST app event %s allocated", ua_event->name);
1109
1110 return ua_event;
1111
1112 error:
1113 return NULL;
1114 }
1115
1116 /*
1117 * Alloc new UST app context.
1118 */
1119 static
1120 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
1121 {
1122 struct ust_app_ctx *ua_ctx;
1123
1124 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
1125 if (ua_ctx == NULL) {
1126 goto error;
1127 }
1128
1129 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1130
1131 if (uctx) {
1132 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
1133 if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
1134 char *provider_name = NULL, *ctx_name = NULL;
1135
1136 provider_name = strdup(uctx->u.app_ctx.provider_name);
1137 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1138 if (!provider_name || !ctx_name) {
1139 free(provider_name);
1140 free(ctx_name);
1141 goto error;
1142 }
1143
1144 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1145 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1146 }
1147 }
1148
1149 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
1150 return ua_ctx;
1151 error:
1152 free(ua_ctx);
1153 return NULL;
1154 }
1155
1156 /*
1157 * Allocate a filter and copy the given original filter.
1158 *
1159 * Return allocated filter or NULL on error.
1160 */
1161 static struct lttng_filter_bytecode *copy_filter_bytecode(
1162 struct lttng_filter_bytecode *orig_f)
1163 {
1164 struct lttng_filter_bytecode *filter = NULL;
1165
1166 /* Copy filter bytecode */
1167 filter = zmalloc(sizeof(*filter) + orig_f->len);
1168 if (!filter) {
1169 PERROR("zmalloc alloc filter bytecode");
1170 goto error;
1171 }
1172
1173 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1174
1175 error:
1176 return filter;
1177 }
1178
1179 /*
1180 * Create a liblttng-ust filter bytecode from given bytecode.
1181 *
1182 * Return allocated filter or NULL on error.
1183 */
1184 static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
1185 struct lttng_filter_bytecode *orig_f)
1186 {
1187 struct lttng_ust_filter_bytecode *filter = NULL;
1188
1189 /* Copy filter bytecode */
1190 filter = zmalloc(sizeof(*filter) + orig_f->len);
1191 if (!filter) {
1192 PERROR("zmalloc alloc ust filter bytecode");
1193 goto error;
1194 }
1195
1196 assert(sizeof(struct lttng_filter_bytecode) ==
1197 sizeof(struct lttng_ust_filter_bytecode));
1198 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1199 error:
1200 return filter;
1201 }
1202
1203 /*
1204 * Find an ust_app using the sock and return it. RCU read side lock must be
1205 * held before calling this helper function.
1206 */
1207 struct ust_app *ust_app_find_by_sock(int sock)
1208 {
1209 struct lttng_ht_node_ulong *node;
1210 struct lttng_ht_iter iter;
1211
1212 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1213 node = lttng_ht_iter_get_node_ulong(&iter);
1214 if (node == NULL) {
1215 DBG2("UST app find by sock %d not found", sock);
1216 goto error;
1217 }
1218
1219 return caa_container_of(node, struct ust_app, sock_n);
1220
1221 error:
1222 return NULL;
1223 }
1224
1225 /*
1226 * Find an ust_app using the notify sock and return it. RCU read side lock must
1227 * be held before calling this helper function.
1228 */
1229 static struct ust_app *find_app_by_notify_sock(int sock)
1230 {
1231 struct lttng_ht_node_ulong *node;
1232 struct lttng_ht_iter iter;
1233
1234 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1235 &iter);
1236 node = lttng_ht_iter_get_node_ulong(&iter);
1237 if (node == NULL) {
1238 DBG2("UST app find by notify sock %d not found", sock);
1239 goto error;
1240 }
1241
1242 return caa_container_of(node, struct ust_app, notify_sock_n);
1243
1244 error:
1245 return NULL;
1246 }
1247
1248 /*
1249 * Lookup for an ust app event based on event name, filter bytecode and the
1250 * event loglevel.
1251 *
1252 * Return an ust_app_event object or NULL on error.
1253 */
1254 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1255 char *name, struct lttng_filter_bytecode *filter,
1256 int loglevel_value,
1257 const struct lttng_event_exclusion *exclusion)
1258 {
1259 struct lttng_ht_iter iter;
1260 struct lttng_ht_node_str *node;
1261 struct ust_app_event *event = NULL;
1262 struct ust_app_ht_key key;
1263
1264 assert(name);
1265 assert(ht);
1266
1267 /* Setup key for event lookup. */
1268 key.name = name;
1269 key.filter = filter;
1270 key.loglevel_type = loglevel_value;
1271 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1272 key.exclusion = exclusion;
1273
1274 /* Lookup using the event name as hash and a custom match fct. */
1275 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1276 ht_match_ust_app_event, &key, &iter.iter);
1277 node = lttng_ht_iter_get_node_str(&iter);
1278 if (node == NULL) {
1279 goto end;
1280 }
1281
1282 event = caa_container_of(node, struct ust_app_event, node);
1283
1284 end:
1285 return event;
1286 }
1287
1288 /*
1289 * Create the channel context on the tracer.
1290 *
1291 * Called with UST app session lock held.
1292 */
1293 static
1294 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1295 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1296 {
1297 int ret;
1298
1299 health_code_update();
1300
1301 pthread_mutex_lock(&app->sock_lock);
1302 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1303 ua_chan->obj, &ua_ctx->obj);
1304 pthread_mutex_unlock(&app->sock_lock);
1305 if (ret < 0) {
1306 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1307 ERR("UST app create channel context failed for app (pid: %d) "
1308 "with ret %d", app->pid, ret);
1309 } else {
1310 /*
1311 * This is normal behavior, an application can die during the
1312 * creation process. Don't report an error so the execution can
1313 * continue normally.
1314 */
1315 ret = 0;
1316 DBG3("UST app disable event failed. Application is dead.");
1317 }
1318 goto error;
1319 }
1320
1321 ua_ctx->handle = ua_ctx->obj->handle;
1322
1323 DBG2("UST app context handle %d created successfully for channel %s",
1324 ua_ctx->handle, ua_chan->name);
1325
1326 error:
1327 health_code_update();
1328 return ret;
1329 }
1330
1331 /*
1332 * Set the filter on the tracer.
1333 */
1334 static
1335 int set_ust_event_filter(struct ust_app_event *ua_event,
1336 struct ust_app *app)
1337 {
1338 int ret;
1339 struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
1340
1341 health_code_update();
1342
1343 if (!ua_event->filter) {
1344 ret = 0;
1345 goto error;
1346 }
1347
1348 ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
1349 if (!ust_bytecode) {
1350 ret = -LTTNG_ERR_NOMEM;
1351 goto error;
1352 }
1353 pthread_mutex_lock(&app->sock_lock);
1354 ret = ustctl_set_filter(app->sock, ust_bytecode,
1355 ua_event->obj);
1356 pthread_mutex_unlock(&app->sock_lock);
1357 if (ret < 0) {
1358 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1359 ERR("UST app event %s filter failed for app (pid: %d) "
1360 "with ret %d", ua_event->attr.name, app->pid, ret);
1361 } else {
1362 /*
1363 * This is normal behavior, an application can die during the
1364 * creation process. Don't report an error so the execution can
1365 * continue normally.
1366 */
1367 ret = 0;
1368 DBG3("UST app filter event failed. Application is dead.");
1369 }
1370 goto error;
1371 }
1372
1373 DBG2("UST filter set successfully for event %s", ua_event->name);
1374
1375 error:
1376 health_code_update();
1377 free(ust_bytecode);
1378 return ret;
1379 }
1380
1381 static
1382 struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
1383 struct lttng_event_exclusion *exclusion)
1384 {
1385 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1386 size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1387 LTTNG_UST_SYM_NAME_LEN * exclusion->count;
1388
1389 ust_exclusion = zmalloc(exclusion_alloc_size);
1390 if (!ust_exclusion) {
1391 PERROR("malloc");
1392 goto end;
1393 }
1394
1395 assert(sizeof(struct lttng_event_exclusion) ==
1396 sizeof(struct lttng_ust_event_exclusion));
1397 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1398 end:
1399 return ust_exclusion;
1400 }
1401
1402 /*
1403 * Set event exclusions on the tracer.
1404 */
1405 static
1406 int set_ust_event_exclusion(struct ust_app_event *ua_event,
1407 struct ust_app *app)
1408 {
1409 int ret;
1410 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1411
1412 health_code_update();
1413
1414 if (!ua_event->exclusion || !ua_event->exclusion->count) {
1415 ret = 0;
1416 goto error;
1417 }
1418
1419 ust_exclusion = create_ust_exclusion_from_exclusion(
1420 ua_event->exclusion);
1421 if (!ust_exclusion) {
1422 ret = -LTTNG_ERR_NOMEM;
1423 goto error;
1424 }
1425 pthread_mutex_lock(&app->sock_lock);
1426 ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
1427 pthread_mutex_unlock(&app->sock_lock);
1428 if (ret < 0) {
1429 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1430 ERR("UST app event %s exclusions failed for app (pid: %d) "
1431 "with ret %d", ua_event->attr.name, app->pid, ret);
1432 } else {
1433 /*
1434 * This is normal behavior, an application can die during the
1435 * creation process. Don't report an error so the execution can
1436 * continue normally.
1437 */
1438 ret = 0;
1439 DBG3("UST app event exclusion failed. Application is dead.");
1440 }
1441 goto error;
1442 }
1443
1444 DBG2("UST exclusion set successfully for event %s", ua_event->name);
1445
1446 error:
1447 health_code_update();
1448 free(ust_exclusion);
1449 return ret;
1450 }
1451
1452 /*
1453 * Disable the specified event on to UST tracer for the UST session.
1454 */
1455 static int disable_ust_event(struct ust_app *app,
1456 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1457 {
1458 int ret;
1459
1460 health_code_update();
1461
1462 pthread_mutex_lock(&app->sock_lock);
1463 ret = ustctl_disable(app->sock, ua_event->obj);
1464 pthread_mutex_unlock(&app->sock_lock);
1465 if (ret < 0) {
1466 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1467 ERR("UST app event %s disable failed for app (pid: %d) "
1468 "and session handle %d with ret %d",
1469 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1470 } else {
1471 /*
1472 * This is normal behavior, an application can die during the
1473 * creation process. Don't report an error so the execution can
1474 * continue normally.
1475 */
1476 ret = 0;
1477 DBG3("UST app disable event failed. Application is dead.");
1478 }
1479 goto error;
1480 }
1481
1482 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1483 ua_event->attr.name, app->pid);
1484
1485 error:
1486 health_code_update();
1487 return ret;
1488 }
1489
1490 /*
1491 * Disable the specified channel on to UST tracer for the UST session.
1492 */
1493 static int disable_ust_channel(struct ust_app *app,
1494 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1495 {
1496 int ret;
1497
1498 health_code_update();
1499
1500 pthread_mutex_lock(&app->sock_lock);
1501 ret = ustctl_disable(app->sock, ua_chan->obj);
1502 pthread_mutex_unlock(&app->sock_lock);
1503 if (ret < 0) {
1504 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1505 ERR("UST app channel %s disable failed for app (pid: %d) "
1506 "and session handle %d with ret %d",
1507 ua_chan->name, app->pid, ua_sess->handle, ret);
1508 } else {
1509 /*
1510 * This is normal behavior, an application can die during the
1511 * creation process. Don't report an error so the execution can
1512 * continue normally.
1513 */
1514 ret = 0;
1515 DBG3("UST app disable channel failed. Application is dead.");
1516 }
1517 goto error;
1518 }
1519
1520 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1521 ua_chan->name, app->pid);
1522
1523 error:
1524 health_code_update();
1525 return ret;
1526 }
1527
1528 /*
1529 * Enable the specified channel on to UST tracer for the UST session.
1530 */
1531 static int enable_ust_channel(struct ust_app *app,
1532 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1533 {
1534 int ret;
1535
1536 health_code_update();
1537
1538 pthread_mutex_lock(&app->sock_lock);
1539 ret = ustctl_enable(app->sock, ua_chan->obj);
1540 pthread_mutex_unlock(&app->sock_lock);
1541 if (ret < 0) {
1542 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1543 ERR("UST app channel %s enable failed for app (pid: %d) "
1544 "and session handle %d with ret %d",
1545 ua_chan->name, app->pid, ua_sess->handle, ret);
1546 } else {
1547 /*
1548 * This is normal behavior, an application can die during the
1549 * creation process. Don't report an error so the execution can
1550 * continue normally.
1551 */
1552 ret = 0;
1553 DBG3("UST app enable channel failed. Application is dead.");
1554 }
1555 goto error;
1556 }
1557
1558 ua_chan->enabled = 1;
1559
1560 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1561 ua_chan->name, app->pid);
1562
1563 error:
1564 health_code_update();
1565 return ret;
1566 }
1567
1568 /*
1569 * Enable the specified event on to UST tracer for the UST session.
1570 */
1571 static int enable_ust_event(struct ust_app *app,
1572 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1573 {
1574 int ret;
1575
1576 health_code_update();
1577
1578 pthread_mutex_lock(&app->sock_lock);
1579 ret = ustctl_enable(app->sock, ua_event->obj);
1580 pthread_mutex_unlock(&app->sock_lock);
1581 if (ret < 0) {
1582 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1583 ERR("UST app event %s enable failed for app (pid: %d) "
1584 "and session handle %d with ret %d",
1585 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1586 } else {
1587 /*
1588 * This is normal behavior, an application can die during the
1589 * creation process. Don't report an error so the execution can
1590 * continue normally.
1591 */
1592 ret = 0;
1593 DBG3("UST app enable event failed. Application is dead.");
1594 }
1595 goto error;
1596 }
1597
1598 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1599 ua_event->attr.name, app->pid);
1600
1601 error:
1602 health_code_update();
1603 return ret;
1604 }
1605
1606 /*
1607 * Send channel and stream buffer to application.
1608 *
1609 * Return 0 on success. On error, a negative value is returned.
1610 */
1611 static int send_channel_pid_to_ust(struct ust_app *app,
1612 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1613 {
1614 int ret;
1615 struct ust_app_stream *stream, *stmp;
1616
1617 assert(app);
1618 assert(ua_sess);
1619 assert(ua_chan);
1620
1621 health_code_update();
1622
1623 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1624 app->sock);
1625
1626 /* Send channel to the application. */
1627 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1628 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1629 ret = -ENOTCONN; /* Caused by app exiting. */
1630 goto error;
1631 } else if (ret < 0) {
1632 goto error;
1633 }
1634
1635 health_code_update();
1636
1637 /* Send all streams to application. */
1638 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1639 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1640 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1641 ret = -ENOTCONN; /* Caused by app exiting. */
1642 goto error;
1643 } else if (ret < 0) {
1644 goto error;
1645 }
1646 /* We don't need the stream anymore once sent to the tracer. */
1647 cds_list_del(&stream->list);
1648 delete_ust_app_stream(-1, stream, app);
1649 }
1650 /* Flag the channel that it is sent to the application. */
1651 ua_chan->is_sent = 1;
1652
1653 error:
1654 health_code_update();
1655 return ret;
1656 }
1657
1658 /*
1659 * Create the specified event onto the UST tracer for a UST session.
1660 *
1661 * Should be called with session mutex held.
1662 */
1663 static
1664 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1665 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1666 {
1667 int ret = 0;
1668
1669 health_code_update();
1670
1671 /* Create UST event on tracer */
1672 pthread_mutex_lock(&app->sock_lock);
1673 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1674 &ua_event->obj);
1675 pthread_mutex_unlock(&app->sock_lock);
1676 if (ret < 0) {
1677 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1678 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1679 ua_event->attr.name, app->pid, ret);
1680 } else {
1681 /*
1682 * This is normal behavior, an application can die during the
1683 * creation process. Don't report an error so the execution can
1684 * continue normally.
1685 */
1686 ret = 0;
1687 DBG3("UST app create event failed. Application is dead.");
1688 }
1689 goto error;
1690 }
1691
1692 ua_event->handle = ua_event->obj->handle;
1693
1694 DBG2("UST app event %s created successfully for pid:%d",
1695 ua_event->attr.name, app->pid);
1696
1697 health_code_update();
1698
1699 /* Set filter if one is present. */
1700 if (ua_event->filter) {
1701 ret = set_ust_event_filter(ua_event, app);
1702 if (ret < 0) {
1703 goto error;
1704 }
1705 }
1706
1707 /* Set exclusions for the event */
1708 if (ua_event->exclusion) {
1709 ret = set_ust_event_exclusion(ua_event, app);
1710 if (ret < 0) {
1711 goto error;
1712 }
1713 }
1714
1715 /* If event not enabled, disable it on the tracer */
1716 if (ua_event->enabled) {
1717 /*
1718 * We now need to explicitly enable the event, since it
1719 * is now disabled at creation.
1720 */
1721 ret = enable_ust_event(app, ua_sess, ua_event);
1722 if (ret < 0) {
1723 /*
1724 * If we hit an EPERM, something is wrong with our enable call. If
1725 * we get an EEXIST, there is a problem on the tracer side since we
1726 * just created it.
1727 */
1728 switch (ret) {
1729 case -LTTNG_UST_ERR_PERM:
1730 /* Code flow problem */
1731 assert(0);
1732 case -LTTNG_UST_ERR_EXIST:
1733 /* It's OK for our use case. */
1734 ret = 0;
1735 break;
1736 default:
1737 break;
1738 }
1739 goto error;
1740 }
1741 }
1742
1743 error:
1744 health_code_update();
1745 return ret;
1746 }
1747
1748 /*
1749 * Copy data between an UST app event and a LTT event.
1750 */
1751 static void shadow_copy_event(struct ust_app_event *ua_event,
1752 struct ltt_ust_event *uevent)
1753 {
1754 size_t exclusion_alloc_size;
1755
1756 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1757 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1758
1759 ua_event->enabled = uevent->enabled;
1760
1761 /* Copy event attributes */
1762 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1763
1764 /* Copy filter bytecode */
1765 if (uevent->filter) {
1766 ua_event->filter = copy_filter_bytecode(uevent->filter);
1767 /* Filter might be NULL here in case of ENONEM. */
1768 }
1769
1770 /* Copy exclusion data */
1771 if (uevent->exclusion) {
1772 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
1773 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
1774 ua_event->exclusion = zmalloc(exclusion_alloc_size);
1775 if (ua_event->exclusion == NULL) {
1776 PERROR("malloc");
1777 } else {
1778 memcpy(ua_event->exclusion, uevent->exclusion,
1779 exclusion_alloc_size);
1780 }
1781 }
1782 }
1783
1784 /*
1785 * Copy data between an UST app channel and a LTT channel.
1786 */
1787 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
1788 struct ltt_ust_channel *uchan)
1789 {
1790 struct lttng_ht_iter iter;
1791 struct ltt_ust_event *uevent;
1792 struct ltt_ust_context *uctx;
1793 struct ust_app_event *ua_event;
1794
1795 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
1796
1797 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
1798 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1799
1800 ua_chan->tracefile_size = uchan->tracefile_size;
1801 ua_chan->tracefile_count = uchan->tracefile_count;
1802
1803 /* Copy event attributes since the layout is different. */
1804 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
1805 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
1806 ua_chan->attr.overwrite = uchan->attr.overwrite;
1807 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
1808 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
1809 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
1810 ua_chan->attr.output = uchan->attr.output;
1811 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
1812
1813 /*
1814 * Note that the attribute channel type is not set since the channel on the
1815 * tracing registry side does not have this information.
1816 */
1817
1818 ua_chan->enabled = uchan->enabled;
1819 ua_chan->tracing_channel_id = uchan->id;
1820
1821 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
1822 struct ust_app_ctx *ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
1823
1824 if (ua_ctx == NULL) {
1825 continue;
1826 }
1827 lttng_ht_node_init_ulong(&ua_ctx->node,
1828 (unsigned long) ua_ctx->ctx.ctx);
1829 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
1830 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
1831 }
1832
1833 /* Copy all events from ltt ust channel to ust app channel */
1834 cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
1835 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
1836 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
1837 if (ua_event == NULL) {
1838 DBG2("UST event %s not found on shadow copy channel",
1839 uevent->attr.name);
1840 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
1841 if (ua_event == NULL) {
1842 continue;
1843 }
1844 shadow_copy_event(ua_event, uevent);
1845 add_unique_ust_app_event(ua_chan, ua_event);
1846 }
1847 }
1848
1849 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
1850 }
1851
1852 /*
1853 * Copy data between a UST app session and a regular LTT session.
1854 */
1855 static void shadow_copy_session(struct ust_app_session *ua_sess,
1856 struct ltt_ust_session *usess, struct ust_app *app)
1857 {
1858 struct lttng_ht_node_str *ua_chan_node;
1859 struct lttng_ht_iter iter;
1860 struct ltt_ust_channel *uchan;
1861 struct ust_app_channel *ua_chan;
1862 time_t rawtime;
1863 struct tm *timeinfo;
1864 char datetime[16];
1865 int ret;
1866 char tmp_shm_path[PATH_MAX];
1867
1868 /* Get date and time for unique app path */
1869 time(&rawtime);
1870 timeinfo = localtime(&rawtime);
1871 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1872
1873 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1874
1875 ua_sess->tracing_id = usess->id;
1876 ua_sess->id = get_next_session_id();
1877 ua_sess->uid = app->uid;
1878 ua_sess->gid = app->gid;
1879 ua_sess->euid = usess->uid;
1880 ua_sess->egid = usess->gid;
1881 ua_sess->buffer_type = usess->buffer_type;
1882 ua_sess->bits_per_long = app->bits_per_long;
1883
1884 /* There is only one consumer object per session possible. */
1885 consumer_output_get(usess->consumer);
1886 ua_sess->consumer = usess->consumer;
1887
1888 ua_sess->output_traces = usess->output_traces;
1889 ua_sess->live_timer_interval = usess->live_timer_interval;
1890 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
1891 &usess->metadata_attr);
1892
1893 switch (ua_sess->buffer_type) {
1894 case LTTNG_BUFFER_PER_PID:
1895 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1896 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1897 datetime);
1898 break;
1899 case LTTNG_BUFFER_PER_UID:
1900 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1901 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1902 break;
1903 default:
1904 assert(0);
1905 goto error;
1906 }
1907 if (ret < 0) {
1908 PERROR("asprintf UST shadow copy session");
1909 assert(0);
1910 goto error;
1911 }
1912
1913 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
1914 sizeof(ua_sess->root_shm_path));
1915 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
1916 strncpy(ua_sess->shm_path, usess->shm_path,
1917 sizeof(ua_sess->shm_path));
1918 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1919 if (ua_sess->shm_path[0]) {
1920 switch (ua_sess->buffer_type) {
1921 case LTTNG_BUFFER_PER_PID:
1922 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1923 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
1924 app->name, app->pid, datetime);
1925 break;
1926 case LTTNG_BUFFER_PER_UID:
1927 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1928 DEFAULT_UST_TRACE_UID_PATH,
1929 app->uid, app->bits_per_long);
1930 break;
1931 default:
1932 assert(0);
1933 goto error;
1934 }
1935 if (ret < 0) {
1936 PERROR("sprintf UST shadow copy session");
1937 assert(0);
1938 goto error;
1939 }
1940 strncat(ua_sess->shm_path, tmp_shm_path,
1941 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
1942 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1943 }
1944
1945 /* Iterate over all channels in global domain. */
1946 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1947 uchan, node.node) {
1948 struct lttng_ht_iter uiter;
1949
1950 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1951 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1952 if (ua_chan_node != NULL) {
1953 /* Session exist. Contiuing. */
1954 continue;
1955 }
1956
1957 DBG2("Channel %s not found on shadow session copy, creating it",
1958 uchan->name);
1959 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess,
1960 &uchan->attr);
1961 if (ua_chan == NULL) {
1962 /* malloc failed FIXME: Might want to do handle ENOMEM .. */
1963 continue;
1964 }
1965 shadow_copy_channel(ua_chan, uchan);
1966 /*
1967 * The concept of metadata channel does not exist on the tracing
1968 * registry side of the session daemon so this can only be a per CPU
1969 * channel and not metadata.
1970 */
1971 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1972
1973 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1974 }
1975 return;
1976
1977 error:
1978 consumer_output_put(ua_sess->consumer);
1979 }
1980
1981 /*
1982 * Lookup sesison wrapper.
1983 */
1984 static
1985 void __lookup_session_by_app(struct ltt_ust_session *usess,
1986 struct ust_app *app, struct lttng_ht_iter *iter)
1987 {
1988 /* Get right UST app session from app */
1989 lttng_ht_lookup(app->sessions, &usess->id, iter);
1990 }
1991
1992 /*
1993 * Return ust app session from the app session hashtable using the UST session
1994 * id.
1995 */
1996 static struct ust_app_session *lookup_session_by_app(
1997 struct ltt_ust_session *usess, struct ust_app *app)
1998 {
1999 struct lttng_ht_iter iter;
2000 struct lttng_ht_node_u64 *node;
2001
2002 __lookup_session_by_app(usess, app, &iter);
2003 node = lttng_ht_iter_get_node_u64(&iter);
2004 if (node == NULL) {
2005 goto error;
2006 }
2007
2008 return caa_container_of(node, struct ust_app_session, node);
2009
2010 error:
2011 return NULL;
2012 }
2013
2014 /*
2015 * Setup buffer registry per PID for the given session and application. If none
2016 * is found, a new one is created, added to the global registry and
2017 * initialized. If regp is valid, it's set with the newly created object.
2018 *
2019 * Return 0 on success or else a negative value.
2020 */
2021 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2022 struct ust_app *app, struct buffer_reg_pid **regp)
2023 {
2024 int ret = 0;
2025 struct buffer_reg_pid *reg_pid;
2026
2027 assert(ua_sess);
2028 assert(app);
2029
2030 rcu_read_lock();
2031
2032 reg_pid = buffer_reg_pid_find(ua_sess->id);
2033 if (!reg_pid) {
2034 /*
2035 * This is the create channel path meaning that if there is NO
2036 * registry available, we have to create one for this session.
2037 */
2038 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
2039 ua_sess->root_shm_path, ua_sess->shm_path);
2040 if (ret < 0) {
2041 goto error;
2042 }
2043 } else {
2044 goto end;
2045 }
2046
2047 /* Initialize registry. */
2048 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2049 app->bits_per_long, app->uint8_t_alignment,
2050 app->uint16_t_alignment, app->uint32_t_alignment,
2051 app->uint64_t_alignment, app->long_alignment,
2052 app->byte_order, app->version.major,
2053 app->version.minor, reg_pid->root_shm_path,
2054 reg_pid->shm_path,
2055 ua_sess->euid, ua_sess->egid);
2056 if (ret < 0) {
2057 /*
2058 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2059 * destroy the buffer registry, because it is always expected
2060 * that if the buffer registry can be found, its ust registry is
2061 * non-NULL.
2062 */
2063 buffer_reg_pid_destroy(reg_pid);
2064 goto error;
2065 }
2066
2067 buffer_reg_pid_add(reg_pid);
2068
2069 DBG3("UST app buffer registry per PID created successfully");
2070
2071 end:
2072 if (regp) {
2073 *regp = reg_pid;
2074 }
2075 error:
2076 rcu_read_unlock();
2077 return ret;
2078 }
2079
2080 /*
2081 * Setup buffer registry per UID for the given session and application. If none
2082 * is found, a new one is created, added to the global registry and
2083 * initialized. If regp is valid, it's set with the newly created object.
2084 *
2085 * Return 0 on success or else a negative value.
2086 */
2087 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
2088 struct ust_app_session *ua_sess,
2089 struct ust_app *app, struct buffer_reg_uid **regp)
2090 {
2091 int ret = 0;
2092 struct buffer_reg_uid *reg_uid;
2093
2094 assert(usess);
2095 assert(app);
2096
2097 rcu_read_lock();
2098
2099 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2100 if (!reg_uid) {
2101 /*
2102 * This is the create channel path meaning that if there is NO
2103 * registry available, we have to create one for this session.
2104 */
2105 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
2106 LTTNG_DOMAIN_UST, &reg_uid,
2107 ua_sess->root_shm_path, ua_sess->shm_path);
2108 if (ret < 0) {
2109 goto error;
2110 }
2111 } else {
2112 goto end;
2113 }
2114
2115 /* Initialize registry. */
2116 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
2117 app->bits_per_long, app->uint8_t_alignment,
2118 app->uint16_t_alignment, app->uint32_t_alignment,
2119 app->uint64_t_alignment, app->long_alignment,
2120 app->byte_order, app->version.major,
2121 app->version.minor, reg_uid->root_shm_path,
2122 reg_uid->shm_path, usess->uid, usess->gid);
2123 if (ret < 0) {
2124 /*
2125 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2126 * destroy the buffer registry, because it is always expected
2127 * that if the buffer registry can be found, its ust registry is
2128 * non-NULL.
2129 */
2130 buffer_reg_uid_destroy(reg_uid, NULL);
2131 goto error;
2132 }
2133 /* Add node to teardown list of the session. */
2134 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2135
2136 buffer_reg_uid_add(reg_uid);
2137
2138 DBG3("UST app buffer registry per UID created successfully");
2139 end:
2140 if (regp) {
2141 *regp = reg_uid;
2142 }
2143 error:
2144 rcu_read_unlock();
2145 return ret;
2146 }
2147
2148 /*
2149 * Create a session on the tracer side for the given app.
2150 *
2151 * On success, ua_sess_ptr is populated with the session pointer or else left
2152 * untouched. If the session was created, is_created is set to 1. On error,
2153 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2154 * be NULL.
2155 *
2156 * Returns 0 on success or else a negative code which is either -ENOMEM or
2157 * -ENOTCONN which is the default code if the ustctl_create_session fails.
2158 */
static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse the app session if this app already has one for this session. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session();
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Set up the buffer registry matching the session's buffering scheme. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/*
	 * handle == -1 means the session has not been created on the tracer
	 * side yet (it is assigned from the ustctl return value below).
	 */
	if (ua_sess->handle == -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
		lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
		lttng_ht_add_unique_ulong(app->ust_sessions_objd,
				&ua_sess->ust_objd_node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
2265
2266 /*
2267 * Match function for a hash table lookup of ust_app_ctx.
2268 *
2269 * It matches an ust app context based on the context type and, in the case
2270 * of perf counters, their name.
2271 */
2272 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2273 {
2274 struct ust_app_ctx *ctx;
2275 const struct lttng_ust_context_attr *key;
2276
2277 assert(node);
2278 assert(_key);
2279
2280 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2281 key = _key;
2282
2283 /* Context type */
2284 if (ctx->ctx.ctx != key->ctx) {
2285 goto no_match;
2286 }
2287
2288 switch(key->ctx) {
2289 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
2290 if (strncmp(key->u.perf_counter.name,
2291 ctx->ctx.u.perf_counter.name,
2292 sizeof(key->u.perf_counter.name))) {
2293 goto no_match;
2294 }
2295 break;
2296 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2297 if (strcmp(key->u.app_ctx.provider_name,
2298 ctx->ctx.u.app_ctx.provider_name) ||
2299 strcmp(key->u.app_ctx.ctx_name,
2300 ctx->ctx.u.app_ctx.ctx_name)) {
2301 goto no_match;
2302 }
2303 break;
2304 default:
2305 break;
2306 }
2307
2308 /* Match. */
2309 return 1;
2310
2311 no_match:
2312 return 0;
2313 }
2314
2315 /*
2316 * Lookup for an ust app context from an lttng_ust_context.
2317 *
2318 * Must be called while holding RCU read side lock.
2319 * Return an ust_app_ctx object or NULL on error.
2320 */
2321 static
2322 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2323 struct lttng_ust_context_attr *uctx)
2324 {
2325 struct lttng_ht_iter iter;
2326 struct lttng_ht_node_ulong *node;
2327 struct ust_app_ctx *app_ctx = NULL;
2328
2329 assert(uctx);
2330 assert(ht);
2331
2332 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2333 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2334 ht_match_ust_app_ctx, uctx, &iter.iter);
2335 node = lttng_ht_iter_get_node_ulong(&iter);
2336 if (!node) {
2337 goto end;
2338 }
2339
2340 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2341
2342 end:
2343 return app_ctx;
2344 }
2345
2346 /*
2347 * Create a context for the channel on the tracer.
2348 *
2349 * Called with UST app session lock held and a RCU read side lock.
2350 */
2351 static
2352 int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
2353 struct lttng_ust_context_attr *uctx,
2354 struct ust_app *app)
2355 {
2356 int ret = 0;
2357 struct ust_app_ctx *ua_ctx;
2358
2359 DBG2("UST app adding context to channel %s", ua_chan->name);
2360
2361 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2362 if (ua_ctx) {
2363 ret = -EEXIST;
2364 goto error;
2365 }
2366
2367 ua_ctx = alloc_ust_app_ctx(uctx);
2368 if (ua_ctx == NULL) {
2369 /* malloc failed */
2370 ret = -ENOMEM;
2371 goto error;
2372 }
2373
2374 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2375 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2376 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2377
2378 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2379 if (ret < 0) {
2380 goto error;
2381 }
2382
2383 error:
2384 return ret;
2385 }
2386
2387 /*
2388 * Enable on the tracer side a ust app event for the session and channel.
2389 *
2390 * Called with UST app session lock held.
2391 */
2392 static
2393 int enable_ust_app_event(struct ust_app_session *ua_sess,
2394 struct ust_app_event *ua_event, struct ust_app *app)
2395 {
2396 int ret;
2397
2398 ret = enable_ust_event(app, ua_sess, ua_event);
2399 if (ret < 0) {
2400 goto error;
2401 }
2402
2403 ua_event->enabled = 1;
2404
2405 error:
2406 return ret;
2407 }
2408
2409 /*
2410 * Disable on the tracer side a ust app event for the session and channel.
2411 */
2412 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2413 struct ust_app_event *ua_event, struct ust_app *app)
2414 {
2415 int ret;
2416
2417 ret = disable_ust_event(app, ua_sess, ua_event);
2418 if (ret < 0) {
2419 goto error;
2420 }
2421
2422 ua_event->enabled = 0;
2423
2424 error:
2425 return ret;
2426 }
2427
2428 /*
2429 * Lookup ust app channel for session and disable it on the tracer side.
2430 */
2431 static
2432 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2433 struct ust_app_channel *ua_chan, struct ust_app *app)
2434 {
2435 int ret;
2436
2437 ret = disable_ust_channel(app, ua_sess, ua_chan);
2438 if (ret < 0) {
2439 goto error;
2440 }
2441
2442 ua_chan->enabled = 0;
2443
2444 error:
2445 return ret;
2446 }
2447
2448 /*
2449 * Lookup ust app channel for session and enable it on the tracer side. This
2450 * MUST be called with a RCU read side lock acquired.
2451 */
2452 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2453 struct ltt_ust_channel *uchan, struct ust_app *app)
2454 {
2455 int ret = 0;
2456 struct lttng_ht_iter iter;
2457 struct lttng_ht_node_str *ua_chan_node;
2458 struct ust_app_channel *ua_chan;
2459
2460 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2461 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2462 if (ua_chan_node == NULL) {
2463 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2464 uchan->name, ua_sess->tracing_id);
2465 goto error;
2466 }
2467
2468 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2469
2470 ret = enable_ust_channel(app, ua_sess, ua_chan);
2471 if (ret < 0) {
2472 goto error;
2473 }
2474
2475 error:
2476 return ret;
2477 }
2478
2479 /*
2480 * Ask the consumer to create a channel and get it if successful.
2481 *
2482 * Called with UST app session lock held.
2483 *
2484 * Return 0 on success or else a negative value.
2485 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry,
		uint64_t trace_archive_id)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry, trace_archive_id);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call wil populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

	/*
	 * Error labels unwind in reverse acquisition order: stream fds,
	 * consumer-side channel, then the single channel fd.
	 */
error_destroy:
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2575
2576 /*
2577 * Duplicate the ust data object of the ust app stream and save it in the
2578 * buffer registry stream.
2579 *
2580 * Return 0 on success or else a negative value.
2581 */
2582 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2583 struct ust_app_stream *stream)
2584 {
2585 int ret;
2586
2587 assert(reg_stream);
2588 assert(stream);
2589
2590 /* Reserve the amount of file descriptor we need. */
2591 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2592 if (ret < 0) {
2593 ERR("Exhausted number of available FD upon duplicate stream");
2594 goto error;
2595 }
2596
2597 /* Duplicate object for stream once the original is in the registry. */
2598 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2599 reg_stream->obj.ust);
2600 if (ret < 0) {
2601 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2602 reg_stream->obj.ust, stream->obj, ret);
2603 lttng_fd_put(LTTNG_FD_APPS, 2);
2604 goto error;
2605 }
2606 stream->handle = stream->obj->handle;
2607
2608 error:
2609 return ret;
2610 }
2611
2612 /*
2613 * Duplicate the ust data object of the ust app. channel and save it in the
2614 * buffer registry channel.
2615 *
2616 * Return 0 on success or else a negative value.
2617 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/*
	 * Need one fd for the channel. NOTE(review): the comment previously
	 * claimed "two fds" but only one is reserved (and released on error)
	 * here — confirm which count is intended; code left unchanged.
	 */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	/* Release the fd reserved above; duplication failed. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2649
2650 /*
2651 * For a given channel buffer registry, setup all streams of the given ust
2652 * application channel.
2653 *
2654 * Return 0 on success or else a negative value.
2655 */
static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_stream *stream, *stmp;

	assert(reg_chan);
	assert(ua_chan);

	DBG2("UST app setup buffer registry stream");

	/*
	 * Move every stream's ust object from the app channel into the buffer
	 * registry channel, then release the now-empty app stream.
	 */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		struct buffer_reg_stream *reg_stream;

		ret = buffer_reg_stream_create(&reg_stream);
		if (ret < 0) {
			/*
			 * NOTE(review): bailing mid-loop leaves streams already
			 * transferred in reg_chan and the rest on ua_chan; the
			 * caller is expected to tear both down.
			 */
			goto error;
		}

		/*
		 * Keep original pointer and nullify it in the stream so the delete
		 * stream call does not release the object.
		 */
		reg_stream->obj.ust = stream->obj;
		stream->obj = NULL;
		buffer_reg_stream_add(reg_stream, reg_chan);

		/* We don't need the streams anymore. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}

error:
	return ret;
}
2693
2694 /*
2695 * Create a buffer registry channel for the given session registry and
2696 * application channel object. If regp pointer is valid, it's set with the
2697 * created object. Important, the created object is NOT added to the session
2698 * registry hash table.
2699 *
2700 * Return 0 on success else a negative value.
2701 */
2702 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2703 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2704 {
2705 int ret;
2706 struct buffer_reg_channel *reg_chan = NULL;
2707
2708 assert(reg_sess);
2709 assert(ua_chan);
2710
2711 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2712
2713 /* Create buffer registry channel. */
2714 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2715 if (ret < 0) {
2716 goto error_create;
2717 }
2718 assert(reg_chan);
2719 reg_chan->consumer_key = ua_chan->key;
2720 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
2721 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
2722
2723 /* Create and add a channel registry to session. */
2724 ret = ust_registry_channel_add(reg_sess->reg.ust,
2725 ua_chan->tracing_channel_id);
2726 if (ret < 0) {
2727 goto error;
2728 }
2729 buffer_reg_channel_add(reg_sess, reg_chan);
2730
2731 if (regp) {
2732 *regp = reg_chan;
2733 }
2734
2735 return 0;
2736
2737 error:
2738 /* Safe because the registry channel object was not added to any HT. */
2739 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2740 error_create:
2741 return ret;
2742 }
2743
2744 /*
2745 * Setup buffer registry channel for the given session registry and application
2746 * channel object. If regp pointer is valid, it's set with the created object.
2747 *
2748 * Return 0 on success else a negative value.
2749 */
2750 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2751 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
2752 struct ust_app *app)
2753 {
2754 int ret;
2755
2756 assert(reg_sess);
2757 assert(reg_chan);
2758 assert(ua_chan);
2759 assert(ua_chan->obj);
2760
2761 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2762
2763 /* Setup all streams for the registry. */
2764 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
2765 if (ret < 0) {
2766 goto error;
2767 }
2768
2769 reg_chan->obj.ust = ua_chan->obj;
2770 ua_chan->obj = NULL;
2771
2772 return 0;
2773
2774 error:
2775 buffer_reg_channel_remove(reg_sess, reg_chan);
2776 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2777 return ret;
2778 }
2779
2780 /*
2781 * Send buffer registry channel to the application.
2782 *
2783 * Return 0 on success else a negative value.
2784 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	/* Give the app channel its own copy of the registry channel object. */
	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * Send all streams to application. The stream list lock protects the
	 * registry channel's stream list for the whole send loop.
	 */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		struct ust_app_stream stream;

		/* Stack-local copy: duplicated per app, released after sending. */
		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream, app);
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				ret = -ENOTCONN; /* Caused by app exiting. */
			}
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream, app);
	}
	/* Channel and all its streams made it to the application. */
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
2847
2848 /*
2849 * Create and send to the application the created buffers with per UID buffers.
2850 *
2851 * This MUST be called with a RCU read side lock acquired.
2852 * The session list lock and the session's lock must be acquired.
2853 *
2854 * Return 0 on success else a negative value.
2855 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;
	struct ltt_session *session;
	enum lttng_error_code notification_ret;
	struct ust_registry_channel *chan_reg;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be find, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (reg_chan) {
		/* Buffers already exist for this UID; only send them to this app. */
		goto send_channel;
	}

	/* Create the buffer registry channel object. */
	ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
			ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);
	/* Both locks must already be held by the caller (debug-only checks). */
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/*
	 * Create the buffers on the consumer side. This call populates the
	 * ust app channel object with all streams and data object.
	 */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, reg_uid->registry->reg.ust,
			session->current_archive_id);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
			ua_chan->name);

		/*
		 * Let's remove the previously created buffer registry channel so
		 * it's not visible anymore in the session registry.
		 */
		ust_registry_channel_del_free(reg_uid->registry->reg.ust,
				ua_chan->tracing_channel_id, false);
		buffer_reg_channel_remove(reg_uid->registry, reg_chan);
		buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
		goto error;
	}

	/*
	 * Setup the streams and add it to the session registry.
	 */
	ret = setup_buffer_reg_channel(reg_uid->registry,
			ua_chan, reg_chan, app);
	if (ret < 0) {
		ERR("Error setting up UST channel \"%s\"", ua_chan->name);
		goto error;
	}

	/* Notify the notification subsystem of the channel's creation. */
	pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
	chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
			ua_chan->tracing_channel_id);
	assert(chan_reg);
	chan_reg->consumer_key = ua_chan->key;
	chan_reg = NULL;
	pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);

	notification_ret = notification_thread_command_add_channel(
			notification_thread_handle, session->name,
			ua_sess->euid, ua_sess->egid,
			ua_chan->name,
			ua_chan->key,
			LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (notification_ret != LTTNG_OK) {
		ret = - (int) notification_ret;
		ERR("Failed to add channel to notification thread");
		goto error;
	}

send_channel:
	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	return ret;
}
2968
2969 /*
2970 * Create and send to the application the created buffers with per PID buffers.
2971 *
2972 * Called with UST app session lock held.
2973 * The session list lock and the session's lock must be acquired.
2974 *
2975 * Return 0 on success else a negative value.
2976 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;
	enum lttng_error_code cmd_ret;
	struct ltt_session *session;
	uint64_t chan_reg_key;
	struct ust_registry_channel *chan_reg;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	/* The UST app session lock is held, registry shall not be null. */
	assert(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
			ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);

	/* Both locks must already be held by the caller (debug-only checks). */
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry,
			session->current_archive_id);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
			ua_chan->name);
		/*
		 * NOTE(review): the registry channel added above is not removed
		 * on this and the following error paths — confirm whether the
		 * session teardown reclaims it.
		 */
		goto error;
	}

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

	/* Record the consumer key in the registry channel entry. */
	chan_reg_key = ua_chan->key;
	pthread_mutex_lock(&registry->lock);
	chan_reg = ust_registry_channel_find(registry, chan_reg_key);
	assert(chan_reg);
	chan_reg->consumer_key = ua_chan->key;
	pthread_mutex_unlock(&registry->lock);

	cmd_ret = notification_thread_command_add_channel(
			notification_thread_handle, session->name,
			ua_sess->euid, ua_sess->egid,
			ua_chan->name,
			ua_chan->key,
			LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (cmd_ret != LTTNG_OK) {
		ret = - (int) cmd_ret;
		ERR("Failed to add channel to notification thread");
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}
3057
3058 /*
3059 * From an already allocated ust app channel, create the channel buffers if
3060 * need and send it to the application. This MUST be called with a RCU read
3061 * side lock acquired.
3062 *
3063 * Called with UST app session lock held.
3064 *
3065 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3066 * the application exited concurrently.
3067 */
3068 static int do_create_channel(struct ust_app *app,
3069 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3070 struct ust_app_channel *ua_chan)
3071 {
3072 int ret;
3073
3074 assert(app);
3075 assert(usess);
3076 assert(ua_sess);
3077 assert(ua_chan);
3078
3079 /* Handle buffer type before sending the channel to the application. */
3080 switch (usess->buffer_type) {
3081 case LTTNG_BUFFER_PER_UID:
3082 {
3083 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3084 if (ret < 0) {
3085 goto error;
3086 }
3087 break;
3088 }
3089 case LTTNG_BUFFER_PER_PID:
3090 {
3091 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3092 if (ret < 0) {
3093 goto error;
3094 }
3095 break;
3096 }
3097 default:
3098 assert(0);
3099 ret = -EINVAL;
3100 goto error;
3101 }
3102
3103 /* Initialize ust objd object using the received handle and add it. */
3104 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3105 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
3106
3107 /* If channel is not enabled, disable it on the tracer */
3108 if (!ua_chan->enabled) {
3109 ret = disable_ust_channel(app, ua_sess, ua_chan);
3110 if (ret < 0) {
3111 goto error;
3112 }
3113 }
3114
3115 error:
3116 return ret;
3117 }
3118
3119 /*
3120 * Create UST app channel and create it on the tracer. Set ua_chanp of the
3121 * newly created channel if not NULL.
3122 *
3123 * Called with UST app session lock and RCU read-side lock held.
3124 *
3125 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3126 * the application exited concurrently.
3127 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		/* Channel already exists; return it without re-creating. */
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/* Pass the app socket only if the channel reached the application. */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
3180
3181 /*
3182 * Create UST app event and create it on the tracer side.
3183 *
3184 * Called with ust app session mutex held.
3185 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	/* Get event node */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event != NULL) {
		/* The event already exists on this channel for this app. */
		ret = -EEXIST;
		goto end;
	}

	/* Does not exist so create one */
	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only malloc can failed so something is really wrong */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/* Not found previously means that it does not exist on the tracer */
		assert(ret != -LTTNG_UST_ERR_EXIST);
		goto error;
	}

	/* Publish the event on the channel only after tracer-side success. */
	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event %s for PID %d completed", ua_event->name,
			app->pid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event, app);
	return ret;
}
3232
/*
 * Create UST metadata and open it on the tracer side.
 *
 * Called with UST app session lock held and RCU read side lock.
 *
 * Return 0 on success (including when the metadata channel already exists
 * or was closed for this registry) or a negative value on error.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;
	struct ltt_session *session;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	/* The UST app session is held registry shall not be null. */
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);

	/*
	 * Both the session lock and the session list lock are expected to be
	 * held by the caller; trylock returning non-zero confirms it.
	 */
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept their. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry, session->current_archive_id);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/*
	 * Reached on both success and failure: once the consumer has been asked
	 * for the channel it is identified by registry->metadata_key, so the
	 * local channel object and its fd reservation are always released here.
	 */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
	return ret;
}
3338
3339 /*
3340 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3341 * acquired before calling this function.
3342 */
3343 struct ust_app *ust_app_find_by_pid(pid_t pid)
3344 {
3345 struct ust_app *app = NULL;
3346 struct lttng_ht_node_ulong *node;
3347 struct lttng_ht_iter iter;
3348
3349 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3350 node = lttng_ht_iter_get_node_ulong(&iter);
3351 if (node == NULL) {
3352 DBG2("UST app no found with pid %d", pid);
3353 goto error;
3354 }
3355
3356 DBG2("Found UST app by pid %d", pid);
3357
3358 app = caa_container_of(node, struct ust_app, pid_n);
3359
3360 error:
3361 return app;
3362 }
3363
/*
 * Allocate and init an UST app object using the registration information and
 * the command socket. This is called when the command socket connects to the
 * session daemon.
 *
 * The object is returned on success or else NULL.
 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	struct ust_app *lta = NULL;

	assert(msg);
	assert(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	/* Refuse the registration when no consumerd matches the app's bitness. */
	if ((msg->bits_per_long == 64 &&
				(uatomic_read(&ust_consumerd64_fd) == -EINVAL))
			|| (msg->bits_per_long == 32 &&
				(uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
				"%d-bit long, but no consumerd for this size is available.\n",
				msg->name, msg->pid, msg->bits_per_long);
		goto error;
	}

	lta = zmalloc(sizeof(struct ust_app));
	if (lta == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Copy identity and ABI layout info from the registration message. */
	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	lta->bits_per_long = msg->bits_per_long;
	lta->uint8_t_alignment = msg->uint8_t_alignment;
	lta->uint16_t_alignment = msg->uint16_t_alignment;
	lta->uint32_t_alignment = msg->uint32_t_alignment;
	lta->uint64_t_alignment = msg->uint64_t_alignment;
	lta->long_alignment = msg->long_alignment;
	lta->byte_order = msg->byte_order;

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	/*
	 * NOTE(review): the lttng_ht_new() return values are not checked; an
	 * allocation failure here would lead to a NULL dereference on first
	 * use of these tables. Confirm the intended OOM policy.
	 */
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	/* The notify socket is attached later, at notify registration time. */
	lta->notify_sock = -1;

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	pthread_mutex_init(&lta->sock_lock, NULL);
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);
error:
	return lta;
}
3436
3437 /*
3438 * For a given application object, add it to every hash table.
3439 */
3440 void ust_app_add(struct ust_app *app)
3441 {
3442 assert(app);
3443 assert(app->notify_sock >= 0);
3444
3445 rcu_read_lock();
3446
3447 /*
3448 * On a re-registration, we want to kick out the previous registration of
3449 * that pid
3450 */
3451 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
3452
3453 /*
3454 * The socket _should_ be unique until _we_ call close. So, a add_unique
3455 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
3456 * already in the table.
3457 */
3458 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
3459
3460 /* Add application to the notify socket hash table. */
3461 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3462 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
3463
3464 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
3465 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3466 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3467 app->v_minor);
3468
3469 rcu_read_unlock();
3470 }
3471
3472 /*
3473 * Set the application version into the object.
3474 *
3475 * Return 0 on success else a negative value either an errno code or a
3476 * LTTng-UST error code.
3477 */
3478 int ust_app_version(struct ust_app *app)
3479 {
3480 int ret;
3481
3482 assert(app);
3483
3484 pthread_mutex_lock(&app->sock_lock);
3485 ret = ustctl_tracer_version(app->sock, &app->version);
3486 pthread_mutex_unlock(&app->sock_lock);
3487 if (ret < 0) {
3488 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3489 ERR("UST app %d version failed with ret %d", app->sock, ret);
3490 } else {
3491 DBG3("UST app %d version failed. Application is dead", app->sock);
3492 }
3493 }
3494
3495 return ret;
3496 }
3497
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter ust_app_sock_iter;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/*
	 * For per-PID buffers, perform "push metadata" and flush all
	 * application streams before removing app from hash tables,
	 * ensuring proper behavior of data_pending check.
	 * Remove sessions so they are not visible during deletion.
	 */
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
			(void) ust_app_flush_app_session(lta, ua_sess);
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon unregistration we
		 * can't afford to wait for the grace period before pushing data or
		 * else the data pending feature can race between the unregistration
		 * and stop command where the data pending command is sent *before*
		 * the grace period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}
		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);

		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Remove application from the socket hash table. */
	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the
	 * apps_notify_thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Free memory */
	/* Deferred until the RCU grace period ends so lookups stay safe. */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3617
/*
 * Fill events array with all events name of all registered apps.
 *
 * On success, return the number of events and set *events to a heap-allocated
 * array the caller must free. On error, return a negative errno-style value;
 * *events is left untouched.
 */
int ust_app_list_events(struct lttng_event **events)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event *tmp_event;

	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app events");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_tracepoint_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/* Serialize all ustctl commands on the app's command socket. */
		pthread_mutex_lock(&app->sock_lock);
		handle = ustctl_tracepoint_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list events getting handle failed for app pid %d",
						app->pid);
			}
			pthread_mutex_unlock(&app->sock_lock);
			continue;
		}

		/* Iterate the app's tracepoints until the list is exhausted. */
		while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
					&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				int release_ret;

				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list get failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list get failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Continue normal execution.
					 */
					break;
				}
				/* Fatal: free partial results and release the list handle. */
				free(tmp_event);
				release_ret = ustctl_release_handle(app->sock, handle);
				if (release_ret < 0 &&
						release_ret != -LTTNG_UST_ERR_EXITING &&
						release_ret != -EPIPE) {
					ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
				}
				pthread_mutex_unlock(&app->sock_lock);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event *new_tmp_event;
				size_t new_nbmem;

				/* Grow geometrically to keep the amortized cost low. */
				new_nbmem = nbmem << 1;
				DBG2("Reallocating event list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
						new_nbmem * sizeof(struct lttng_event));
				if (new_tmp_event == NULL) {
					int release_ret;

					PERROR("realloc ust app events");
					free(tmp_event);
					ret = -ENOMEM;
					release_ret = ustctl_release_handle(app->sock, handle);
					if (release_ret < 0 &&
							release_ret != -LTTNG_UST_ERR_EXITING &&
							release_ret != -EPIPE) {
						ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
					}
					pthread_mutex_unlock(&app->sock_lock);
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
						(new_nbmem - nbmem) * sizeof(struct lttng_event));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}
			memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
			tmp_event[count].loglevel = uiter.loglevel;
			tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
			tmp_event[count].pid = app->pid;
			tmp_event[count].enabled = -1;
			count++;
		}
		ret = ustctl_release_handle(app->sock, handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
		}
	}

	ret = count;
	*events = tmp_event;

	DBG2("UST app list events done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
3748
3749 /*
3750 * Fill events array with all events name of all registered apps.
3751 */
3752 int ust_app_list_event_fields(struct lttng_event_field **fields)
3753 {
3754 int ret, handle;
3755 size_t nbmem, count = 0;
3756 struct lttng_ht_iter iter;
3757 struct ust_app *app;
3758 struct lttng_event_field *tmp_event;
3759
3760 nbmem = UST_APP_EVENT_LIST_SIZE;
3761 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3762 if (tmp_event == NULL) {
3763 PERROR("zmalloc ust app event fields");
3764 ret = -ENOMEM;
3765 goto error;
3766 }
3767
3768 rcu_read_lock();
3769
3770 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3771 struct lttng_ust_field_iter uiter;
3772
3773 health_code_update();
3774
3775 if (!app->compatible) {
3776 /*
3777 * TODO: In time, we should notice the caller of this error by
3778 * telling him that this is a version error.
3779 */
3780 continue;
3781 }
3782 pthread_mutex_lock(&app->sock_lock);
3783 handle = ustctl_tracepoint_field_list(app->sock);
3784 if (handle < 0) {
3785 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3786 ERR("UST app list field getting handle failed for app pid %d",
3787 app->pid);
3788 }
3789 pthread_mutex_unlock(&app->sock_lock);
3790 continue;
3791 }
3792
3793 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3794 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3795 /* Handle ustctl error. */
3796 if (ret < 0) {
3797 int release_ret;
3798
3799 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3800 ERR("UST app tp list field failed for app %d with ret %d",
3801 app->sock, ret);
3802 } else {
3803 DBG3("UST app tp list field failed. Application is dead");
3804 /*
3805 * This is normal behavior, an application can die during the
3806 * creation process. Don't report an error so the execution can
3807 * continue normally. Reset list and count for next app.
3808 */
3809 break;
3810 }
3811 free(tmp_event);
3812 release_ret = ustctl_release_handle(app->sock, handle);
3813 pthread_mutex_unlock(&app->sock_lock);
3814 if (release_ret < 0 &&
3815 release_ret != -LTTNG_UST_ERR_EXITING &&
3816 release_ret != -EPIPE) {
3817 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3818 }
3819 goto rcu_error;
3820 }
3821
3822 health_code_update();
3823 if (count >= nbmem) {
3824 /* In case the realloc fails, we free the memory */
3825 struct lttng_event_field *new_tmp_event;
3826 size_t new_nbmem;
3827
3828 new_nbmem = nbmem << 1;
3829 DBG2("Reallocating event field list from %zu to %zu entries",
3830 nbmem, new_nbmem);
3831 new_tmp_event = realloc(tmp_event,
3832 new_nbmem * sizeof(struct lttng_event_field));
3833 if (new_tmp_event == NULL) {
3834 int release_ret;
3835
3836 PERROR("realloc ust app event fields");
3837 free(tmp_event);
3838 ret = -ENOMEM;
3839 release_ret = ustctl_release_handle(app->sock, handle);
3840 pthread_mutex_unlock(&app->sock_lock);
3841 if (release_ret &&
3842 release_ret != -LTTNG_UST_ERR_EXITING &&
3843 release_ret != -EPIPE) {
3844 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3845 }
3846 goto rcu_error;
3847 }
3848 /* Zero the new memory */
3849 memset(new_tmp_event + nbmem, 0,
3850 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
3851 nbmem = new_nbmem;
3852 tmp_event = new_tmp_event;
3853 }
3854
3855 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3856 /* Mapping between these enums matches 1 to 1. */
3857 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
3858 tmp_event[count].nowrite = uiter.nowrite;
3859
3860 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3861 tmp_event[count].event.loglevel = uiter.loglevel;
3862 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
3863 tmp_event[count].event.pid = app->pid;
3864 tmp_event[count].event.enabled = -1;
3865 count++;
3866 }
3867 ret = ustctl_release_handle(app->sock, handle);
3868 pthread_mutex_unlock(&app->sock_lock);
3869 if (ret < 0 &&
3870 ret != -LTTNG_UST_ERR_EXITING &&
3871 ret != -EPIPE) {
3872 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3873 }
3874 }
3875
3876 ret = count;
3877 *fields = tmp_event;
3878
3879 DBG2("UST app list event fields done (%zu events)", count);
3880
3881 rcu_error:
3882 rcu_read_unlock();
3883 error:
3884 health_code_update();
3885 return ret;
3886 }
3887
3888 /*
3889 * Free and clean all traceable apps of the global list.
3890 *
3891 * Should _NOT_ be called with RCU read-side lock held.
3892 */
3893 void ust_app_clean_list(void)
3894 {
3895 int ret;
3896 struct ust_app *app;
3897 struct lttng_ht_iter iter;
3898
3899 DBG2("UST app cleaning registered apps hash table");
3900
3901 rcu_read_lock();
3902
3903 if (ust_app_ht) {
3904 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3905 ret = lttng_ht_del(ust_app_ht, &iter);
3906 assert(!ret);
3907 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3908 }
3909 }
3910
3911 /* Cleanup socket hash table */
3912 if (ust_app_ht_by_sock) {
3913 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3914 sock_n.node) {
3915 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3916 assert(!ret);
3917 }
3918 }
3919
3920 /* Cleanup notify socket hash table */
3921 if (ust_app_ht_by_notify_sock) {
3922 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3923 notify_sock_n.node) {
3924 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3925 assert(!ret);
3926 }
3927 }
3928 rcu_read_unlock();
3929
3930 /* Destroy is done only when the ht is empty */
3931 if (ust_app_ht) {
3932 ht_cleanup_push(ust_app_ht);
3933 }
3934 if (ust_app_ht_by_sock) {
3935 ht_cleanup_push(ust_app_ht_by_sock);
3936 }
3937 if (ust_app_ht_by_notify_sock) {
3938 ht_cleanup_push(ust_app_ht_by_notify_sock);
3939 }
3940 }
3941
3942 /*
3943 * Init UST app hash table.
3944 */
3945 int ust_app_ht_alloc(void)
3946 {
3947 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3948 if (!ust_app_ht) {
3949 return -1;
3950 }
3951 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3952 if (!ust_app_ht_by_sock) {
3953 return -1;
3954 }
3955 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3956 if (!ust_app_ht_by_notify_sock) {
3957 return -1;
3958 }
3959 return 0;
3960 }
3961
3962 /*
3963 * For a specific UST session, disable the channel for all registered apps.
3964 */
3965 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3966 struct ltt_ust_channel *uchan)
3967 {
3968 int ret = 0;
3969 struct lttng_ht_iter iter;
3970 struct lttng_ht_node_str *ua_chan_node;
3971 struct ust_app *app;
3972 struct ust_app_session *ua_sess;
3973 struct ust_app_channel *ua_chan;
3974
3975 if (usess == NULL || uchan == NULL) {
3976 ERR("Disabling UST global channel with NULL values");
3977 ret = -1;
3978 goto error;
3979 }
3980
3981 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
3982 uchan->name, usess->id);
3983
3984 rcu_read_lock();
3985
3986 /* For every registered applications */
3987 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3988 struct lttng_ht_iter uiter;
3989 if (!app->compatible) {
3990 /*
3991 * TODO: In time, we should notice the caller of this error by
3992 * telling him that this is a version error.
3993 */
3994 continue;
3995 }
3996 ua_sess = lookup_session_by_app(usess, app);
3997 if (ua_sess == NULL) {
3998 continue;
3999 }
4000
4001 /* Get channel */
4002 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4003 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4004 /* If the session if found for the app, the channel must be there */
4005 assert(ua_chan_node);
4006
4007 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4008 /* The channel must not be already disabled */
4009 assert(ua_chan->enabled == 1);
4010
4011 /* Disable channel onto application */
4012 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
4013 if (ret < 0) {
4014 /* XXX: We might want to report this error at some point... */
4015 continue;
4016 }
4017 }
4018
4019 rcu_read_unlock();
4020
4021 error:
4022 return ret;
4023 }
4024
4025 /*
4026 * For a specific UST session, enable the channel for all registered apps.
4027 */
4028 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
4029 struct ltt_ust_channel *uchan)
4030 {
4031 int ret = 0;
4032 struct lttng_ht_iter iter;
4033 struct ust_app *app;
4034 struct ust_app_session *ua_sess;
4035
4036 if (usess == NULL || uchan == NULL) {
4037 ERR("Adding UST global channel to NULL values");
4038 ret = -1;
4039 goto error;
4040 }
4041
4042 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
4043 uchan->name, usess->id);
4044
4045 rcu_read_lock();
4046
4047 /* For every registered applications */
4048 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4049 if (!app->compatible) {
4050 /*
4051 * TODO: In time, we should notice the caller of this error by
4052 * telling him that this is a version error.
4053 */
4054 continue;
4055 }
4056 ua_sess = lookup_session_by_app(usess, app);
4057 if (ua_sess == NULL) {
4058 continue;
4059 }
4060
4061 /* Enable channel onto application */
4062 ret = enable_ust_app_channel(ua_sess, uchan, app);
4063 if (ret < 0) {
4064 /* XXX: We might want to report this error at some point... */
4065 continue;
4066 }
4067 }
4068
4069 rcu_read_unlock();
4070
4071 error:
4072 return ret;
4073 }
4074
4075 /*
4076 * Disable an event in a channel and for a specific session.
4077 */
4078 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4079 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4080 {
4081 int ret = 0;
4082 struct lttng_ht_iter iter, uiter;
4083 struct lttng_ht_node_str *ua_chan_node;
4084 struct ust_app *app;
4085 struct ust_app_session *ua_sess;
4086 struct ust_app_channel *ua_chan;
4087 struct ust_app_event *ua_event;
4088
4089 DBG("UST app disabling event %s for all apps in channel "
4090 "%s for session id %" PRIu64,
4091 uevent->attr.name, uchan->name, usess->id);
4092
4093 rcu_read_lock();
4094
4095 /* For all registered applications */
4096 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4097 if (!app->compatible) {
4098 /*
4099 * TODO: In time, we should notice the caller of this error by
4100 * telling him that this is a version error.
4101 */
4102 continue;
4103 }
4104 ua_sess = lookup_session_by_app(usess, app);
4105 if (ua_sess == NULL) {
4106 /* Next app */
4107 continue;
4108 }
4109
4110 /* Lookup channel in the ust app session */
4111 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4112 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4113 if (ua_chan_node == NULL) {
4114 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
4115 "Skipping", uchan->name, usess->id, app->pid);
4116 continue;
4117 }
4118 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4119
4120 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4121 uevent->filter, uevent->attr.loglevel,
4122 uevent->exclusion);
4123 if (ua_event == NULL) {
4124 DBG2("Event %s not found in channel %s for app pid %d."
4125 "Skipping", uevent->attr.name, uchan->name, app->pid);
4126 continue;
4127 }
4128
4129 ret = disable_ust_app_event(ua_sess, ua_event, app);
4130 if (ret < 0) {
4131 /* XXX: Report error someday... */
4132 continue;
4133 }
4134 }
4135
4136 rcu_read_unlock();
4137
4138 return ret;
4139 }
4140
/*
 * For a specific UST session, create the channel for all registered apps.
 *
 * Return 0 on success (apps that are dead or unreachable are silently
 * skipped) or a negative value on fatal error such as -ENOMEM.
 */
int ust_app_create_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0, created;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;

	/* Very wrong code flow */
	assert(usess);
	assert(uchan);

	DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/* Honor the session's PID tracker: untracked apps are skipped. */
		if (!trace_ust_pid_tracker_lookup(usess, app->pid)) {
			/* Skip. */
			continue;
		}

		/*
		 * Create session on the tracer side and add it to app session HT. Note
		 * that if session exist, it will simply return a pointer to the ust
		 * app session.
		 */
		ret = find_or_create_ust_app_session(usess, app, &ua_sess, &created);
		if (ret < 0) {
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0;	/* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
		assert(ua_sess);

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
				sizeof(uchan->name))) {
			/*
			 * The metadata channel is not created through this path;
			 * only record its attributes for later use.
			 */
			copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
			ret = 0;
		} else {
			/* Create channel onto application. We don't need the chan ref. */
			ret = create_ust_app_channel(ua_sess, uchan, app,
					LTTNG_UST_CHAN_PER_CPU, usess, NULL);
		}
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			/* Cleanup the created session if it's the case. */
			if (created) {
				destroy_app_session(app, ua_sess);
			}
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0;	/* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
	}

error_rcu_unlock:
	/* Reached on both success and fatal error; ret carries the status. */
	rcu_read_unlock();
	return ret;
}
4240
/*
 * Enable event for a specific session and channel on the tracer.
 *
 * Return 0 on success or a negative value if enabling the event failed for
 * an application. Apps without a matching session, channel or event are
 * skipped (they may have exited concurrently).
 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps. and enabled on the
	 * tracer also.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/*
		 * It is possible that the channel cannot be found is
		 * the channel/event creation occurs concurrently with
		 * an application exit.
		 */
		if (!ua_chan_node) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d."
					"Skipping app", uevent->attr.name, app->pid);
			goto next_app;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* Fatal for this command; stop and report to the caller. */
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
4325
4326 /*
4327 * For a specific existing UST session and UST channel, creates the event for
4328 * all registered apps.
4329 */
4330 int ust_app_create_event_glb(struct ltt_ust_session *usess,
4331 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4332 {
4333 int ret = 0;
4334 struct lttng_ht_iter iter, uiter;
4335 struct lttng_ht_node_str *ua_chan_node;
4336 struct ust_app *app;
4337 struct ust_app_session *ua_sess;
4338 struct ust_app_channel *ua_chan;
4339
4340 DBG("UST app creating event %s for all apps for session id %" PRIu64,
4341 uevent->attr.name, usess->id);
4342
4343 rcu_read_lock();
4344
4345 /* For all registered applications */
4346 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4347 if (!app->compatible) {
4348 /*
4349 * TODO: In time, we should notice the caller of this error by
4350 * telling him that this is a version error.
4351 */
4352 continue;
4353 }
4354 ua_sess = lookup_session_by_app(usess, app);
4355 if (!ua_sess) {
4356 /* The application has problem or is probably dead. */
4357 continue;
4358 }
4359
4360 pthread_mutex_lock(&ua_sess->lock);
4361
4362 if (ua_sess->deleted) {
4363 pthread_mutex_unlock(&ua_sess->lock);
4364 continue;
4365 }
4366
4367 /* Lookup channel in the ust app session */
4368 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4369 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4370 /* If the channel is not found, there is a code flow error */
4371 assert(ua_chan_node);
4372
4373 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4374
4375 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4376 pthread_mutex_unlock(&ua_sess->lock);
4377 if (ret < 0) {
4378 if (ret != -LTTNG_UST_ERR_EXIST) {
4379 /* Possible value at this point: -ENOMEM. If so, we stop! */
4380 break;
4381 }
4382 DBG2("UST app event %s already exist on app PID %d",
4383 uevent->attr.name, app->pid);
4384 continue;
4385 }
4386 }
4387
4388 rcu_read_unlock();
4389
4390 return ret;
4391 }
4392
4393 /*
4394 * Start tracing for a specific UST session and app.
4395 *
4396 * Called with UST app session lock held.
4397 *
4398 */
4399 static
4400 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
4401 {
4402 int ret = 0;
4403 struct ust_app_session *ua_sess;
4404
4405 DBG("Starting tracing for ust app pid %d", app->pid);
4406
4407 rcu_read_lock();
4408
4409 if (!app->compatible) {
4410 goto end;
4411 }
4412
4413 ua_sess = lookup_session_by_app(usess, app);
4414 if (ua_sess == NULL) {
4415 /* The session is in teardown process. Ignore and continue. */
4416 goto end;
4417 }
4418
4419 pthread_mutex_lock(&ua_sess->lock);
4420
4421 if (ua_sess->deleted) {
4422 pthread_mutex_unlock(&ua_sess->lock);
4423 goto end;
4424 }
4425
4426 /* Upon restart, we skip the setup, already done */
4427 if (ua_sess->started) {
4428 goto skip_setup;
4429 }
4430
4431 /* Create directories if consumer is LOCAL and has a path defined. */
4432 if (usess->consumer->type == CONSUMER_DST_LOCAL &&
4433 usess->consumer->dst.session_root_path[0] != '\0') {
4434 char *tmp_path;
4435
4436 tmp_path = zmalloc(LTTNG_PATH_MAX);
4437 if (!tmp_path) {
4438 ERR("Alloc tmp_path");
4439 goto error_unlock;
4440 }
4441 ret = snprintf(tmp_path, LTTNG_PATH_MAX, "%s%s%s",
4442 usess->consumer->dst.session_root_path,
4443 usess->consumer->chunk_path,
4444 usess->consumer->subdir);
4445 if (ret >= LTTNG_PATH_MAX) {
4446 ERR("Local destination path exceeds the maximal allowed length of %i bytes (needs %i bytes) with path = \"%s%s%s\"",
4447 LTTNG_PATH_MAX, ret,
4448 usess->consumer->dst.session_root_path,
4449 usess->consumer->chunk_path,
4450 usess->consumer->subdir);
4451 free(tmp_path);
4452 goto error_unlock;
4453 }
4454
4455 DBG("Creating directory path for local tracing: \"%s\"",
4456 tmp_path);
4457 ret = run_as_mkdir_recursive(tmp_path, S_IRWXU | S_IRWXG,
4458 ua_sess->euid, ua_sess->egid);
4459 free(tmp_path);
4460 if (ret < 0) {
4461 if (errno != EEXIST) {
4462 ERR("Trace directory creation error");
4463 goto error_unlock;
4464 }
4465 }
4466 }
4467
4468 /*
4469 * Create the metadata for the application. This returns gracefully if a
4470 * metadata was already set for the session.
4471 */
4472 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
4473 if (ret < 0) {
4474 goto error_unlock;
4475 }
4476
4477 health_code_update();
4478
4479 skip_setup:
4480 /* This start the UST tracing */
4481 pthread_mutex_lock(&app->sock_lock);
4482 ret = ustctl_start_session(app->sock, ua_sess->handle);
4483 pthread_mutex_unlock(&app->sock_lock);
4484 if (ret < 0) {
4485 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4486 ERR("Error starting tracing for app pid: %d (ret: %d)",
4487 app->pid, ret);
4488 } else {
4489 DBG("UST app start session failed. Application is dead.");
4490 /*
4491 * This is normal behavior, an application can die during the
4492 * creation process. Don't report an error so the execution can
4493 * continue normally.
4494 */
4495 pthread_mutex_unlock(&ua_sess->lock);
4496 goto end;
4497 }
4498 goto error_unlock;
4499 }
4500
4501 /* Indicate that the session has been started once */
4502 ua_sess->started = 1;
4503
4504 pthread_mutex_unlock(&ua_sess->lock);
4505
4506 health_code_update();
4507
4508 /* Quiescent wait after starting trace */
4509 pthread_mutex_lock(&app->sock_lock);
4510 ret = ustctl_wait_quiescent(app->sock);
4511 pthread_mutex_unlock(&app->sock_lock);
4512 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4513 ERR("UST app wait quiescent failed for app pid %d ret %d",
4514 app->pid, ret);
4515 }
4516
4517 end:
4518 rcu_read_unlock();
4519 health_code_update();
4520 return 0;
4521
4522 error_unlock:
4523 pthread_mutex_unlock(&ua_sess->lock);
4524 rcu_read_unlock();
4525 health_code_update();
4526 return -1;
4527 }
4528
4529 /*
4530 * Stop tracing for a specific UST session and app.
4531 */
4532 static
4533 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
4534 {
4535 int ret = 0;
4536 struct ust_app_session *ua_sess;
4537 struct ust_registry_session *registry;
4538
4539 DBG("Stopping tracing for ust app pid %d", app->pid);
4540
4541 rcu_read_lock();
4542
4543 if (!app->compatible) {
4544 goto end_no_session;
4545 }
4546
4547 ua_sess = lookup_session_by_app(usess, app);
4548 if (ua_sess == NULL) {
4549 goto end_no_session;
4550 }
4551
4552 pthread_mutex_lock(&ua_sess->lock);
4553
4554 if (ua_sess->deleted) {
4555 pthread_mutex_unlock(&ua_sess->lock);
4556 goto end_no_session;
4557 }
4558
4559 /*
4560 * If started = 0, it means that stop trace has been called for a session
4561 * that was never started. It's possible since we can have a fail start
4562 * from either the application manager thread or the command thread. Simply
4563 * indicate that this is a stop error.
4564 */
4565 if (!ua_sess->started) {
4566 goto error_rcu_unlock;
4567 }
4568
4569 health_code_update();
4570
4571 /* This inhibits UST tracing */
4572 pthread_mutex_lock(&app->sock_lock);
4573 ret = ustctl_stop_session(app->sock, ua_sess->handle);
4574 pthread_mutex_unlock(&app->sock_lock);
4575 if (ret < 0) {
4576 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4577 ERR("Error stopping tracing for app pid: %d (ret: %d)",
4578 app->pid, ret);
4579 } else {
4580 DBG("UST app stop session failed. Application is dead.");
4581 /*
4582 * This is normal behavior, an application can die during the
4583 * creation process. Don't report an error so the execution can
4584 * continue normally.
4585 */
4586 goto end_unlock;
4587 }
4588 goto error_rcu_unlock;
4589 }
4590
4591 health_code_update();
4592
4593 /* Quiescent wait after stopping trace */
4594 pthread_mutex_lock(&app->sock_lock);
4595 ret = ustctl_wait_quiescent(app->sock);
4596 pthread_mutex_unlock(&app->sock_lock);
4597 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4598 ERR("UST app wait quiescent failed for app pid %d ret %d",
4599 app->pid, ret);
4600 }
4601
4602 health_code_update();
4603
4604 registry = get_session_registry(ua_sess);
4605
4606 /* The UST app session is held registry shall not be null. */
4607 assert(registry);
4608
4609 /* Push metadata for application before freeing the application. */
4610 (void) push_metadata(registry, ua_sess->consumer);
4611
4612 end_unlock:
4613 pthread_mutex_unlock(&ua_sess->lock);
4614 end_no_session:
4615 rcu_read_unlock();
4616 health_code_update();
4617 return 0;
4618
4619 error_rcu_unlock:
4620 pthread_mutex_unlock(&ua_sess->lock);
4621 rcu_read_unlock();
4622 health_code_update();
4623 return -1;
4624 }
4625
4626 static
4627 int ust_app_flush_app_session(struct ust_app *app,
4628 struct ust_app_session *ua_sess)
4629 {
4630 int ret, retval = 0;
4631 struct lttng_ht_iter iter;
4632 struct ust_app_channel *ua_chan;
4633 struct consumer_socket *socket;
4634
4635 DBG("Flushing app session buffers for ust app pid %d", app->pid);
4636
4637 rcu_read_lock();
4638
4639 if (!app->compatible) {
4640 goto end_not_compatible;
4641 }
4642
4643 pthread_mutex_lock(&ua_sess->lock);
4644
4645 if (ua_sess->deleted) {
4646 goto end_deleted;
4647 }
4648
4649 health_code_update();
4650
4651 /* Flushing buffers */
4652 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4653 ua_sess->consumer);
4654
4655 /* Flush buffers and push metadata. */
4656 switch (ua_sess->buffer_type) {
4657 case LTTNG_BUFFER_PER_PID:
4658 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4659 node.node) {
4660 health_code_update();
4661 ret = consumer_flush_channel(socket, ua_chan->key);
4662 if (ret) {
4663 ERR("Error flushing consumer channel");
4664 retval = -1;
4665 continue;
4666 }
4667 }
4668 break;
4669 case LTTNG_BUFFER_PER_UID:
4670 default:
4671 assert(0);
4672 break;
4673 }
4674
4675 health_code_update();
4676
4677 end_deleted:
4678 pthread_mutex_unlock(&ua_sess->lock);
4679
4680 end_not_compatible:
4681 rcu_read_unlock();
4682 health_code_update();
4683 return retval;
4684 }
4685
4686 /*
4687 * Flush buffers for all applications for a specific UST session.
4688 * Called with UST session lock held.
4689 */
4690 static
4691 int ust_app_flush_session(struct ltt_ust_session *usess)
4692
4693 {
4694 int ret = 0;
4695
4696 DBG("Flushing session buffers for all ust apps");
4697
4698 rcu_read_lock();
4699
4700 /* Flush buffers and push metadata. */
4701 switch (usess->buffer_type) {
4702 case LTTNG_BUFFER_PER_UID:
4703 {
4704 struct buffer_reg_uid *reg;
4705 struct lttng_ht_iter iter;
4706
4707 /* Flush all per UID buffers associated to that session. */
4708 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4709 struct ust_registry_session *ust_session_reg;
4710 struct buffer_reg_channel *reg_chan;
4711 struct consumer_socket *socket;
4712
4713 /* Get consumer socket to use to push the metadata.*/
4714 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
4715 usess->consumer);
4716 if (!socket) {
4717 /* Ignore request if no consumer is found for the session. */
4718 continue;
4719 }
4720
4721 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
4722 reg_chan, node.node) {
4723 /*
4724 * The following call will print error values so the return
4725 * code is of little importance because whatever happens, we
4726 * have to try them all.
4727 */
4728 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
4729 }
4730
4731 ust_session_reg = reg->registry->reg.ust;
4732 /* Push metadata. */
4733 (void) push_metadata(ust_session_reg, usess->consumer);
4734 }
4735 break;
4736 }
4737 case LTTNG_BUFFER_PER_PID:
4738 {
4739 struct ust_app_session *ua_sess;
4740 struct lttng_ht_iter iter;
4741 struct ust_app *app;
4742
4743 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4744 ua_sess = lookup_session_by_app(usess, app);
4745 if (ua_sess == NULL) {
4746 continue;
4747 }
4748 (void) ust_app_flush_app_session(app, ua_sess);
4749 }
4750 break;
4751 }
4752 default:
4753 ret = -1;
4754 assert(0);
4755 break;
4756 }
4757
4758 rcu_read_unlock();
4759 health_code_update();
4760 return ret;
4761 }
4762
/*
 * Clear the quiescent state of every stream of an application session's
 * channels, so the next stop/destroy grabs a timestamp_end even for empty
 * packets (see ust_app_start_trace_all()).
 *
 * Only per-PID buffering is handled here; per-UID buffers are cleared at
 * the session registry level (see ust_app_clear_quiescent_session()).
 *
 * Return 0 on success, -1 on error (no consumer socket, a channel failure
 * or an invalid buffer type).
 */
static
int ust_app_clear_quiescent_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Clearing stream quiescent state for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_unlock;
	}

	health_code_update();

	socket = consumer_find_socket_by_bitness(app->bits_per_long,
			ua_sess->consumer);
	if (!socket) {
		ERR("Failed to find consumer (%" PRIu32 ") socket",
				app->bits_per_long);
		ret = -1;
		goto end_unlock;
	}

	/* Clear quiescent state. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
				ua_chan, node.node) {
			health_code_update();
			ret = consumer_clear_quiescent_channel(socket,
					ua_chan->key);
			if (ret) {
				/* Keep going: try the remaining channels. */
				ERR("Error clearing quiescent state for consumer channel");
				ret = -1;
				continue;
			}
		}
		break;
	case LTTNG_BUFFER_PER_UID:
	default:
		assert(0);
		ret = -1;
		break;
	}

	health_code_update();

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end_not_compatible:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
4829
4830 /*
4831 * Clear quiescent state in each stream for all applications for a
4832 * specific UST session.
4833 * Called with UST session lock held.
4834 */
4835 static
4836 int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
4837
4838 {
4839 int ret = 0;
4840
4841 DBG("Clearing stream quiescent state for all ust apps");
4842
4843 rcu_read_lock();
4844
4845 switch (usess->buffer_type) {
4846 case LTTNG_BUFFER_PER_UID:
4847 {
4848 struct lttng_ht_iter iter;
4849 struct buffer_reg_uid *reg;
4850
4851 /*
4852 * Clear quiescent for all per UID buffers associated to
4853 * that session.
4854 */
4855 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4856 struct consumer_socket *socket;
4857 struct buffer_reg_channel *reg_chan;
4858
4859 /* Get associated consumer socket.*/
4860 socket = consumer_find_socket_by_bitness(
4861 reg->bits_per_long, usess->consumer);
4862 if (!socket) {
4863 /*
4864 * Ignore request if no consumer is found for
4865 * the session.
4866 */
4867 continue;
4868 }
4869
4870 cds_lfht_for_each_entry(reg->registry->channels->ht,
4871 &iter.iter, reg_chan, node.node) {
4872 /*
4873 * The following call will print error values so
4874 * the return code is of little importance
4875 * because whatever happens, we have to try them
4876 * all.
4877 */
4878 (void) consumer_clear_quiescent_channel(socket,
4879 reg_chan->consumer_key);
4880 }
4881 }
4882 break;
4883 }
4884 case LTTNG_BUFFER_PER_PID:
4885 {
4886 struct ust_app_session *ua_sess;
4887 struct lttng_ht_iter iter;
4888 struct ust_app *app;
4889
4890 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
4891 pid_n.node) {
4892 ua_sess = lookup_session_by_app(usess, app);
4893 if (ua_sess == NULL) {
4894 continue;
4895 }
4896 (void) ust_app_clear_quiescent_app_session(app,
4897 ua_sess);
4898 }
4899 break;
4900 }
4901 default:
4902 ret = -1;
4903 assert(0);
4904 break;
4905 }
4906
4907 rcu_read_unlock();
4908 health_code_update();
4909 return ret;
4910 }
4911
4912 /*
4913 * Destroy a specific UST session in apps.
4914 */
4915 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
4916 {
4917 int ret;
4918 struct ust_app_session *ua_sess;
4919 struct lttng_ht_iter iter;
4920 struct lttng_ht_node_u64 *node;
4921
4922 DBG("Destroy tracing for ust app pid %d", app->pid);
4923
4924 rcu_read_lock();
4925
4926 if (!app->compatible) {
4927 goto end;
4928 }
4929
4930 __lookup_session_by_app(usess, app, &iter);
4931 node = lttng_ht_iter_get_node_u64(&iter);
4932 if (node == NULL) {
4933 /* Session is being or is deleted. */
4934 goto end;
4935 }
4936 ua_sess = caa_container_of(node, struct ust_app_session, node);
4937
4938 health_code_update();
4939 destroy_app_session(app, ua_sess);
4940
4941 health_code_update();
4942
4943 /* Quiescent wait after stopping trace */
4944 pthread_mutex_lock(&app->sock_lock);
4945 ret = ustctl_wait_quiescent(app->sock);
4946 pthread_mutex_unlock(&app->sock_lock);
4947 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4948 ERR("UST app wait quiescent failed for app pid %d ret %d",
4949 app->pid, ret);
4950 }
4951 end:
4952 rcu_read_unlock();
4953 health_code_update();
4954 return 0;
4955 }
4956
4957 /*
4958 * Start tracing for the UST session.
4959 */
4960 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4961 {
4962 int ret = 0;
4963 struct lttng_ht_iter iter;
4964 struct ust_app *app;
4965
4966 DBG("Starting all UST traces");
4967
4968 rcu_read_lock();
4969
4970 /*
4971 * In a start-stop-start use-case, we need to clear the quiescent state
4972 * of each channel set by the prior stop command, thus ensuring that a
4973 * following stop or destroy is sure to grab a timestamp_end near those
4974 * operations, even if the packet is empty.
4975 */
4976 (void) ust_app_clear_quiescent_session(usess);
4977
4978 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4979 ret = ust_app_start_trace(usess, app);
4980 if (ret < 0) {
4981 /* Continue to next apps even on error */
4982 continue;
4983 }
4984 }
4985
4986 rcu_read_unlock();
4987
4988 return 0;
4989 }
4990
4991 /*
4992 * Start tracing for the UST session.
4993 * Called with UST session lock held.
4994 */
4995 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4996 {
4997 int ret = 0;
4998 struct lttng_ht_iter iter;
4999 struct ust_app *app;
5000
5001 DBG("Stopping all UST traces");
5002
5003 rcu_read_lock();
5004
5005 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5006 ret = ust_app_stop_trace(usess, app);
5007 if (ret < 0) {
5008 /* Continue to next apps even on error */
5009 continue;
5010 }
5011 }
5012
5013 (void) ust_app_flush_session(usess);
5014
5015 rcu_read_unlock();
5016
5017 return 0;
5018 }
5019
5020 /*
5021 * Destroy app UST session.
5022 */
5023 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
5024 {
5025 int ret = 0;
5026 struct lttng_ht_iter iter;
5027 struct ust_app *app;
5028
5029 DBG("Destroy all UST traces");
5030
5031 rcu_read_lock();
5032
5033 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5034 ret = destroy_trace(usess, app);
5035 if (ret < 0) {
5036 /* Continue to next apps even on error */
5037 continue;
5038 }
5039 }
5040
5041 rcu_read_unlock();
5042
5043 return 0;
5044 }
5045
/*
 * Create (or re-use) the application session for the given UST session and
 * materialize on the tracer every channel, context and event of its shadow
 * copy. Starts tracing right away when the UST session is active.
 *
 * On error the whole application session is destroyed; there is no partial
 * state left behind.
 */
static
void ust_app_global_create(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_session *ua_sess = NULL;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	int is_created = 0;

	ret = find_or_create_ust_app_session(usess, app, &ua_sess, &is_created);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	if (!is_created) {
		/* App session already created. */
		goto end;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/*
	 * We can iterate safely here over all UST app session since the create ust
	 * app session above made a shadow copy of the UST global domain from the
	 * ltt ust session.
	 */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = do_create_channel(app, usess, ua_sess, ua_chan);
		if (ret < 0 && ret != -ENOTCONN) {
			/*
			 * Stop everything. On error, the application
			 * failed, no more file descriptor are available
			 * or ENOMEM so stopping here is the only thing
			 * we can do for now. The only exception is
			 * -ENOTCONN, which indicates that the application
			 * has exit.
			 */
			goto error_unlock;
		}

		/*
		 * Add context using the list so they are enabled in the same order the
		 * user added them.
		 */
		cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
			ret = create_ust_channel_context(ua_chan, ua_ctx, app);
			if (ret < 0) {
				goto error_unlock;
			}
		}


		/* For each events */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
			if (ret < 0) {
				goto error_unlock;
			}
		}
	}

	pthread_mutex_unlock(&ua_sess->lock);

	/* Only start tracing if the UST session is currently active. */
	if (usess->active) {
		ret = ust_app_start_trace(usess, app);
		if (ret < 0) {
			goto error;
		}

		DBG2("UST trace started for app pid %d", app->pid);
	}
end:
	/* Everything went well at this point. */
	return;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
error:
	/* Tear down the whole app session; no partial state is kept. */
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
	return;
}
5139
/*
 * Tear down the application's session for the given UST session, if any.
 */
static
void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
{
	struct ust_app_session *ua_sess = lookup_session_by_app(usess, app);

	/* Nothing to do when the app never had a session for usess. */
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
}
5151
5152 /*
5153 * Add channels/events from UST global domain to registered apps at sock.
5154 *
5155 * Called with session lock held.
5156 * Called with RCU read-side lock held.
5157 */
5158 void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
5159 {
5160 assert(usess);
5161
5162 DBG2("UST app global update for app sock %d for session id %" PRIu64,
5163 app->sock, usess->id);
5164
5165 if (!app->compatible) {
5166 return;
5167 }
5168
5169 if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
5170 ust_app_global_create(usess, app);
5171 } else {
5172 ust_app_global_destroy(usess, app);
5173 }
5174 }
5175
5176 /*
5177 * Called with session lock held.
5178 */
5179 void ust_app_global_update_all(struct ltt_ust_session *usess)
5180 {
5181 struct lttng_ht_iter iter;
5182 struct ust_app *app;
5183
5184 rcu_read_lock();
5185 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5186 ust_app_global_update(usess, app);
5187 }
5188 rcu_read_unlock();
5189 }
5190
5191 /*
5192 * Add context to a specific channel for global UST domain.
5193 */
5194 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
5195 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
5196 {
5197 int ret = 0;
5198 struct lttng_ht_node_str *ua_chan_node;
5199 struct lttng_ht_iter iter, uiter;
5200 struct ust_app_channel *ua_chan = NULL;
5201 struct ust_app_session *ua_sess;
5202 struct ust_app *app;
5203
5204 rcu_read_lock();
5205
5206 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5207 if (!app->compatible) {
5208 /*
5209 * TODO: In time, we should notice the caller of this error by
5210 * telling him that this is a version error.
5211 */
5212 continue;
5213 }
5214 ua_sess = lookup_session_by_app(usess, app);
5215 if (ua_sess == NULL) {
5216 continue;
5217 }
5218
5219 pthread_mutex_lock(&ua_sess->lock);
5220
5221 if (ua_sess->deleted) {
5222 pthread_mutex_unlock(&ua_sess->lock);
5223 continue;
5224 }
5225
5226 /* Lookup channel in the ust app session */
5227 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
5228 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
5229 if (ua_chan_node == NULL) {
5230 goto next_app;
5231 }
5232 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
5233 node);
5234 ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
5235 if (ret < 0) {
5236 goto next_app;
5237 }
5238 next_app:
5239 pthread_mutex_unlock(&ua_sess->lock);
5240 }
5241
5242 rcu_read_unlock();
5243 return ret;
5244 }
5245
5246 /*
5247 * Enable event for a channel from a UST session for a specific PID.
5248 */
5249 int ust_app_enable_event_pid(struct ltt_ust_session *usess,
5250 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
5251 {
5252 int ret = 0;
5253 struct lttng_ht_iter iter;
5254 struct lttng_ht_node_str *ua_chan_node;
5255 struct ust_app *app;
5256 struct ust_app_session *ua_sess;
5257 struct ust_app_channel *ua_chan;
5258 struct ust_app_event *ua_event;
5259
5260 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
5261
5262 rcu_read_lock();
5263
5264 app = ust_app_find_by_pid(pid);
5265 if (app == NULL) {
5266 ERR("UST app enable event per PID %d not found", pid);
5267 ret = -1;
5268 goto end;
5269 }
5270
5271 if (!app->compatible) {
5272 ret = 0;
5273 goto end;
5274 }
5275
5276 ua_sess = lookup_session_by_app(usess, app);
5277 if (!ua_sess) {
5278 /* The application has problem or is probably dead. */
5279 ret = 0;
5280 goto end;
5281 }
5282
5283 pthread_mutex_lock(&ua_sess->lock);
5284
5285 if (ua_sess->deleted) {
5286 ret = 0;
5287 goto end_unlock;
5288 }
5289
5290 /* Lookup channel in the ust app session */
5291 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
5292 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
5293 /* If the channel is not found, there is a code flow error */
5294 assert(ua_chan_node);
5295
5296 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
5297
5298 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
5299 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
5300 if (ua_event == NULL) {
5301 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5302 if (ret < 0) {
5303 goto end_unlock;
5304 }
5305 } else {
5306 ret = enable_ust_app_event(ua_sess, ua_event, app);
5307 if (ret < 0) {
5308 goto end_unlock;
5309 }
5310 }
5311
5312 end_unlock:
5313 pthread_mutex_unlock(&ua_sess->lock);
5314 end:
5315 rcu_read_unlock();
5316 return ret;
5317 }
5318
5319 /*
5320 * Receive registration and populate the given msg structure.
5321 *
5322 * On success return 0 else a negative value returned by the ustctl call.
5323 */
5324 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
5325 {
5326 int ret;
5327 uint32_t pid, ppid, uid, gid;
5328
5329 assert(msg);
5330
5331 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
5332 &pid, &ppid, &uid, &gid,
5333 &msg->bits_per_long,
5334 &msg->uint8_t_alignment,
5335 &msg->uint16_t_alignment,
5336 &msg->uint32_t_alignment,
5337 &msg->uint64_t_alignment,
5338 &msg->long_alignment,
5339 &msg->byte_order,
5340 msg->name);
5341 if (ret < 0) {
5342 switch (-ret) {
5343 case EPIPE:
5344 case ECONNRESET:
5345 case LTTNG_UST_ERR_EXITING:
5346 DBG3("UST app recv reg message failed. Application died");
5347 break;
5348 case LTTNG_UST_ERR_UNSUP_MAJOR:
5349 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
5350 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
5351 LTTNG_UST_ABI_MINOR_VERSION);
5352 break;
5353 default:
5354 ERR("UST app recv reg message failed with ret %d", ret);
5355 break;
5356 }
5357 goto error;
5358 }
5359 msg->pid = (pid_t) pid;
5360 msg->ppid = (pid_t) ppid;
5361 msg->uid = (uid_t) uid;
5362 msg->gid = (gid_t) gid;
5363
5364 error:
5365 return ret;
5366 }
5367
5368 /*
5369 * Return a ust app session object using the application object and the
5370 * session object descriptor has a key. If not found, NULL is returned.
5371 * A RCU read side lock MUST be acquired when calling this function.
5372 */
5373 static struct ust_app_session *find_session_by_objd(struct ust_app *app,
5374 int objd)
5375 {
5376 struct lttng_ht_node_ulong *node;
5377 struct lttng_ht_iter iter;
5378 struct ust_app_session *ua_sess = NULL;
5379
5380 assert(app);
5381
5382 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
5383 node = lttng_ht_iter_get_node_ulong(&iter);
5384 if (node == NULL) {
5385 DBG2("UST app session find by objd %d not found", objd);
5386 goto error;
5387 }
5388
5389 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
5390
5391 error:
5392 return ua_sess;
5393 }
5394
5395 /*
5396 * Return a ust app channel object using the application object and the channel
5397 * object descriptor has a key. If not found, NULL is returned. A RCU read side
5398 * lock MUST be acquired before calling this function.
5399 */
5400 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
5401 int objd)
5402 {
5403 struct lttng_ht_node_ulong *node;
5404 struct lttng_ht_iter iter;
5405 struct ust_app_channel *ua_chan = NULL;
5406
5407 assert(app);
5408
5409 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
5410 node = lttng_ht_iter_get_node_ulong(&iter);
5411 if (node == NULL) {
5412 DBG2("UST app channel find by objd %d not found", objd);
5413 goto error;
5414 }
5415
5416 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
5417
5418 error:
5419 return ua_chan;
5420 }
5421
5422 /*
5423 * Reply to a register channel notification from an application on the notify
5424 * socket. The channel metadata is also created.
5425 *
5426 * The session UST registry lock is acquired in this function.
5427 *
5428 * On success 0 is returned else a negative value.
5429 */
5430 static int reply_ust_register_channel(int sock, int cobjd,
5431 size_t nr_fields, struct ustctl_field *fields)
5432 {
5433 int ret, ret_code = 0;
5434 uint32_t chan_id;
5435 uint64_t chan_reg_key;
5436 enum ustctl_channel_header type;
5437 struct ust_app *app;
5438 struct ust_app_channel *ua_chan;
5439 struct ust_app_session *ua_sess;
5440 struct ust_registry_session *registry;
5441 struct ust_registry_channel *chan_reg;
5442
5443 rcu_read_lock();
5444
5445 /* Lookup application. If not found, there is a code flow error. */
5446 app = find_app_by_notify_sock(sock);
5447 if (!app) {
5448 DBG("Application socket %d is being torn down. Abort event notify",
5449 sock);
5450 ret = 0;
5451 goto error_rcu_unlock;
5452 }
5453
5454 /* Lookup channel by UST object descriptor. */
5455 ua_chan = find_channel_by_objd(app, cobjd);
5456 if (!ua_chan) {
5457 DBG("Application channel is being torn down. Abort event notify");
5458 ret = 0;
5459 goto error_rcu_unlock;
5460 }
5461
5462 assert(ua_chan->session);
5463 ua_sess = ua_chan->session;
5464
5465 /* Get right session registry depending on the session buffer type. */
5466 registry = get_session_registry(ua_sess);
5467 if (!registry) {
5468 DBG("Application session is being torn down. Abort event notify");
5469 ret = 0;
5470 goto error_rcu_unlock;
5471 };
5472
5473 /* Depending on the buffer type, a different channel key is used. */
5474 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5475 chan_reg_key = ua_chan->tracing_channel_id;
5476 } else {
5477 chan_reg_key = ua_chan->key;
5478 }
5479
5480 pthread_mutex_lock(&registry->lock);
5481
5482 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
5483 assert(chan_reg);
5484
5485 if (!chan_reg->register_done) {
5486 /*
5487 * TODO: eventually use the registry event count for
5488 * this channel to better guess header type for per-pid
5489 * buffers.
5490 */
5491 type = USTCTL_CHANNEL_HEADER_LARGE;
5492 chan_reg->nr_ctx_fields = nr_fields;
5493 chan_reg->ctx_fields = fields;
5494 fields = NULL;
5495 chan_reg->header_type = type;
5496 } else {
5497 /* Get current already assigned values. */
5498 type = chan_reg->header_type;
5499 }
5500 /* Channel id is set during the object creation. */
5501 chan_id = chan_reg->chan_id;
5502
5503 /* Append to metadata */
5504 if (!chan_reg->metadata_dumped) {
5505 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
5506 if (ret_code) {
5507 ERR("Error appending channel metadata (errno = %d)", ret_code);
5508 goto reply;
5509 }
5510 }
5511
5512 reply:
5513 DBG3("UST app replying to register channel key %" PRIu64
5514 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
5515 ret_code);
5516
5517 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
5518 if (ret < 0) {
5519 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5520 ERR("UST app reply channel failed with ret %d", ret);
5521 } else {
5522 DBG3("UST app reply channel failed. Application died");
5523 }
5524 goto error;
5525 }
5526
5527 /* This channel registry registration is completed. */
5528 chan_reg->register_done = 1;
5529
5530 error:
5531 pthread_mutex_unlock(&registry->lock);
5532 error_rcu_unlock:
5533 rcu_read_unlock();
5534 free(fields);
5535 return ret;
5536 }
5537
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * Ownership of 'sig', 'fields' and 'model_emf_uri' is transferred to
 * ust_registry_create_event(); the trailing free() calls are no-ops for
 * anything that was consumed (the locals are NULL-ed after the transfer).
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields,
		int loglevel_value, char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		/* App teardown racing with the notification: not an error. */
		DBG("Application socket %d is being torn down. Abort event notify",
				sock);
		ret = 0;
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	/* Per-UID buffers share a channel key across apps; per-PID do not. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields,
			loglevel_value, model_emf_uri, ua_sess->buffer_type,
			&event_id, app);
	sig = NULL;
	fields = NULL;
	model_emf_uri = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	/* These are NULL if ownership was successfully transferred above. */
	free(sig);
	free(fields);
	free(model_emf_uri);
	return ret;
}
5640
/*
 * Add enum to the UST session registry. Once done, this replies to the
 * application with the appropriate error code.
 *
 * Ownership of 'entries' is transferred to
 * ust_registry_create_or_find_enum(), or freed here on the early-exit paths.
 *
 * The session UST registry lock is acquired within this function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_enum_ust_registry(int sock, int sobjd, char *name,
		struct ustctl_enum_entry *entries, size_t nr_entries)
{
	int ret = 0, ret_code;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	uint64_t enum_id = -1ULL;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		/* App teardown race: return 0 (ret) since this is not an error. */
		DBG("Application socket %d is being torn down. Aborting enum registration",
				sock);
		free(entries);
		goto error_rcu_unlock;
	}

	/* Lookup session by UST object descriptor. */
	ua_sess = find_session_by_objd(app, sobjd);
	if (!ua_sess) {
		/* Session teardown race: return 0 (ret) since this is not an error. */
		DBG("Application session is being torn down (session not found). Aborting enum registration.");
		free(entries);
		goto error_rcu_unlock;
	}

	registry = get_session_registry(ua_sess);
	if (!registry) {
		/* Registry teardown race: return 0 (ret) since this is not an error. */
		DBG("Application session is being torn down (registry not found). Aborting enum registration.");
		free(entries);
		goto error_rcu_unlock;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, the callee acquires the ownership of
	 * entries. The variable entries MUST NOT be read/written after
	 * call.
	 */
	ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
			entries, nr_entries, &enum_id);
	entries = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply enum failed with ret %d", ret);
		} else {
			DBG3("UST app reply enum failed. Application died");
		}
		/*
		 * No need to wipe the create enum since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry enum %s added successfully or already found", name);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
5724
/*
 * Handle application notification through the given notify socket.
 *
 * Receives one notification command (event, channel or enum registration)
 * and dispatches it to the matching registry helper. Each helper takes
 * ownership of the heap-allocated payload it receives.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		/* EPIPE/EXITING simply mean the application went away. */
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel_value;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		/* sig, fields and model_emf_uri are allocated by the receiver. */
		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
				&loglevel_value, &sig, &nr_fields, &fields,
				&model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownership of these variables and transfers them
		 * to the callee.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel_value, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership are transfered to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean it up.
		 */
		ret = reply_ust_register_channel(sock, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_ENUM:
	{
		int sobjd;
		char name[LTTNG_UST_SYM_NAME_LEN];
		size_t nr_entries;
		struct ustctl_enum_entry *entries;

		DBG2("UST app ustctl register enum received");

		ret = ustctl_recv_register_enum(sock, &sobjd, name,
				&entries, &nr_entries);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv enum failed with ret %d", ret);
			} else {
				DBG3("UST app recv enum failed. Application died");
			}
			goto error;
		}

		/* Callee assumes ownership of entries */
		ret = add_enum_ust_registry(sock, sobjd, name,
				entries, nr_entries);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
5852
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whatever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independently from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and is it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
5929
5930 /*
5931 * Destroy a ust app data structure and free its memory.
5932 */
5933 void ust_app_destroy(struct ust_app *app)
5934 {
5935 if (!app) {
5936 return;
5937 }
5938
5939 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5940 }
5941
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * Both the session list lock and the target session's lock must be held by
 * the caller (asserted below via trylock).
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_snapshot_record(struct ltt_ust_session *usess,
		struct snapshot_output *output, int wait,
		uint64_t nb_packets_per_stream)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char pathname[PATH_MAX];
	struct ltt_session *session;
	uint64_t trace_archive_id;

	assert(usess);
	assert(output);

	rcu_read_lock();

	session = session_find_by_id(usess->id);
	assert(session);
	/* Caller must already hold both locks; trylock failing proves it. */
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());
	trace_archive_id = session->current_archive_id;

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		/* One registry per (uid, bitness); snapshot each of them. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			if (!reg->registry->reg.ust->metadata_key) {
				/* Skip since no metadata is present */
				continue;
			}

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			memset(pathname, 0, sizeof(pathname));
			/*
			 * NOTE(review): only ret < 0 is checked; silent truncation
			 * (ret >= sizeof(pathname)) is not detected here.
			 */
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			/* Add the UST default trace dir to path. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				ret = consumer_snapshot_channel(socket,
						reg_chan->consumer_key,
						output, 0, usess->uid,
						usess->gid, pathname, wait,
						nb_packets_per_stream,
						trace_archive_id);
				if (ret < 0) {
					goto error;
				}
			}
			/* Metadata channel last, flagged with is_metadata = 1. */
			ret = consumer_snapshot_channel(socket,
					reg->registry->reg.ust->metadata_key, output, 1,
					usess->uid, usess->gid, pathname, wait, 0,
					trace_archive_id);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		/* Per-PID: walk every registered app tracing this session. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					output->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
					ua_sess->path);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_snapshot_channel(socket,
						ua_chan->key, output,
						0, ua_sess->euid, ua_sess->egid,
						pathname, wait,
						nb_packets_per_stream,
						trace_archive_id);
				if (ret < 0) {
					goto error;
				}
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Abort snapshot record.");
				ret = -1;
				goto error;
			}
			ret = consumer_snapshot_channel(socket,
					registry->metadata_key, output,
					1, ua_sess->euid, ua_sess->egid,
					pathname, wait, 0,
					trace_archive_id);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

error:
	rcu_read_unlock();
	return ret;
}
6095
/*
 * Return the size taken by one more packet per stream.
 *
 * For each channel of the session (per-UID registries or per-PID app
 * channels), add one sub-buffer per stream unless cur_nr_packets already
 * covers that channel's whole sub-buffer count.
 */
uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
		uint64_t cur_nr_packets)
{
	uint64_t tot_size = 0;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	assert(usess);

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;

			/* RCU read side protects the lock-free hash table walk. */
			rcu_read_lock();
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				if (cur_nr_packets >= reg_chan->num_subbuf) {
					/*
					 * Don't take channel into account if we
					 * already grab all its packets.
					 */
					continue;
				}
				tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
			}
			rcu_read_unlock();
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		rcu_read_lock();
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct lttng_ht_iter chan_iter;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
					/*
					 * Don't take channel into account if we
					 * already grab all its packets.
					 */
					continue;
				}
				tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
			}
		}
		rcu_read_unlock();
		break;
	}
	default:
		assert(0);
		break;
	}

	return tot_size;
}
6168
/*
 * Fetch runtime statistics for a per-UID channel.
 *
 * Resolves the consumer channel key from the per-UID buffer registries, then
 * queries either the lost packet count (overwrite mode) or the discarded
 * event count (discard mode) from the consumer.
 *
 * Return 0 on success (including "channel not found", which leaves both
 * counters at zero) or else a negative value.
 */
int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
		struct cds_list_head *buffer_reg_uid_list,
		struct consumer_output *consumer, uint64_t uchan_id,
		int overwrite, uint64_t *discarded, uint64_t *lost)
{
	uint64_t chan_key;
	int ret;

	*discarded = 0;
	*lost = 0;

	ret = buffer_reg_uid_consumer_channel_key(buffer_reg_uid_list,
			uchan_id, &chan_key);
	if (ret < 0) {
		/* Not found: report zeroed counters, not an error. */
		return 0;
	}

	return overwrite ?
			consumer_get_lost_packets(ust_session_id, chan_key,
					consumer, lost) :
			consumer_get_discarded_events(ust_session_id, chan_key,
					consumer, discarded);
}
6199
/*
 * Fetch runtime statistics for a per-PID channel.
 *
 * Sums the lost-packet (overwrite mode) or discarded-event (discard mode)
 * counters over every registered application that traces this session.
 *
 * Return 0 on success or else a negative value from the consumer query.
 */
int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan,
		struct consumer_output *consumer, int overwrite,
		uint64_t *discarded, uint64_t *lost)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	*discarded = 0;
	*lost = 0;

	rcu_read_lock();
	/*
	 * Iterate over every registered applications. Sum counters for
	 * all applications containing requested session and channel.
	 */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;

		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Get channel */
		lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session is found for the app, the channel must be there */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		if (overwrite) {
			uint64_t _lost;

			ret = consumer_get_lost_packets(usess->id, ua_chan->key,
					consumer, &_lost);
			if (ret < 0) {
				/* Stop at the first consumer error; partial sums stand. */
				break;
			}
			(*lost) += _lost;
		} else {
			uint64_t _discarded;

			ret = consumer_get_discarded_events(usess->id,
					ua_chan->key, consumer, &_discarded);
			if (ret < 0) {
				break;
			}
			(*discarded) += _discarded;
		}
	}

	rcu_read_unlock();
	return ret;
}
6260
/*
 * Ask one application to regenerate its statedump for the given session.
 *
 * Silently skips applications whose session is being torn down or was
 * already marked deleted.
 *
 * Return 0 on success (or when skipped) else a negative value from ustctl.
 */
static
int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Regenerating the metadata for ust app pid %d", app->pid);

	rcu_read_lock();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_unlock;
	}

	/* Serialize communication on the application command socket. */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
6296
6297 /*
6298 * Regenerate the statedump for each app in the session.
6299 */
6300 int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
6301 {
6302 int ret = 0;
6303 struct lttng_ht_iter iter;
6304 struct ust_app *app;
6305
6306 DBG("Regenerating the metadata for all UST apps");
6307
6308 rcu_read_lock();
6309
6310 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6311 if (!app->compatible) {
6312 continue;
6313 }
6314
6315 ret = ust_app_regenerate_statedump(usess, app);
6316 if (ret < 0) {
6317 /* Continue to the next app even on error */
6318 continue;
6319 }
6320 }
6321
6322 rcu_read_unlock();
6323
6324 return 0;
6325 }
6326
6327 /*
6328 * Rotate all the channels of a session.
6329 *
6330 * Return 0 on success or else a negative value.
6331 */
6332 int ust_app_rotate_session(struct ltt_session *session)
6333 {
6334 int ret = 0;
6335 struct lttng_ht_iter iter;
6336 struct ust_app *app;
6337 struct ltt_ust_session *usess = session->ust_session;
6338 char pathname[LTTNG_PATH_MAX];
6339
6340 assert(usess);
6341
6342 rcu_read_lock();
6343
6344 switch (usess->buffer_type) {
6345 case LTTNG_BUFFER_PER_UID:
6346 {
6347 struct buffer_reg_uid *reg;
6348
6349 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6350 struct buffer_reg_channel *reg_chan;
6351 struct consumer_socket *socket;
6352
6353 /* Get consumer socket to use to push the metadata.*/
6354 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6355 usess->consumer);
6356 if (!socket) {
6357 ret = -EINVAL;
6358 goto error;
6359 }
6360
6361 ret = snprintf(pathname, sizeof(pathname),
6362 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
6363 reg->uid, reg->bits_per_long);
6364 if (ret < 0 || ret == sizeof(pathname)) {
6365 PERROR("Failed to format rotation path");
6366 goto error;
6367 }
6368
6369 /* Rotate the data channels. */
6370 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6371 reg_chan, node.node) {
6372 ret = consumer_rotate_channel(socket,
6373 reg_chan->consumer_key,
6374 usess->uid, usess->gid,
6375 usess->consumer, pathname,
6376 /* is_metadata_channel */ false,
6377 session->current_archive_id);
6378 if (ret < 0) {
6379 goto error;
6380 }
6381 }
6382
6383 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
6384
6385 ret = consumer_rotate_channel(socket,
6386 reg->registry->reg.ust->metadata_key,
6387 usess->uid, usess->gid,
6388 usess->consumer, pathname,
6389 /* is_metadata_channel */ true,
6390 session->current_archive_id);
6391 if (ret < 0) {
6392 goto error;
6393 }
6394 }
6395 break;
6396 }
6397 case LTTNG_BUFFER_PER_PID:
6398 {
6399 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6400 struct consumer_socket *socket;
6401 struct lttng_ht_iter chan_iter;
6402 struct ust_app_channel *ua_chan;
6403 struct ust_app_session *ua_sess;
6404 struct ust_registry_session *registry;
6405
6406 ua_sess = lookup_session_by_app(usess, app);
6407 if (!ua_sess) {
6408 /* Session not associated with this app. */
6409 continue;
6410 }
6411 ret = snprintf(pathname, sizeof(pathname),
6412 DEFAULT_UST_TRACE_DIR "/%s",
6413 ua_sess->path);
6414 if (ret < 0 || ret == sizeof(pathname)) {
6415 PERROR("Failed to format rotation path");
6416 goto error;
6417 }
6418
6419 /* Get the right consumer socket for the application. */
6420 socket = consumer_find_socket_by_bitness(app->bits_per_long,
6421 usess->consumer);
6422 if (!socket) {
6423 ret = -EINVAL;
6424 goto error;
6425 }
6426
6427 registry = get_session_registry(ua_sess);
6428 if (!registry) {
6429 DBG("Application session is being torn down. Abort session rotation.");
6430 ret = -1;
6431 goto error;
6432 }
6433
6434
6435 /* Rotate the data channels. */
6436 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6437 ua_chan, node.node) {
6438 ret = consumer_rotate_channel(socket, ua_chan->key,
6439 ua_sess->euid, ua_sess->egid,
6440 ua_sess->consumer, pathname,
6441 /* is_metadata_channel */ false,
6442 session->current_archive_id);
6443 if (ret < 0) {
6444 goto error;
6445 }
6446 }
6447
6448 /* Rotate the metadata channel. */
6449 (void) push_metadata(registry, usess->consumer);
6450 ret = consumer_rotate_channel(socket, registry->metadata_key,
6451 ua_sess->euid, ua_sess->egid,
6452 ua_sess->consumer, pathname,
6453 /* is_metadata_channel */ true,
6454 session->current_archive_id);
6455 if (ret < 0) {
6456 goto error;
6457 }
6458 }
6459 break;
6460 }
6461 default:
6462 assert(0);
6463 break;
6464 }
6465
6466 ret = LTTNG_OK;
6467
6468 error:
6469 rcu_read_unlock();
6470 return ret;
6471 }
This page took 0.194099 seconds and 6 git commands to generate.