5a41c3800709ff3c2a628758ed89f01636be04f7
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _LGPL_SOURCE
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <pthread.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <unistd.h>
29 #include <urcu/compiler.h>
30 #include <lttng/ust-error.h>
31 #include <signal.h>
32
33 #include <common/common.h>
34 #include <common/sessiond-comm/sessiond-comm.h>
35
36 #include "buffer-registry.h"
37 #include "fd-limit.h"
38 #include "health-sessiond.h"
39 #include "ust-app.h"
40 #include "ust-consumer.h"
41 #include "ust-ctl.h"
42 #include "utils.h"
43 #include "session.h"
44
/* Forward declaration: defined later in this file. */
static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
55
56 /*
57 * Return the incremented value of next_channel_key.
58 */
59 static uint64_t get_next_channel_key(void)
60 {
61 uint64_t ret;
62
63 pthread_mutex_lock(&next_channel_key_lock);
64 ret = ++_next_channel_key;
65 pthread_mutex_unlock(&next_channel_key_lock);
66 return ret;
67 }
68
69 /*
70 * Return the atomically incremented value of next_session_id.
71 */
72 static uint64_t get_next_session_id(void)
73 {
74 uint64_t ret;
75
76 pthread_mutex_lock(&next_session_id_lock);
77 ret = ++_next_session_id;
78 pthread_mutex_unlock(&next_session_id_lock);
79 return ret;
80 }
81
/*
 * Copy channel attributes from the lttng_ust layout to the ustctl consumer
 * layout. A field-by-field copy is needed since the two structs differ.
 */
static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
}
94
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes which are the event
 * name, the filter bytecode, the loglevel and the exclusions.
 *
 * Returns 1 on a match, 0 otherwise (cds_lfht match convention).
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	/* Recover the full event object from its embedded hash table node. */
	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exists, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exists, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}


	/* Match. */
	return 1;

no_match:
	return 0;
}
172
/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 *
 * Asserts that no duplicate (same name/filter/loglevel/exclusion) already
 * exists in the channel's event table.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	/* Build the 4-part lookup key from the event being inserted. */
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = event->attr.loglevel;
	key.exclusion = event->exclusion;

	/* Hash on the event name only; the match callback checks the rest. */
	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	/* add_unique returns an existing node on collision; none is expected. */
	assert(node_ptr == &event->node.node);
}
199
200 /*
201 * Close the notify socket from the given RCU head object. This MUST be called
202 * through a call_rcu().
203 */
204 static void close_notify_sock_rcu(struct rcu_head *head)
205 {
206 int ret;
207 struct ust_app_notify_sock_obj *obj =
208 caa_container_of(head, struct ust_app_notify_sock_obj, head);
209
210 /* Must have a valid fd here. */
211 assert(obj->fd >= 0);
212
213 ret = close(obj->fd);
214 if (ret) {
215 ERR("close notify sock %d RCU", obj->fd);
216 }
217 lttng_fd_put(LTTNG_FD_APPS, 1);
218
219 free(obj);
220 }
221
222 /*
223 * Return the session registry according to the buffer type of the given
224 * session.
225 *
226 * A registry per UID object MUST exists before calling this function or else
227 * it assert() if not found. RCU read side lock must be acquired.
228 */
229 static struct ust_registry_session *get_session_registry(
230 struct ust_app_session *ua_sess)
231 {
232 struct ust_registry_session *registry = NULL;
233
234 assert(ua_sess);
235
236 switch (ua_sess->buffer_type) {
237 case LTTNG_BUFFER_PER_PID:
238 {
239 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
240 if (!reg_pid) {
241 goto error;
242 }
243 registry = reg_pid->registry->reg.ust;
244 break;
245 }
246 case LTTNG_BUFFER_PER_UID:
247 {
248 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
249 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
250 if (!reg_uid) {
251 goto error;
252 }
253 registry = reg_uid->registry->reg.ust;
254 break;
255 }
256 default:
257 assert(0);
258 };
259
260 error:
261 return registry;
262 }
263
264 /*
265 * Delete ust context safely. RCU read lock must be held before calling
266 * this function.
267 */
268 static
269 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
270 struct ust_app *app)
271 {
272 int ret;
273
274 assert(ua_ctx);
275
276 if (ua_ctx->obj) {
277 pthread_mutex_lock(&app->sock_lock);
278 ret = ustctl_release_object(sock, ua_ctx->obj);
279 pthread_mutex_unlock(&app->sock_lock);
280 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
281 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
282 sock, ua_ctx->obj->handle, ret);
283 }
284 free(ua_ctx->obj);
285 }
286 free(ua_ctx);
287 }
288
289 /*
290 * Delete ust app event safely. RCU read lock must be held before calling
291 * this function.
292 */
293 static
294 void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
295 struct ust_app *app)
296 {
297 int ret;
298
299 assert(ua_event);
300
301 free(ua_event->filter);
302 if (ua_event->exclusion != NULL)
303 free(ua_event->exclusion);
304 if (ua_event->obj != NULL) {
305 pthread_mutex_lock(&app->sock_lock);
306 ret = ustctl_release_object(sock, ua_event->obj);
307 pthread_mutex_unlock(&app->sock_lock);
308 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
309 ERR("UST app sock %d release event obj failed with ret %d",
310 sock, ret);
311 }
312 free(ua_event->obj);
313 }
314 free(ua_event);
315 }
316
317 /*
318 * Release ust data object of the given stream.
319 *
320 * Return 0 on success or else a negative value.
321 */
322 static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
323 struct ust_app *app)
324 {
325 int ret = 0;
326
327 assert(stream);
328
329 if (stream->obj) {
330 pthread_mutex_lock(&app->sock_lock);
331 ret = ustctl_release_object(sock, stream->obj);
332 pthread_mutex_unlock(&app->sock_lock);
333 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
334 ERR("UST app sock %d release stream obj failed with ret %d",
335 sock, ret);
336 }
337 lttng_fd_put(LTTNG_FD_APPS, 2);
338 free(stream->obj);
339 }
340
341 return ret;
342 }
343
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 *
 * Best effort: the stream is freed regardless of the release outcome.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}
357
/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	/* Hand both hash tables to the cleanup thread, then free the channel. */
	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}
374
/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	/* Only per-CPU channels carry these counters. */
	if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	/* Overwrite-mode channels lose packets; discard-mode drop events. */
	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	/* Accumulate into the parent channel so the totals survive app exit. */
	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
}
437
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 *
 * Tears down, in order: streams, contexts, events, the per-PID registry
 * entry and counters, then the tracer-side channel object. The channel
 * struct itself is reclaimed after a grace period via call_rcu().
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
		/* Preserve lost/discarded counters before the channel disappears. */
		save_per_pid_lost_discarded_counters(ua_chan);
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		/* EPIPE/EXITING simply mean the application is gone; not an error. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
507
508 int ust_app_register_done(struct ust_app *app)
509 {
510 int ret;
511
512 pthread_mutex_lock(&app->sock_lock);
513 ret = ustctl_register_done(app->sock);
514 pthread_mutex_unlock(&app->sock_lock);
515 return ret;
516 }
517
518 int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
519 {
520 int ret, sock;
521
522 if (app) {
523 pthread_mutex_lock(&app->sock_lock);
524 sock = app->sock;
525 } else {
526 sock = -1;
527 }
528 ret = ustctl_release_object(sock, data);
529 if (app) {
530 pthread_mutex_unlock(&app->sock_lock);
531 }
532 return ret;
533 }
534
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existance of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happens if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	/* Snapshot the registry state while we still hold its lock. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectionnal communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
				new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
670
671 /*
672 * For a given application and session, push metadata to consumer.
673 * Either sock or consumer is required : if sock is NULL, the default
674 * socket to send the metadata is retrieved from consumer, if sock
675 * is not NULL we use it to send the metadata.
676 * RCU read-side lock must be held while calling this function,
677 * therefore ensuring existance of registry. It also ensures existance
678 * of socket throughout this function.
679 *
680 * Return 0 on success else a negative error.
681 * Returning a -EPIPE return value means we could not send the metadata,
682 * but it can be caused by recoverable errors (e.g. the application has
683 * terminated concurrently).
684 */
685 static int push_metadata(struct ust_registry_session *registry,
686 struct consumer_output *consumer)
687 {
688 int ret_val;
689 ssize_t ret;
690 struct consumer_socket *socket;
691
692 assert(registry);
693 assert(consumer);
694
695 pthread_mutex_lock(&registry->lock);
696 if (registry->metadata_closed) {
697 ret_val = -EPIPE;
698 goto error;
699 }
700
701 /* Get consumer socket to use to push the metadata.*/
702 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
703 consumer);
704 if (!socket) {
705 ret_val = -1;
706 goto error;
707 }
708
709 ret = ust_app_push_metadata(registry, socket, 0);
710 if (ret < 0) {
711 ret_val = ret;
712 goto error;
713 }
714 pthread_mutex_unlock(&registry->lock);
715 return 0;
716
717 error:
718 pthread_mutex_unlock(&registry->lock);
719 return ret_val;
720 }
721
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);

	/* Nothing to do when no metadata was created or it is already closed. */
	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

error:
	/*
	 * NOTE: the success path deliberately falls through to this label.
	 *
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be emit
	 * for this registry.
	 */
	registry->metadata_closed = 1;
end:
	pthread_mutex_unlock(&registry->lock);
	rcu_read_unlock();
	return ret;
}
773
/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	/* Hand the channels table to the cleanup thread, then free the session. */
	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}
789
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 *
 * Pushes any remaining metadata, tears down every channel, removes the
 * per-PID buffer registry, releases the tracer-side session handle and
 * finally reclaims the session through call_rcu().
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/* A session must only ever be deleted once. */
	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	/* Tear down every channel owned by this session. */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		/* EPIPE/EXITING simply mean the application is gone; not an error. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	/* Drop the session's reference on its consumer output. */
	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
865
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	/* Mark the socket as gone so no further command uses it. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Defer hash table destruction to the cleanup thread. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}
923
/*
 * URCU intermediate call to delete an UST app: recovers the app from its
 * pid hash table node embedded rcu_head and performs the actual teardown.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}
938
939 /*
940 * Delete the session from the application ht and delete the data structure by
941 * freeing every object inside and releasing them.
942 *
943 * The session list lock must be held by the caller.
944 */
945 static void destroy_app_session(struct ust_app *app,
946 struct ust_app_session *ua_sess)
947 {
948 int ret;
949 struct lttng_ht_iter iter;
950
951 assert(app);
952 assert(ua_sess);
953
954 iter.iter.node = &ua_sess->node.node;
955 ret = lttng_ht_del(app->sessions, &iter);
956 if (ret) {
957 /* Already scheduled for teardown. */
958 goto end;
959 }
960
961 /* Once deleted, free the data structure. */
962 delete_ust_app_session(app->sock, ua_sess, app);
963
964 end:
965 return;
966 }
967
968 /*
969 * Alloc new UST app session.
970 */
971 static
972 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
973 {
974 struct ust_app_session *ua_sess;
975
976 /* Init most of the default value by allocating and zeroing */
977 ua_sess = zmalloc(sizeof(struct ust_app_session));
978 if (ua_sess == NULL) {
979 PERROR("malloc");
980 goto error_free;
981 }
982
983 ua_sess->handle = -1;
984 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
985 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
986 pthread_mutex_init(&ua_sess->lock, NULL);
987
988 return ua_sess;
989
990 error_free:
991 return NULL;
992 }
993
994 /*
995 * Alloc new UST app channel.
996 */
997 static
998 struct ust_app_channel *alloc_ust_app_channel(char *name,
999 struct ust_app_session *ua_sess,
1000 struct lttng_ust_channel_attr *attr)
1001 {
1002 struct ust_app_channel *ua_chan;
1003
1004 /* Init most of the default value by allocating and zeroing */
1005 ua_chan = zmalloc(sizeof(struct ust_app_channel));
1006 if (ua_chan == NULL) {
1007 PERROR("malloc");
1008 goto error;
1009 }
1010
1011 /* Setup channel name */
1012 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1013 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1014
1015 ua_chan->enabled = 1;
1016 ua_chan->handle = -1;
1017 ua_chan->session = ua_sess;
1018 ua_chan->key = get_next_channel_key();
1019 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1020 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1021 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
1022
1023 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
1024 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
1025
1026 /* Copy attributes */
1027 if (attr) {
1028 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
1029 ua_chan->attr.subbuf_size = attr->subbuf_size;
1030 ua_chan->attr.num_subbuf = attr->num_subbuf;
1031 ua_chan->attr.overwrite = attr->overwrite;
1032 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1033 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1034 ua_chan->attr.output = attr->output;
1035 }
1036 /* By default, the channel is a per cpu channel. */
1037 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1038
1039 DBG3("UST app channel %s allocated", ua_chan->name);
1040
1041 return ua_chan;
1042
1043 error:
1044 return NULL;
1045 }
1046
1047 /*
1048 * Allocate and initialize a UST app stream.
1049 *
1050 * Return newly allocated stream pointer or NULL on error.
1051 */
1052 struct ust_app_stream *ust_app_alloc_stream(void)
1053 {
1054 struct ust_app_stream *stream = NULL;
1055
1056 stream = zmalloc(sizeof(*stream));
1057 if (stream == NULL) {
1058 PERROR("zmalloc ust app stream");
1059 goto error;
1060 }
1061
1062 /* Zero could be a valid value for a handle so flag it to -1. */
1063 stream->handle = -1;
1064
1065 error:
1066 return stream;
1067 }
1068
1069 /*
1070 * Alloc new UST app event.
1071 */
1072 static
1073 struct ust_app_event *alloc_ust_app_event(char *name,
1074 struct lttng_ust_event *attr)
1075 {
1076 struct ust_app_event *ua_event;
1077
1078 /* Init most of the default value by allocating and zeroing */
1079 ua_event = zmalloc(sizeof(struct ust_app_event));
1080 if (ua_event == NULL) {
1081 PERROR("malloc");
1082 goto error;
1083 }
1084
1085 ua_event->enabled = 1;
1086 strncpy(ua_event->name, name, sizeof(ua_event->name));
1087 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1088 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
1089
1090 /* Copy attributes */
1091 if (attr) {
1092 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1093 }
1094
1095 DBG3("UST app event %s allocated", ua_event->name);
1096
1097 return ua_event;
1098
1099 error:
1100 return NULL;
1101 }
1102
1103 /*
1104 * Alloc new UST app context.
1105 */
1106 static
1107 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
1108 {
1109 struct ust_app_ctx *ua_ctx;
1110
1111 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
1112 if (ua_ctx == NULL) {
1113 goto error;
1114 }
1115
1116 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1117
1118 if (uctx) {
1119 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
1120 if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
1121 char *provider_name = NULL, *ctx_name = NULL;
1122
1123 provider_name = strdup(uctx->u.app_ctx.provider_name);
1124 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1125 if (!provider_name || !ctx_name) {
1126 free(provider_name);
1127 free(ctx_name);
1128 goto error;
1129 }
1130
1131 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1132 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1133 }
1134 }
1135
1136 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
1137 return ua_ctx;
1138 error:
1139 free(ua_ctx);
1140 return NULL;
1141 }
1142
1143 /*
1144 * Allocate a filter and copy the given original filter.
1145 *
1146 * Return allocated filter or NULL on error.
1147 */
1148 static struct lttng_filter_bytecode *copy_filter_bytecode(
1149 struct lttng_filter_bytecode *orig_f)
1150 {
1151 struct lttng_filter_bytecode *filter = NULL;
1152
1153 /* Copy filter bytecode */
1154 filter = zmalloc(sizeof(*filter) + orig_f->len);
1155 if (!filter) {
1156 PERROR("zmalloc alloc filter bytecode");
1157 goto error;
1158 }
1159
1160 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1161
1162 error:
1163 return filter;
1164 }
1165
1166 /*
1167 * Create a liblttng-ust filter bytecode from given bytecode.
1168 *
1169 * Return allocated filter or NULL on error.
1170 */
1171 static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
1172 struct lttng_filter_bytecode *orig_f)
1173 {
1174 struct lttng_ust_filter_bytecode *filter = NULL;
1175
1176 /* Copy filter bytecode */
1177 filter = zmalloc(sizeof(*filter) + orig_f->len);
1178 if (!filter) {
1179 PERROR("zmalloc alloc ust filter bytecode");
1180 goto error;
1181 }
1182
1183 assert(sizeof(struct lttng_filter_bytecode) ==
1184 sizeof(struct lttng_ust_filter_bytecode));
1185 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1186 error:
1187 return filter;
1188 }
1189
1190 /*
1191 * Find an ust_app using the sock and return it. RCU read side lock must be
1192 * held before calling this helper function.
1193 */
1194 struct ust_app *ust_app_find_by_sock(int sock)
1195 {
1196 struct lttng_ht_node_ulong *node;
1197 struct lttng_ht_iter iter;
1198
1199 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1200 node = lttng_ht_iter_get_node_ulong(&iter);
1201 if (node == NULL) {
1202 DBG2("UST app find by sock %d not found", sock);
1203 goto error;
1204 }
1205
1206 return caa_container_of(node, struct ust_app, sock_n);
1207
1208 error:
1209 return NULL;
1210 }
1211
1212 /*
1213 * Find an ust_app using the notify sock and return it. RCU read side lock must
1214 * be held before calling this helper function.
1215 */
1216 static struct ust_app *find_app_by_notify_sock(int sock)
1217 {
1218 struct lttng_ht_node_ulong *node;
1219 struct lttng_ht_iter iter;
1220
1221 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1222 &iter);
1223 node = lttng_ht_iter_get_node_ulong(&iter);
1224 if (node == NULL) {
1225 DBG2("UST app find by notify sock %d not found", sock);
1226 goto error;
1227 }
1228
1229 return caa_container_of(node, struct ust_app, notify_sock_n);
1230
1231 error:
1232 return NULL;
1233 }
1234
1235 /*
1236 * Lookup for an ust app event based on event name, filter bytecode and the
1237 * event loglevel.
1238 *
1239 * Return an ust_app_event object or NULL on error.
1240 */
1241 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1242 char *name, struct lttng_filter_bytecode *filter,
1243 int loglevel_value,
1244 const struct lttng_event_exclusion *exclusion)
1245 {
1246 struct lttng_ht_iter iter;
1247 struct lttng_ht_node_str *node;
1248 struct ust_app_event *event = NULL;
1249 struct ust_app_ht_key key;
1250
1251 assert(name);
1252 assert(ht);
1253
1254 /* Setup key for event lookup. */
1255 key.name = name;
1256 key.filter = filter;
1257 key.loglevel_type = loglevel_value;
1258 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1259 key.exclusion = exclusion;
1260
1261 /* Lookup using the event name as hash and a custom match fct. */
1262 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1263 ht_match_ust_app_event, &key, &iter.iter);
1264 node = lttng_ht_iter_get_node_str(&iter);
1265 if (node == NULL) {
1266 goto end;
1267 }
1268
1269 event = caa_container_of(node, struct ust_app_event, node);
1270
1271 end:
1272 return event;
1273 }
1274
1275 /*
1276 * Create the channel context on the tracer.
1277 *
1278 * Called with UST app session lock held.
1279 */
1280 static
1281 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1282 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1283 {
1284 int ret;
1285
1286 health_code_update();
1287
1288 pthread_mutex_lock(&app->sock_lock);
1289 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1290 ua_chan->obj, &ua_ctx->obj);
1291 pthread_mutex_unlock(&app->sock_lock);
1292 if (ret < 0) {
1293 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1294 ERR("UST app create channel context failed for app (pid: %d) "
1295 "with ret %d", app->pid, ret);
1296 } else {
1297 /*
1298 * This is normal behavior, an application can die during the
1299 * creation process. Don't report an error so the execution can
1300 * continue normally.
1301 */
1302 ret = 0;
1303 DBG3("UST app disable event failed. Application is dead.");
1304 }
1305 goto error;
1306 }
1307
1308 ua_ctx->handle = ua_ctx->obj->handle;
1309
1310 DBG2("UST app context handle %d created successfully for channel %s",
1311 ua_ctx->handle, ua_chan->name);
1312
1313 error:
1314 health_code_update();
1315 return ret;
1316 }
1317
/*
 * Set the filter on the tracer.
 *
 * Builds a liblttng-ust bytecode object from the event's stored filter and
 * sends it to the application over its command socket.
 *
 * Returns 0 on success (including when the event has no filter or the
 * application already exited) or a negative value on error.
 */
static
int set_ust_event_filter(struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;
	struct lttng_ust_filter_bytecode *ust_bytecode = NULL;

	health_code_update();

	/* Nothing to do if no filter is attached to the event. */
	if (!ua_event->filter) {
		ret = 0;
		goto error;
	}

	ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_filter(app->sock, ust_bytecode,
			ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s filter failed for app (pid: %d) "
					"with ret %d", ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app filter event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter set successfully for event %s", ua_event->name);

error:
	health_code_update();
	/* The local UST bytecode copy is not needed once sent (or on error). */
	free(ust_bytecode);
	return ret;
}
1367
1368 static
1369 struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
1370 struct lttng_event_exclusion *exclusion)
1371 {
1372 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1373 size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1374 LTTNG_UST_SYM_NAME_LEN * exclusion->count;
1375
1376 ust_exclusion = zmalloc(exclusion_alloc_size);
1377 if (!ust_exclusion) {
1378 PERROR("malloc");
1379 goto end;
1380 }
1381
1382 assert(sizeof(struct lttng_event_exclusion) ==
1383 sizeof(struct lttng_ust_event_exclusion));
1384 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1385 end:
1386 return ust_exclusion;
1387 }
1388
/*
 * Set event exclusions on the tracer.
 *
 * Converts the event's exclusion list to the liblttng-ust representation
 * and sends it to the application over its command socket.
 *
 * Returns 0 on success (including when no exclusions are set or the
 * application already exited) or a negative value on error.
 */
static
int set_ust_event_exclusion(struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;
	struct lttng_ust_event_exclusion *ust_exclusion = NULL;

	health_code_update();

	/* Nothing to do without a non-empty exclusion list. */
	if (!ua_event->exclusion || !ua_event->exclusion->count) {
		ret = 0;
		goto error;
	}

	ust_exclusion = create_ust_exclusion_from_exclusion(
			ua_event->exclusion);
	if (!ust_exclusion) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s exclusions failed for app (pid: %d) "
					"with ret %d", ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app event exclusion failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST exclusion set successfully for event %s", ua_event->name);

error:
	health_code_update();
	/* The UST copy of the exclusions is not needed once sent (or on error). */
	free(ust_exclusion);
	return ret;
}
1438
1439 /*
1440 * Disable the specified event on to UST tracer for the UST session.
1441 */
1442 static int disable_ust_event(struct ust_app *app,
1443 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1444 {
1445 int ret;
1446
1447 health_code_update();
1448
1449 pthread_mutex_lock(&app->sock_lock);
1450 ret = ustctl_disable(app->sock, ua_event->obj);
1451 pthread_mutex_unlock(&app->sock_lock);
1452 if (ret < 0) {
1453 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1454 ERR("UST app event %s disable failed for app (pid: %d) "
1455 "and session handle %d with ret %d",
1456 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1457 } else {
1458 /*
1459 * This is normal behavior, an application can die during the
1460 * creation process. Don't report an error so the execution can
1461 * continue normally.
1462 */
1463 ret = 0;
1464 DBG3("UST app disable event failed. Application is dead.");
1465 }
1466 goto error;
1467 }
1468
1469 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1470 ua_event->attr.name, app->pid);
1471
1472 error:
1473 health_code_update();
1474 return ret;
1475 }
1476
1477 /*
1478 * Disable the specified channel on to UST tracer for the UST session.
1479 */
1480 static int disable_ust_channel(struct ust_app *app,
1481 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1482 {
1483 int ret;
1484
1485 health_code_update();
1486
1487 pthread_mutex_lock(&app->sock_lock);
1488 ret = ustctl_disable(app->sock, ua_chan->obj);
1489 pthread_mutex_unlock(&app->sock_lock);
1490 if (ret < 0) {
1491 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1492 ERR("UST app channel %s disable failed for app (pid: %d) "
1493 "and session handle %d with ret %d",
1494 ua_chan->name, app->pid, ua_sess->handle, ret);
1495 } else {
1496 /*
1497 * This is normal behavior, an application can die during the
1498 * creation process. Don't report an error so the execution can
1499 * continue normally.
1500 */
1501 ret = 0;
1502 DBG3("UST app disable channel failed. Application is dead.");
1503 }
1504 goto error;
1505 }
1506
1507 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1508 ua_chan->name, app->pid);
1509
1510 error:
1511 health_code_update();
1512 return ret;
1513 }
1514
1515 /*
1516 * Enable the specified channel on to UST tracer for the UST session.
1517 */
1518 static int enable_ust_channel(struct ust_app *app,
1519 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1520 {
1521 int ret;
1522
1523 health_code_update();
1524
1525 pthread_mutex_lock(&app->sock_lock);
1526 ret = ustctl_enable(app->sock, ua_chan->obj);
1527 pthread_mutex_unlock(&app->sock_lock);
1528 if (ret < 0) {
1529 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1530 ERR("UST app channel %s enable failed for app (pid: %d) "
1531 "and session handle %d with ret %d",
1532 ua_chan->name, app->pid, ua_sess->handle, ret);
1533 } else {
1534 /*
1535 * This is normal behavior, an application can die during the
1536 * creation process. Don't report an error so the execution can
1537 * continue normally.
1538 */
1539 ret = 0;
1540 DBG3("UST app enable channel failed. Application is dead.");
1541 }
1542 goto error;
1543 }
1544
1545 ua_chan->enabled = 1;
1546
1547 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1548 ua_chan->name, app->pid);
1549
1550 error:
1551 health_code_update();
1552 return ret;
1553 }
1554
1555 /*
1556 * Enable the specified event on to UST tracer for the UST session.
1557 */
1558 static int enable_ust_event(struct ust_app *app,
1559 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1560 {
1561 int ret;
1562
1563 health_code_update();
1564
1565 pthread_mutex_lock(&app->sock_lock);
1566 ret = ustctl_enable(app->sock, ua_event->obj);
1567 pthread_mutex_unlock(&app->sock_lock);
1568 if (ret < 0) {
1569 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1570 ERR("UST app event %s enable failed for app (pid: %d) "
1571 "and session handle %d with ret %d",
1572 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1573 } else {
1574 /*
1575 * This is normal behavior, an application can die during the
1576 * creation process. Don't report an error so the execution can
1577 * continue normally.
1578 */
1579 ret = 0;
1580 DBG3("UST app enable event failed. Application is dead.");
1581 }
1582 goto error;
1583 }
1584
1585 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1586 ua_event->attr.name, app->pid);
1587
1588 error:
1589 health_code_update();
1590 return ret;
1591 }
1592
/*
 * Send channel and stream buffer to application.
 *
 * Sends the channel object first, then every stream attached to it; streams
 * are unlinked and released locally as soon as the tracer owns them.
 *
 * Return 0 on success. On error, a negative value is returned; -ENOTCONN
 * means the application exited during the transfer.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * Send all streams to application. The _safe iterator is required
	 * because each stream is removed from the list inside the loop.
	 */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN; /* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
1644
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Creates the event object on the tracer, then applies its filter and
 * exclusions, and finally enables the event when it is flagged enabled
 * (events are created disabled on the tracer side).
 *
 * Should be called with session mutex held.
 *
 * Returns 0 on success (including when the application died during
 * creation) or a negative value on error.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_event_exclusion(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Events are created disabled on the tracer; enable if requested. */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
				/*
				 * NOTE(review): when asserts are compiled out (NDEBUG),
				 * control falls through to the EXIST case below and the
				 * error is silently cleared — confirm this is intended.
				 */
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
1734
/*
 * Copy data between an UST app event and a LTT event.
 *
 * Copies the name, enabled state, attributes, filter bytecode and exclusion
 * data from uevent into ua_event. Allocation failures for the filter or the
 * exclusions are tolerated (best effort): the corresponding field is simply
 * left NULL.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	/* Copy the event name, guaranteeing NUL termination. */
	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = copy_filter_bytecode(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		/* Exclusion storage is a header plus count fixed-size names. */
		exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
				LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}
1770
/*
 * Copy data between an UST app channel and a LTT channel.
 *
 * Copies the channel name, tracefile settings, buffer attributes, enabled
 * state and tracing id, then duplicates every context and every event of
 * uchan into ua_chan (events already present are left untouched).
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	/* Copy the channel name, guaranteeing NUL termination. */
	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/* Duplicate every context of the source channel (best effort on OOM). */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		struct ust_app_ctx *ua_ctx = alloc_ust_app_ctx(&uctx->ctx);

		if (ua_ctx == NULL) {
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				/* Best effort: skip this event on OOM. */
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1835
/*
 * Copy data between a UST app session and a regular LTT session.
 *
 * Copies ids, credentials, buffer type and consumer output (taking a
 * reference on the consumer), builds the per-PID or per-UID trace and shm
 * paths, then duplicates every channel of the global domain into ua_sess.
 * On the (assert-guarded) error paths the consumer reference is released.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter;
	struct ltt_ust_channel *uchan;
	struct ust_app_channel *ua_chan;
	time_t rawtime;
	struct tm *timeinfo;
	char datetime[16];
	int ret;
	char tmp_shm_path[PATH_MAX];

	/* Get date and time for unique app path */
	time(&rawtime);
	timeinfo = localtime(&rawtime);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	ua_sess->uid = app->uid;
	ua_sess->gid = app->gid;
	/* Effective credentials come from the tracing session owner. */
	ua_sess->euid = usess->uid;
	ua_sess->egid = usess->gid;
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	/* Trace output path depends on the buffer ownership model. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
				datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
		break;
	default:
		assert(0);
		goto error;
	}
	if (ret < 0) {
		PERROR("asprintf UST shadow copy session");
		assert(0);
		goto error;
	}

	/* Copy both shm paths, guaranteeing NUL termination. */
	strncpy(ua_sess->root_shm_path, usess->root_shm_path,
			sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path,
			sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
		/* Append the per-PID/per-UID suffix to the base shm path. */
		switch (ua_sess->buffer_type) {
		case LTTNG_BUFFER_PER_PID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
					app->name, app->pid, datetime);
			break;
		case LTTNG_BUFFER_PER_UID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					DEFAULT_UST_TRACE_UID_PATH,
					app->uid, app->bits_per_long);
			break;
		default:
			assert(0);
			goto error;
		}
		if (ret < 0) {
			PERROR("sprintf UST shadow copy session");
			assert(0);
			goto error;
		}
		strncat(ua_sess->shm_path, tmp_shm_path,
				sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
		ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	}

	/* Iterate over all channels in global domain. */
	cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
			uchan, node.node) {
		struct lttng_ht_iter uiter;

		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node != NULL) {
			/* Channel already exists. Continuing. */
			continue;
		}

		DBG2("Channel %s not found on shadow session copy, creating it",
				uchan->name);
		ua_chan = alloc_ust_app_channel(uchan->name, ua_sess,
				&uchan->attr);
		if (ua_chan == NULL) {
			/* malloc failed FIXME: Might want to do handle ENOMEM .. */
			continue;
		}
		shadow_copy_channel(ua_chan, uchan);
		/*
		 * The concept of metadata channel does not exist on the tracing
		 * registry side of the session daemon so this can only be a per CPU
		 * channel and not metadata.
		 */
		ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

		lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
	}
	return;

error:
	/* Drop the consumer reference taken above. */
	consumer_output_put(ua_sess->consumer);
}
1964
/*
 * Lookup session wrapper.
 *
 * Fills iter with the result of looking up the UST session id in the
 * application's session hash table.
 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
1975
1976 /*
1977 * Return ust app session from the app session hashtable using the UST session
1978 * id.
1979 */
1980 static struct ust_app_session *lookup_session_by_app(
1981 struct ltt_ust_session *usess, struct ust_app *app)
1982 {
1983 struct lttng_ht_iter iter;
1984 struct lttng_ht_node_u64 *node;
1985
1986 __lookup_session_by_app(usess, app, &iter);
1987 node = lttng_ht_iter_get_node_u64(&iter);
1988 if (node == NULL) {
1989 goto error;
1990 }
1991
1992 return caa_container_of(node, struct ust_app_session, node);
1993
1994 error:
1995 return NULL;
1996 }
1997
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists; only report it through regp. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_pid->root_shm_path,
			reg_pid->shm_path,
			ua_sess->euid, ua_sess->egid);
	if (ret < 0) {
		/*
		 * reg_pid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_pid_destroy(reg_pid);
		goto error;
	}

	buffer_reg_pid_add(reg_pid);

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
2063
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists; only report it through regp. */
		goto end;
	}

	/*
	 * Initialize registry. Note that the app pointer is NULL here since
	 * per-UID registries are shared among applications.
	 */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_uid->root_shm_path,
			reg_uid->shm_path, usess->uid, usess->gid);
	if (ret < 0) {
		/*
		 * reg_uid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_uid_destroy(reg_uid, NULL);
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	buffer_reg_uid_add(reg_uid);

	DBG3("UST app buffer registry per UID created successfully");
end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
2131
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse the app session if one already exists for this tracing session. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Set up the buffer registry matching the session's buffer scheme. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	if (ua_sess->handle == -1) {
		/* No tracer-side session yet; ask the application to create one. */
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
		/* Second index: lookup by tracer-side object descriptor. */
		lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
		lttng_ht_add_unique_ulong(app->ust_sessions_objd,
				&ua_sess->ust_objd_node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
2249
2250 /*
2251 * Match function for a hash table lookup of ust_app_ctx.
2252 *
2253 * It matches an ust app context based on the context type and, in the case
2254 * of perf counters, their name.
2255 */
2256 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2257 {
2258 struct ust_app_ctx *ctx;
2259 const struct lttng_ust_context_attr *key;
2260
2261 assert(node);
2262 assert(_key);
2263
2264 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2265 key = _key;
2266
2267 /* Context type */
2268 if (ctx->ctx.ctx != key->ctx) {
2269 goto no_match;
2270 }
2271
2272 switch(key->ctx) {
2273 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
2274 if (strncmp(key->u.perf_counter.name,
2275 ctx->ctx.u.perf_counter.name,
2276 sizeof(key->u.perf_counter.name))) {
2277 goto no_match;
2278 }
2279 break;
2280 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2281 if (strcmp(key->u.app_ctx.provider_name,
2282 ctx->ctx.u.app_ctx.provider_name) ||
2283 strcmp(key->u.app_ctx.ctx_name,
2284 ctx->ctx.u.app_ctx.ctx_name)) {
2285 goto no_match;
2286 }
2287 break;
2288 default:
2289 break;
2290 }
2291
2292 /* Match. */
2293 return 1;
2294
2295 no_match:
2296 return 0;
2297 }
2298
2299 /*
2300 * Lookup for an ust app context from an lttng_ust_context.
2301 *
2302 * Must be called while holding RCU read side lock.
2303 * Return an ust_app_ctx object or NULL on error.
2304 */
2305 static
2306 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2307 struct lttng_ust_context_attr *uctx)
2308 {
2309 struct lttng_ht_iter iter;
2310 struct lttng_ht_node_ulong *node;
2311 struct ust_app_ctx *app_ctx = NULL;
2312
2313 assert(uctx);
2314 assert(ht);
2315
2316 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2317 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2318 ht_match_ust_app_ctx, uctx, &iter.iter);
2319 node = lttng_ht_iter_get_node_ulong(&iter);
2320 if (!node) {
2321 goto end;
2322 }
2323
2324 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2325
2326 end:
2327 return app_ctx;
2328 }
2329
/*
 * Create a context for the channel on the tracer.
 *
 * Called with UST app session lock held and a RCU read side lock.
 *
 * Returns 0 on success, -EEXIST if an identical context is already attached
 * to the channel, -1 on allocation failure, or a negative tracer error.
 */
static
int create_ust_app_channel_context(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan,
		struct lttng_ust_context_attr *uctx,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app adding context to channel %s", ua_chan->name);

	/* Reject duplicate contexts on the same channel. */
	ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
	if (ua_ctx) {
		ret = -EEXIST;
		goto error;
	}

	ua_ctx = alloc_ust_app_ctx(uctx);
	if (ua_ctx == NULL) {
		/* malloc failed */
		ret = -1;
		goto error;
	}

	/*
	 * Register the context in the channel's hash table and list before the
	 * tracer-side creation; channel teardown reclaims it even if the
	 * create_ust_channel_context() call below fails.
	 */
	lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
	lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
	cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);

	ret = create_ust_channel_context(ua_chan, ua_ctx, app);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
2371
2372 /*
2373 * Enable on the tracer side a ust app event for the session and channel.
2374 *
2375 * Called with UST app session lock held.
2376 */
2377 static
2378 int enable_ust_app_event(struct ust_app_session *ua_sess,
2379 struct ust_app_event *ua_event, struct ust_app *app)
2380 {
2381 int ret;
2382
2383 ret = enable_ust_event(app, ua_sess, ua_event);
2384 if (ret < 0) {
2385 goto error;
2386 }
2387
2388 ua_event->enabled = 1;
2389
2390 error:
2391 return ret;
2392 }
2393
2394 /*
2395 * Disable on the tracer side a ust app event for the session and channel.
2396 */
2397 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2398 struct ust_app_event *ua_event, struct ust_app *app)
2399 {
2400 int ret;
2401
2402 ret = disable_ust_event(app, ua_sess, ua_event);
2403 if (ret < 0) {
2404 goto error;
2405 }
2406
2407 ua_event->enabled = 0;
2408
2409 error:
2410 return ret;
2411 }
2412
2413 /*
2414 * Lookup ust app channel for session and disable it on the tracer side.
2415 */
2416 static
2417 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2418 struct ust_app_channel *ua_chan, struct ust_app *app)
2419 {
2420 int ret;
2421
2422 ret = disable_ust_channel(app, ua_sess, ua_chan);
2423 if (ret < 0) {
2424 goto error;
2425 }
2426
2427 ua_chan->enabled = 0;
2428
2429 error:
2430 return ret;
2431 }
2432
2433 /*
2434 * Lookup ust app channel for session and enable it on the tracer side. This
2435 * MUST be called with a RCU read side lock acquired.
2436 */
2437 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2438 struct ltt_ust_channel *uchan, struct ust_app *app)
2439 {
2440 int ret = 0;
2441 struct lttng_ht_iter iter;
2442 struct lttng_ht_node_str *ua_chan_node;
2443 struct ust_app_channel *ua_chan;
2444
2445 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2446 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2447 if (ua_chan_node == NULL) {
2448 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2449 uchan->name, ua_sess->tracing_id);
2450 goto error;
2451 }
2452
2453 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2454
2455 ret = enable_ust_channel(app, ua_sess, ua_chan);
2456 if (ret < 0) {
2457 goto error;
2458 }
2459
2460 error:
2461 return ret;
2462 }
2463
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * File descriptors are reserved (against the LTTNG_FD_APPS accounting) both
 * for the channel itself and for its expected streams before they are
 * retrieved, and released again on every failure path.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * streams we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the
	 * stream list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

error_destroy:
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2557
2558 /*
2559 * Duplicate the ust data object of the ust app stream and save it in the
2560 * buffer registry stream.
2561 *
2562 * Return 0 on success or else a negative value.
2563 */
2564 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2565 struct ust_app_stream *stream)
2566 {
2567 int ret;
2568
2569 assert(reg_stream);
2570 assert(stream);
2571
2572 /* Reserve the amount of file descriptor we need. */
2573 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2574 if (ret < 0) {
2575 ERR("Exhausted number of available FD upon duplicate stream");
2576 goto error;
2577 }
2578
2579 /* Duplicate object for stream once the original is in the registry. */
2580 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2581 reg_stream->obj.ust);
2582 if (ret < 0) {
2583 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2584 reg_stream->obj.ust, stream->obj, ret);
2585 lttng_fd_put(LTTNG_FD_APPS, 2);
2586 goto error;
2587 }
2588 stream->handle = stream->obj->handle;
2589
2590 error:
2591 return ret;
2592 }
2593
/*
 * Duplicate the ust data object of the ust app channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/*
	 * Reserve one fd for the channel object.
	 * NOTE(review): the historical comment here said "two fds" but only one
	 * is reserved (and released on the error path below) — the code is
	 * internally consistent; confirm against the channel object's actual
	 * fd usage.
	 */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2631
/*
 * For a given channel buffer registry, setup all streams of the given ust
 * application channel.
 *
 * Ownership of each stream's ust object is transferred to the registry;
 * application stream entries are deleted as they are consumed. On error,
 * streams already transferred remain in the registry.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_stream *stream, *stmp;

	assert(reg_chan);
	assert(ua_chan);

	DBG2("UST app setup buffer registry stream");

	/* Move every app stream's ust object into the registry. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		struct buffer_reg_stream *reg_stream;

		ret = buffer_reg_stream_create(&reg_stream);
		if (ret < 0) {
			goto error;
		}

		/*
		 * Keep original pointer and nullify it in the stream so the delete
		 * stream call does not release the object.
		 */
		reg_stream->obj.ust = stream->obj;
		stream->obj = NULL;
		buffer_reg_stream_add(reg_stream, reg_chan);

		/* We don't need the streams anymore. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}

error:
	return ret;
}
2675
/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. Important, the created object is NOT added to the session
 * registry hash table.
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(reg_chan);
	/* Mirror the application channel's consumer key and buffer geometry. */
	reg_chan->consumer_key = ua_chan->key;
	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
	reg_chan->num_subbuf = ua_chan->attr.num_subbuf;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, reg_chan);

	if (regp) {
		*regp = reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
2725
/*
 * Setup buffer registry channel for the given session registry and application
 * channel object: the streams and then the channel's ust object are handed
 * over to the registry channel. On failure, the registry channel is removed
 * from the session registry and destroyed.
 *
 * Return 0 on success else a negative value.
 */
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
		struct ust_app *app)
{
	int ret;

	assert(reg_sess);
	assert(reg_chan);
	assert(ua_chan);
	assert(ua_chan->obj);

	DBG2("UST app setup buffer registry channel for %s", ua_chan->name);

	/* Setup all streams for the registry. */
	ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
	if (ret < 0) {
		goto error;
	}

	/* Transfer the channel object; the app channel no longer owns it. */
	reg_chan->obj.ust = ua_chan->obj;
	ua_chan->obj = NULL;

	return 0;

error:
	buffer_reg_channel_remove(reg_sess, reg_chan);
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
	return ret;
}
2761
/*
 * Send buffer registry channel to the application.
 *
 * The registry's channel and stream objects are duplicated and sent to the
 * application over its command socket.
 *
 * Return 0 on success else a negative value. -ENOTCONN is returned when the
 * application exited while the objects were being sent.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		/* Stack-local stream; its ust object is released after each send. */
		struct ust_app_stream stream;

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream, app);
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				ret = -ENOTCONN; /* Caused by app exiting. */
			}
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream, app);
	}
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
2829
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * The per-UID buffer registry channel (and its consumer-side buffers) is
 * created only the first time a channel with this tracing id is seen;
 * subsequent applications sharing the registry only get the existing channel
 * and streams sent to them.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be found, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry,
				ua_chan, reg_chan, app);
		if (ret < 0) {
			ERR("Error setting up UST channel \"%s\"",
				ua_chan->name);
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			/* -ENOTCONN just means the app exited; not worth an error. */
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	return ret;
}
2916
2917 /*
2918 * Create and send to the application the created buffers with per PID buffers.
2919 *
2920 * Return 0 on success else a negative value.
2921 */
2922 static int create_channel_per_pid(struct ust_app *app,
2923 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2924 struct ust_app_channel *ua_chan)
2925 {
2926 int ret;
2927 struct ust_registry_session *registry;
2928
2929 assert(app);
2930 assert(usess);
2931 assert(ua_sess);
2932 assert(ua_chan);
2933
2934 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2935
2936 rcu_read_lock();
2937
2938 registry = get_session_registry(ua_sess);
2939 assert(registry);
2940
2941 /* Create and add a new channel registry to session. */
2942 ret = ust_registry_channel_add(registry, ua_chan->key);
2943 if (ret < 0) {
2944 ERR("Error creating the UST channel \"%s\" registry instance",
2945 ua_chan->name);
2946 goto error;
2947 }
2948
2949 /* Create and get channel on the consumer side. */
2950 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2951 app->bits_per_long, registry);
2952 if (ret < 0) {
2953 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2954 ua_chan->name);
2955 goto error;
2956 }
2957
2958 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2959 if (ret < 0) {
2960 if (ret != -ENOTCONN) {
2961 ERR("Error sending channel to application");
2962 }
2963 goto error;
2964 }
2965
2966 error:
2967 rcu_read_unlock();
2968 return ret;
2969 }
2970
2971 /*
2972 * From an already allocated ust app channel, create the channel buffers if
2973 * need and send it to the application. This MUST be called with a RCU read
2974 * side lock acquired.
2975 *
2976 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2977 * the application exited concurrently.
2978 */
2979 static int do_create_channel(struct ust_app *app,
2980 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2981 struct ust_app_channel *ua_chan)
2982 {
2983 int ret;
2984
2985 assert(app);
2986 assert(usess);
2987 assert(ua_sess);
2988 assert(ua_chan);
2989
2990 /* Handle buffer type before sending the channel to the application. */
2991 switch (usess->buffer_type) {
2992 case LTTNG_BUFFER_PER_UID:
2993 {
2994 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2995 if (ret < 0) {
2996 goto error;
2997 }
2998 break;
2999 }
3000 case LTTNG_BUFFER_PER_PID:
3001 {
3002 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3003 if (ret < 0) {
3004 goto error;
3005 }
3006 break;
3007 }
3008 default:
3009 assert(0);
3010 ret = -EINVAL;
3011 goto error;
3012 }
3013
3014 /* Initialize ust objd object using the received handle and add it. */
3015 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3016 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
3017
3018 /* If channel is not enabled, disable it on the tracer */
3019 if (!ua_chan->enabled) {
3020 ret = disable_ust_channel(app, ua_sess, ua_chan);
3021 if (ret < 0) {
3022 goto error;
3023 }
3024 }
3025
3026 error:
3027 return ret;
3028 }
3029
/*
 * Create UST app channel and create it on the tracer. Set ua_chanp of the
 * newly created channel if not NULL.
 *
 * Called with UST app session lock and RCU read-side lock held.
 *
 * Return 0 on success or else a negative value. Returns -ENOTCONN if
 * the application exited concurrently.
 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		/* Channel already exists in this app session; reuse it. */
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);

end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/* Only pass the app socket if the channel was already sent to the app. */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
3092
/*
 * Create UST app event and create it on the tracer side.
 *
 * Called with ust app session mutex held.
 *
 * Return 0 on success, -EEXIST if an equivalent event already exists on the
 * channel, -ENOMEM on allocation failure, or a negative tracer error.
 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	/* Get event node */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event != NULL) {
		ret = -EEXIST;
		goto end;
	}

	/* Does not exist so create one */
	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only malloc can failed so something is really wrong */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/* Not found previously means that it does not exist on the tracer */
		assert(ret != -LTTNG_UST_ERR_EXIST);
		goto error;
	}

	/* Only index the event once the tracer-side creation succeeded. */
	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event %s for PID %d completed", ua_event->name,
			app->pid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event, app);
	return ret;
}
3144
/*
 * Create UST metadata and open it on the tracer side.
 *
 * Called with UST app session lock held and RCU read side lock.
 *
 * Return 0 on success (including when metadata already exists or was closed
 * previously for this registry) or else a negative value.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept there. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadata. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/*
	 * This teardown runs on both the success and failure paths: once the
	 * consumer knows about the metadata channel (identified by its key kept
	 * in the registry), the local channel object is no longer needed.
	 */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
	return ret;
}
3242
3243 /*
3244 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3245 * acquired before calling this function.
3246 */
3247 struct ust_app *ust_app_find_by_pid(pid_t pid)
3248 {
3249 struct ust_app *app = NULL;
3250 struct lttng_ht_node_ulong *node;
3251 struct lttng_ht_iter iter;
3252
3253 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3254 node = lttng_ht_iter_get_node_ulong(&iter);
3255 if (node == NULL) {
3256 DBG2("UST app no found with pid %d", pid);
3257 goto error;
3258 }
3259
3260 DBG2("Found UST app by pid %d", pid);
3261
3262 app = caa_container_of(node, struct ust_app, pid_n);
3263
3264 error:
3265 return app;
3266 }
3267
3268 /*
3269 * Allocate and init an UST app object using the registration information and
3270 * the command socket. This is called when the command socket connects to the
3271 * session daemon.
3272 *
3273 * The object is returned on success or else NULL.
3274 */
3275 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
3276 {
3277 struct ust_app *lta = NULL;
3278
3279 assert(msg);
3280 assert(sock >= 0);
3281
3282 DBG3("UST app creating application for socket %d", sock);
3283
3284 if ((msg->bits_per_long == 64 &&
3285 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3286 || (msg->bits_per_long == 32 &&
3287 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
3288 ERR("Registration failed: application \"%s\" (pid: %d) has "
3289 "%d-bit long, but no consumerd for this size is available.\n",
3290 msg->name, msg->pid, msg->bits_per_long);
3291 goto error;
3292 }
3293
3294 lta = zmalloc(sizeof(struct ust_app));
3295 if (lta == NULL) {
3296 PERROR("malloc");
3297 goto error;
3298 }
3299
3300 lta->ppid = msg->ppid;
3301 lta->uid = msg->uid;
3302 lta->gid = msg->gid;
3303
3304 lta->bits_per_long = msg->bits_per_long;
3305 lta->uint8_t_alignment = msg->uint8_t_alignment;
3306 lta->uint16_t_alignment = msg->uint16_t_alignment;
3307 lta->uint32_t_alignment = msg->uint32_t_alignment;
3308 lta->uint64_t_alignment = msg->uint64_t_alignment;
3309 lta->long_alignment = msg->long_alignment;
3310 lta->byte_order = msg->byte_order;
3311
3312 lta->v_major = msg->major;
3313 lta->v_minor = msg->minor;
3314 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3315 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3316 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3317 lta->notify_sock = -1;
3318
3319 /* Copy name and make sure it's NULL terminated. */
3320 strncpy(lta->name, msg->name, sizeof(lta->name));
3321 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3322
3323 /*
3324 * Before this can be called, when receiving the registration information,
3325 * the application compatibility is checked. So, at this point, the
3326 * application can work with this session daemon.
3327 */
3328 lta->compatible = 1;
3329
3330 lta->pid = msg->pid;
3331 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
3332 lta->sock = sock;
3333 pthread_mutex_init(&lta->sock_lock, NULL);
3334 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
3335
3336 CDS_INIT_LIST_HEAD(&lta->teardown_head);
3337 error:
3338 return lta;
3339 }
3340
/*
 * For a given application object, add it to every hash table.
 *
 * The app must already carry a valid notify socket (set by the notify
 * socket thread) before being published here.
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	assert(app->notify_sock >= 0);

	/* Insertions are done under the RCU read-side lock. */
	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
3375
3376 /*
3377 * Set the application version into the object.
3378 *
3379 * Return 0 on success else a negative value either an errno code or a
3380 * LTTng-UST error code.
3381 */
3382 int ust_app_version(struct ust_app *app)
3383 {
3384 int ret;
3385
3386 assert(app);
3387
3388 pthread_mutex_lock(&app->sock_lock);
3389 ret = ustctl_tracer_version(app->sock, &app->version);
3390 pthread_mutex_unlock(&app->sock_lock);
3391 if (ret < 0) {
3392 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3393 ERR("UST app %d version failed with ret %d", app->sock, ret);
3394 } else {
3395 DBG3("UST app %d version failed. Application is dead", app->sock);
3396 }
3397 }
3398
3399 return ret;
3400 }
3401
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 *
 * Ordering matters here: metadata is pushed and per-PID streams are flushed
 * *before* the app is removed from the hash tables, so that a concurrent
 * data_pending check observes a consistent state.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter ust_app_sock_iter;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/*
	 * For per-PID buffers, perform "push metadata" and flush all
	 * application streams before removing app from hash tables,
	 * ensuring proper behavior of data_pending check.
	 * Remove sessions so they are not visible during deletion.
	 */
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
			/* Best-effort flush; errors are intentionally ignored. */
			(void) ust_app_flush_app_session(lta, ua_sess);
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			/* Another path already tore this session down; skip it. */
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}
		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);

		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Remove application from PID hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Free memory after the RCU grace period. */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3521
/*
 * Fill events array with all events name of all registered apps.
 *
 * On success, *events points to a caller-owned array (to be freed with
 * free()) and the number of entries is returned. On error a negative
 * value is returned and *events is left untouched.
 */
int ust_app_list_events(struct lttng_event **events)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event *tmp_event;

	/* Start with a fixed-size array; grown by doubling below if needed. */
	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app events");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_tracepoint_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/* Hold the socket lock for the whole listing handshake. */
		pthread_mutex_lock(&app->sock_lock);
		handle = ustctl_tracepoint_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list events getting handle failed for app pid %d",
						app->pid);
			}
			pthread_mutex_unlock(&app->sock_lock);
			continue;
		}

		while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
				&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				int release_ret;

				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list get failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list get failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Continue normal execution.
					 */
					break;
				}
				free(tmp_event);
				release_ret = ustctl_release_handle(app->sock, handle);
				if (release_ret < 0 &&
						release_ret != -LTTNG_UST_ERR_EXITING &&
						release_ret != -EPIPE) {
					ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
				}
				pthread_mutex_unlock(&app->sock_lock);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event *new_tmp_event;
				size_t new_nbmem;

				/* Double the capacity; realloc failure frees everything. */
				new_nbmem = nbmem << 1;
				DBG2("Reallocating event list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
						new_nbmem * sizeof(struct lttng_event));
				if (new_tmp_event == NULL) {
					int release_ret;

					PERROR("realloc ust app events");
					free(tmp_event);
					ret = -ENOMEM;
					release_ret = ustctl_release_handle(app->sock, handle);
					if (release_ret < 0 &&
							release_ret != -LTTNG_UST_ERR_EXITING &&
							release_ret != -EPIPE) {
						ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
					}
					pthread_mutex_unlock(&app->sock_lock);
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
						(new_nbmem - nbmem) * sizeof(struct lttng_event));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}
			memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
			tmp_event[count].loglevel = uiter.loglevel;
			tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
			tmp_event[count].pid = app->pid;
			tmp_event[count].enabled = -1;
			count++;
		}
		ret = ustctl_release_handle(app->sock, handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
		}
	}

	ret = count;
	*events = tmp_event;

	DBG2("UST app list events done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
3652
3653 /*
3654 * Fill events array with all events name of all registered apps.
3655 */
3656 int ust_app_list_event_fields(struct lttng_event_field **fields)
3657 {
3658 int ret, handle;
3659 size_t nbmem, count = 0;
3660 struct lttng_ht_iter iter;
3661 struct ust_app *app;
3662 struct lttng_event_field *tmp_event;
3663
3664 nbmem = UST_APP_EVENT_LIST_SIZE;
3665 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3666 if (tmp_event == NULL) {
3667 PERROR("zmalloc ust app event fields");
3668 ret = -ENOMEM;
3669 goto error;
3670 }
3671
3672 rcu_read_lock();
3673
3674 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3675 struct lttng_ust_field_iter uiter;
3676
3677 health_code_update();
3678
3679 if (!app->compatible) {
3680 /*
3681 * TODO: In time, we should notice the caller of this error by
3682 * telling him that this is a version error.
3683 */
3684 continue;
3685 }
3686 pthread_mutex_lock(&app->sock_lock);
3687 handle = ustctl_tracepoint_field_list(app->sock);
3688 if (handle < 0) {
3689 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3690 ERR("UST app list field getting handle failed for app pid %d",
3691 app->pid);
3692 }
3693 pthread_mutex_unlock(&app->sock_lock);
3694 continue;
3695 }
3696
3697 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3698 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3699 /* Handle ustctl error. */
3700 if (ret < 0) {
3701 int release_ret;
3702
3703 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3704 ERR("UST app tp list field failed for app %d with ret %d",
3705 app->sock, ret);
3706 } else {
3707 DBG3("UST app tp list field failed. Application is dead");
3708 /*
3709 * This is normal behavior, an application can die during the
3710 * creation process. Don't report an error so the execution can
3711 * continue normally. Reset list and count for next app.
3712 */
3713 break;
3714 }
3715 free(tmp_event);
3716 release_ret = ustctl_release_handle(app->sock, handle);
3717 pthread_mutex_unlock(&app->sock_lock);
3718 if (release_ret < 0 &&
3719 release_ret != -LTTNG_UST_ERR_EXITING &&
3720 release_ret != -EPIPE) {
3721 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3722 }
3723 goto rcu_error;
3724 }
3725
3726 health_code_update();
3727 if (count >= nbmem) {
3728 /* In case the realloc fails, we free the memory */
3729 struct lttng_event_field *new_tmp_event;
3730 size_t new_nbmem;
3731
3732 new_nbmem = nbmem << 1;
3733 DBG2("Reallocating event field list from %zu to %zu entries",
3734 nbmem, new_nbmem);
3735 new_tmp_event = realloc(tmp_event,
3736 new_nbmem * sizeof(struct lttng_event_field));
3737 if (new_tmp_event == NULL) {
3738 int release_ret;
3739
3740 PERROR("realloc ust app event fields");
3741 free(tmp_event);
3742 ret = -ENOMEM;
3743 release_ret = ustctl_release_handle(app->sock, handle);
3744 pthread_mutex_unlock(&app->sock_lock);
3745 if (release_ret &&
3746 release_ret != -LTTNG_UST_ERR_EXITING &&
3747 release_ret != -EPIPE) {
3748 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3749 }
3750 goto rcu_error;
3751 }
3752 /* Zero the new memory */
3753 memset(new_tmp_event + nbmem, 0,
3754 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
3755 nbmem = new_nbmem;
3756 tmp_event = new_tmp_event;
3757 }
3758
3759 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3760 /* Mapping between these enums matches 1 to 1. */
3761 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
3762 tmp_event[count].nowrite = uiter.nowrite;
3763
3764 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3765 tmp_event[count].event.loglevel = uiter.loglevel;
3766 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
3767 tmp_event[count].event.pid = app->pid;
3768 tmp_event[count].event.enabled = -1;
3769 count++;
3770 }
3771 ret = ustctl_release_handle(app->sock, handle);
3772 pthread_mutex_unlock(&app->sock_lock);
3773 if (ret < 0 &&
3774 ret != -LTTNG_UST_ERR_EXITING &&
3775 ret != -EPIPE) {
3776 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3777 }
3778 }
3779
3780 ret = count;
3781 *fields = tmp_event;
3782
3783 DBG2("UST app list event fields done (%zu events)", count);
3784
3785 rcu_error:
3786 rcu_read_unlock();
3787 error:
3788 health_code_update();
3789 return ret;
3790 }
3791
3792 /*
3793 * Free and clean all traceable apps of the global list.
3794 *
3795 * Should _NOT_ be called with RCU read-side lock held.
3796 */
3797 void ust_app_clean_list(void)
3798 {
3799 int ret;
3800 struct ust_app *app;
3801 struct lttng_ht_iter iter;
3802
3803 DBG2("UST app cleaning registered apps hash table");
3804
3805 rcu_read_lock();
3806
3807 if (ust_app_ht) {
3808 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3809 ret = lttng_ht_del(ust_app_ht, &iter);
3810 assert(!ret);
3811 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3812 }
3813 }
3814
3815 /* Cleanup socket hash table */
3816 if (ust_app_ht_by_sock) {
3817 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3818 sock_n.node) {
3819 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3820 assert(!ret);
3821 }
3822 }
3823
3824 /* Cleanup notify socket hash table */
3825 if (ust_app_ht_by_notify_sock) {
3826 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3827 notify_sock_n.node) {
3828 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3829 assert(!ret);
3830 }
3831 }
3832 rcu_read_unlock();
3833
3834 /* Destroy is done only when the ht is empty */
3835 if (ust_app_ht) {
3836 ht_cleanup_push(ust_app_ht);
3837 }
3838 if (ust_app_ht_by_sock) {
3839 ht_cleanup_push(ust_app_ht_by_sock);
3840 }
3841 if (ust_app_ht_by_notify_sock) {
3842 ht_cleanup_push(ust_app_ht_by_notify_sock);
3843 }
3844 }
3845
3846 /*
3847 * Init UST app hash table.
3848 */
3849 int ust_app_ht_alloc(void)
3850 {
3851 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3852 if (!ust_app_ht) {
3853 return -1;
3854 }
3855 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3856 if (!ust_app_ht_by_sock) {
3857 return -1;
3858 }
3859 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3860 if (!ust_app_ht_by_notify_sock) {
3861 return -1;
3862 }
3863 return 0;
3864 }
3865
3866 /*
3867 * For a specific UST session, disable the channel for all registered apps.
3868 */
3869 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3870 struct ltt_ust_channel *uchan)
3871 {
3872 int ret = 0;
3873 struct lttng_ht_iter iter;
3874 struct lttng_ht_node_str *ua_chan_node;
3875 struct ust_app *app;
3876 struct ust_app_session *ua_sess;
3877 struct ust_app_channel *ua_chan;
3878
3879 if (usess == NULL || uchan == NULL) {
3880 ERR("Disabling UST global channel with NULL values");
3881 ret = -1;
3882 goto error;
3883 }
3884
3885 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
3886 uchan->name, usess->id);
3887
3888 rcu_read_lock();
3889
3890 /* For every registered applications */
3891 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3892 struct lttng_ht_iter uiter;
3893 if (!app->compatible) {
3894 /*
3895 * TODO: In time, we should notice the caller of this error by
3896 * telling him that this is a version error.
3897 */
3898 continue;
3899 }
3900 ua_sess = lookup_session_by_app(usess, app);
3901 if (ua_sess == NULL) {
3902 continue;
3903 }
3904
3905 /* Get channel */
3906 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3907 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3908 /* If the session if found for the app, the channel must be there */
3909 assert(ua_chan_node);
3910
3911 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3912 /* The channel must not be already disabled */
3913 assert(ua_chan->enabled == 1);
3914
3915 /* Disable channel onto application */
3916 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3917 if (ret < 0) {
3918 /* XXX: We might want to report this error at some point... */
3919 continue;
3920 }
3921 }
3922
3923 rcu_read_unlock();
3924
3925 error:
3926 return ret;
3927 }
3928
3929 /*
3930 * For a specific UST session, enable the channel for all registered apps.
3931 */
3932 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3933 struct ltt_ust_channel *uchan)
3934 {
3935 int ret = 0;
3936 struct lttng_ht_iter iter;
3937 struct ust_app *app;
3938 struct ust_app_session *ua_sess;
3939
3940 if (usess == NULL || uchan == NULL) {
3941 ERR("Adding UST global channel to NULL values");
3942 ret = -1;
3943 goto error;
3944 }
3945
3946 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3947 uchan->name, usess->id);
3948
3949 rcu_read_lock();
3950
3951 /* For every registered applications */
3952 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3953 if (!app->compatible) {
3954 /*
3955 * TODO: In time, we should notice the caller of this error by
3956 * telling him that this is a version error.
3957 */
3958 continue;
3959 }
3960 ua_sess = lookup_session_by_app(usess, app);
3961 if (ua_sess == NULL) {
3962 continue;
3963 }
3964
3965 /* Enable channel onto application */
3966 ret = enable_ust_app_channel(ua_sess, uchan, app);
3967 if (ret < 0) {
3968 /* XXX: We might want to report this error at some point... */
3969 continue;
3970 }
3971 }
3972
3973 rcu_read_unlock();
3974
3975 error:
3976 return ret;
3977 }
3978
3979 /*
3980 * Disable an event in a channel and for a specific session.
3981 */
3982 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3983 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3984 {
3985 int ret = 0;
3986 struct lttng_ht_iter iter, uiter;
3987 struct lttng_ht_node_str *ua_chan_node;
3988 struct ust_app *app;
3989 struct ust_app_session *ua_sess;
3990 struct ust_app_channel *ua_chan;
3991 struct ust_app_event *ua_event;
3992
3993 DBG("UST app disabling event %s for all apps in channel "
3994 "%s for session id %" PRIu64,
3995 uevent->attr.name, uchan->name, usess->id);
3996
3997 rcu_read_lock();
3998
3999 /* For all registered applications */
4000 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4001 if (!app->compatible) {
4002 /*
4003 * TODO: In time, we should notice the caller of this error by
4004 * telling him that this is a version error.
4005 */
4006 continue;
4007 }
4008 ua_sess = lookup_session_by_app(usess, app);
4009 if (ua_sess == NULL) {
4010 /* Next app */
4011 continue;
4012 }
4013
4014 /* Lookup channel in the ust app session */
4015 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4016 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4017 if (ua_chan_node == NULL) {
4018 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
4019 "Skipping", uchan->name, usess->id, app->pid);
4020 continue;
4021 }
4022 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4023
4024 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4025 uevent->filter, uevent->attr.loglevel,
4026 uevent->exclusion);
4027 if (ua_event == NULL) {
4028 DBG2("Event %s not found in channel %s for app pid %d."
4029 "Skipping", uevent->attr.name, uchan->name, app->pid);
4030 continue;
4031 }
4032
4033 ret = disable_ust_app_event(ua_sess, ua_event, app);
4034 if (ret < 0) {
4035 /* XXX: Report error someday... */
4036 continue;
4037 }
4038 }
4039
4040 rcu_read_unlock();
4041
4042 return ret;
4043 }
4044
/*
 * For a specific UST session, create the channel for all registered apps.
 *
 * Per-app failures that only mean "this app went away" (-ENOTCONN) are
 * absorbed; fatal errors (-ENOMEM and others) abort the whole loop.
 */
int ust_app_create_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0, created;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;

	/* Very wrong code flow */
	assert(usess);
	assert(uchan);

	DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		if (!trace_ust_pid_tracker_lookup(usess, app->pid)) {
			/* Skip apps excluded by the session's PID tracker. */
			continue;
		}

		/*
		 * Create session on the tracer side and add it to app session HT. Note
		 * that if session exist, it will simply return a pointer to the ust
		 * app session.
		 */
		ret = create_ust_app_session(usess, app, &ua_sess, &created);
		if (ret < 0) {
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0;	/* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
		assert(ua_sess);

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			/* Session torn down concurrently; nothing to do for this app. */
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
				sizeof(uchan->name))) {
			/* The metadata channel only updates the session's attributes. */
			copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
			ret = 0;
		} else {
			/* Create channel onto application. We don't need the chan ref. */
			ret = create_ust_app_channel(ua_sess, uchan, app,
					LTTNG_UST_CHAN_PER_CPU, usess, NULL);
		}
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			/* Cleanup the created session if it's the case. */
			if (created) {
				destroy_app_session(app, ua_sess);
			}
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0;	/* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
	}

error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
4144
/*
 * Enable event for a specific session and channel on the tracer, for all
 * registered applications.
 *
 * Apps where the session, channel or event cannot be found are skipped;
 * a real enable failure aborts the loop and is returned.
 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps. and enabled on the
	 * tracer also.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			/* Session torn down concurrently; skip this app. */
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/*
		 * It is possible that the channel cannot be found is
		 * the channel/event creation occurs concurrently with
		 * an application exit.
		 */
		if (!ua_chan_node) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d."
					"Skipping app", uevent->attr.name, app->pid);
			/* goto keeps the unlock on a single path for this app. */
			goto next_app;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* Real failure: unlock and abort the whole loop. */
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
4229
4230 /*
4231 * For a specific existing UST session and UST channel, creates the event for
4232 * all registered apps.
4233 */
4234 int ust_app_create_event_glb(struct ltt_ust_session *usess,
4235 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4236 {
4237 int ret = 0;
4238 struct lttng_ht_iter iter, uiter;
4239 struct lttng_ht_node_str *ua_chan_node;
4240 struct ust_app *app;
4241 struct ust_app_session *ua_sess;
4242 struct ust_app_channel *ua_chan;
4243
4244 DBG("UST app creating event %s for all apps for session id %" PRIu64,
4245 uevent->attr.name, usess->id);
4246
4247 rcu_read_lock();
4248
4249 /* For all registered applications */
4250 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4251 if (!app->compatible) {
4252 /*
4253 * TODO: In time, we should notice the caller of this error by
4254 * telling him that this is a version error.
4255 */
4256 continue;
4257 }
4258 ua_sess = lookup_session_by_app(usess, app);
4259 if (!ua_sess) {
4260 /* The application has problem or is probably dead. */
4261 continue;
4262 }
4263
4264 pthread_mutex_lock(&ua_sess->lock);
4265
4266 if (ua_sess->deleted) {
4267 pthread_mutex_unlock(&ua_sess->lock);
4268 continue;
4269 }
4270
4271 /* Lookup channel in the ust app session */
4272 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4273 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4274 /* If the channel is not found, there is a code flow error */
4275 assert(ua_chan_node);
4276
4277 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4278
4279 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4280 pthread_mutex_unlock(&ua_sess->lock);
4281 if (ret < 0) {
4282 if (ret != -LTTNG_UST_ERR_EXIST) {
4283 /* Possible value at this point: -ENOMEM. If so, we stop! */
4284 break;
4285 }
4286 DBG2("UST app event %s already exist on app PID %d",
4287 uevent->attr.name, app->pid);
4288 continue;
4289 }
4290 }
4291
4292 rcu_read_unlock();
4293
4294 return ret;
4295 }
4296
4297 /*
4298 * Start tracing for a specific UST session and app.
4299 */
4300 static
4301 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
4302 {
4303 int ret = 0;
4304 struct ust_app_session *ua_sess;
4305
4306 DBG("Starting tracing for ust app pid %d", app->pid);
4307
4308 rcu_read_lock();
4309
4310 if (!app->compatible) {
4311 goto end;
4312 }
4313
4314 ua_sess = lookup_session_by_app(usess, app);
4315 if (ua_sess == NULL) {
4316 /* The session is in teardown process. Ignore and continue. */
4317 goto end;
4318 }
4319
4320 pthread_mutex_lock(&ua_sess->lock);
4321
4322 if (ua_sess->deleted) {
4323 pthread_mutex_unlock(&ua_sess->lock);
4324 goto end;
4325 }
4326
4327 /* Upon restart, we skip the setup, already done */
4328 if (ua_sess->started) {
4329 goto skip_setup;
4330 }
4331
4332 /* Create directories if consumer is LOCAL and has a path defined. */
4333 if (usess->consumer->type == CONSUMER_DST_LOCAL &&
4334 strlen(usess->consumer->dst.trace_path) > 0) {
4335 ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
4336 S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
4337 if (ret < 0) {
4338 if (errno != EEXIST) {
4339 ERR("Trace directory creation error");
4340 goto error_unlock;
4341 }
4342 }
4343 }
4344
4345 /*
4346 * Create the metadata for the application. This returns gracefully if a
4347 * metadata was already set for the session.
4348 */
4349 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
4350 if (ret < 0) {
4351 goto error_unlock;
4352 }
4353
4354 health_code_update();
4355
4356 skip_setup:
4357 /* This start the UST tracing */
4358 pthread_mutex_lock(&app->sock_lock);
4359 ret = ustctl_start_session(app->sock, ua_sess->handle);
4360 pthread_mutex_unlock(&app->sock_lock);
4361 if (ret < 0) {
4362 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4363 ERR("Error starting tracing for app pid: %d (ret: %d)",
4364 app->pid, ret);
4365 } else {
4366 DBG("UST app start session failed. Application is dead.");
4367 /*
4368 * This is normal behavior, an application can die during the
4369 * creation process. Don't report an error so the execution can
4370 * continue normally.
4371 */
4372 pthread_mutex_unlock(&ua_sess->lock);
4373 goto end;
4374 }
4375 goto error_unlock;
4376 }
4377
4378 /* Indicate that the session has been started once */
4379 ua_sess->started = 1;
4380
4381 pthread_mutex_unlock(&ua_sess->lock);
4382
4383 health_code_update();
4384
4385 /* Quiescent wait after starting trace */
4386 pthread_mutex_lock(&app->sock_lock);
4387 ret = ustctl_wait_quiescent(app->sock);
4388 pthread_mutex_unlock(&app->sock_lock);
4389 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4390 ERR("UST app wait quiescent failed for app pid %d ret %d",
4391 app->pid, ret);
4392 }
4393
4394 end:
4395 rcu_read_unlock();
4396 health_code_update();
4397 return 0;
4398
4399 error_unlock:
4400 pthread_mutex_unlock(&ua_sess->lock);
4401 rcu_read_unlock();
4402 health_code_update();
4403 return -1;
4404 }
4405
4406 /*
4407 * Stop tracing for a specific UST session and app.
4408 */
4409 static
4410 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
4411 {
4412 int ret = 0;
4413 struct ust_app_session *ua_sess;
4414 struct ust_registry_session *registry;
4415
4416 DBG("Stopping tracing for ust app pid %d", app->pid);
4417
4418 rcu_read_lock();
4419
4420 if (!app->compatible) {
4421 goto end_no_session;
4422 }
4423
4424 ua_sess = lookup_session_by_app(usess, app);
4425 if (ua_sess == NULL) {
4426 goto end_no_session;
4427 }
4428
4429 pthread_mutex_lock(&ua_sess->lock);
4430
4431 if (ua_sess->deleted) {
4432 pthread_mutex_unlock(&ua_sess->lock);
4433 goto end_no_session;
4434 }
4435
4436 /*
4437 * If started = 0, it means that stop trace has been called for a session
4438 * that was never started. It's possible since we can have a fail start
4439 * from either the application manager thread or the command thread. Simply
4440 * indicate that this is a stop error.
4441 */
4442 if (!ua_sess->started) {
4443 goto error_rcu_unlock;
4444 }
4445
4446 health_code_update();
4447
4448 /* This inhibits UST tracing */
4449 pthread_mutex_lock(&app->sock_lock);
4450 ret = ustctl_stop_session(app->sock, ua_sess->handle);
4451 pthread_mutex_unlock(&app->sock_lock);
4452 if (ret < 0) {
4453 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4454 ERR("Error stopping tracing for app pid: %d (ret: %d)",
4455 app->pid, ret);
4456 } else {
4457 DBG("UST app stop session failed. Application is dead.");
4458 /*
4459 * This is normal behavior, an application can die during the
4460 * creation process. Don't report an error so the execution can
4461 * continue normally.
4462 */
4463 goto end_unlock;
4464 }
4465 goto error_rcu_unlock;
4466 }
4467
4468 health_code_update();
4469
4470 /* Quiescent wait after stopping trace */
4471 pthread_mutex_lock(&app->sock_lock);
4472 ret = ustctl_wait_quiescent(app->sock);
4473 pthread_mutex_unlock(&app->sock_lock);
4474 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4475 ERR("UST app wait quiescent failed for app pid %d ret %d",
4476 app->pid, ret);
4477 }
4478
4479 health_code_update();
4480
4481 registry = get_session_registry(ua_sess);
4482 assert(registry);
4483
4484 /* Push metadata for application before freeing the application. */
4485 (void) push_metadata(registry, ua_sess->consumer);
4486
4487 end_unlock:
4488 pthread_mutex_unlock(&ua_sess->lock);
4489 end_no_session:
4490 rcu_read_unlock();
4491 health_code_update();
4492 return 0;
4493
4494 error_rcu_unlock:
4495 pthread_mutex_unlock(&ua_sess->lock);
4496 rcu_read_unlock();
4497 health_code_update();
4498 return -1;
4499 }
4500
4501 static
4502 int ust_app_flush_app_session(struct ust_app *app,
4503 struct ust_app_session *ua_sess)
4504 {
4505 int ret, retval = 0;
4506 struct lttng_ht_iter iter;
4507 struct ust_app_channel *ua_chan;
4508 struct consumer_socket *socket;
4509
4510 DBG("Flushing app session buffers for ust app pid %d", app->pid);
4511
4512 rcu_read_lock();
4513
4514 if (!app->compatible) {
4515 goto end_not_compatible;
4516 }
4517
4518 pthread_mutex_lock(&ua_sess->lock);
4519
4520 if (ua_sess->deleted) {
4521 goto end_deleted;
4522 }
4523
4524 health_code_update();
4525
4526 /* Flushing buffers */
4527 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4528 ua_sess->consumer);
4529
4530 /* Flush buffers and push metadata. */
4531 switch (ua_sess->buffer_type) {
4532 case LTTNG_BUFFER_PER_PID:
4533 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4534 node.node) {
4535 health_code_update();
4536 ret = consumer_flush_channel(socket, ua_chan->key);
4537 if (ret) {
4538 ERR("Error flushing consumer channel");
4539 retval = -1;
4540 continue;
4541 }
4542 }
4543 break;
4544 case LTTNG_BUFFER_PER_UID:
4545 default:
4546 assert(0);
4547 break;
4548 }
4549
4550 health_code_update();
4551
4552 end_deleted:
4553 pthread_mutex_unlock(&ua_sess->lock);
4554
4555 end_not_compatible:
4556 rcu_read_unlock();
4557 health_code_update();
4558 return retval;
4559 }
4560
4561 /*
4562 * Flush buffers for all applications for a specific UST session.
4563 * Called with UST session lock held.
4564 */
4565 static
4566 int ust_app_flush_session(struct ltt_ust_session *usess)
4567
4568 {
4569 int ret = 0;
4570
4571 DBG("Flushing session buffers for all ust apps");
4572
4573 rcu_read_lock();
4574
4575 /* Flush buffers and push metadata. */
4576 switch (usess->buffer_type) {
4577 case LTTNG_BUFFER_PER_UID:
4578 {
4579 struct buffer_reg_uid *reg;
4580 struct lttng_ht_iter iter;
4581
4582 /* Flush all per UID buffers associated to that session. */
4583 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4584 struct ust_registry_session *ust_session_reg;
4585 struct buffer_reg_channel *reg_chan;
4586 struct consumer_socket *socket;
4587
4588 /* Get consumer socket to use to push the metadata.*/
4589 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
4590 usess->consumer);
4591 if (!socket) {
4592 /* Ignore request if no consumer is found for the session. */
4593 continue;
4594 }
4595
4596 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
4597 reg_chan, node.node) {
4598 /*
4599 * The following call will print error values so the return
4600 * code is of little importance because whatever happens, we
4601 * have to try them all.
4602 */
4603 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
4604 }
4605
4606 ust_session_reg = reg->registry->reg.ust;
4607 /* Push metadata. */
4608 (void) push_metadata(ust_session_reg, usess->consumer);
4609 }
4610 break;
4611 }
4612 case LTTNG_BUFFER_PER_PID:
4613 {
4614 struct ust_app_session *ua_sess;
4615 struct lttng_ht_iter iter;
4616 struct ust_app *app;
4617
4618 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4619 ua_sess = lookup_session_by_app(usess, app);
4620 if (ua_sess == NULL) {
4621 continue;
4622 }
4623 (void) ust_app_flush_app_session(app, ua_sess);
4624 }
4625 break;
4626 }
4627 default:
4628 ret = -1;
4629 assert(0);
4630 break;
4631 }
4632
4633 rcu_read_unlock();
4634 health_code_update();
4635 return ret;
4636 }
4637
4638 static
4639 int ust_app_clear_quiescent_app_session(struct ust_app *app,
4640 struct ust_app_session *ua_sess)
4641 {
4642 int ret = 0;
4643 struct lttng_ht_iter iter;
4644 struct ust_app_channel *ua_chan;
4645 struct consumer_socket *socket;
4646
4647 DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
4648
4649 rcu_read_lock();
4650
4651 if (!app->compatible) {
4652 goto end_not_compatible;
4653 }
4654
4655 pthread_mutex_lock(&ua_sess->lock);
4656
4657 if (ua_sess->deleted) {
4658 goto end_unlock;
4659 }
4660
4661 health_code_update();
4662
4663 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4664 ua_sess->consumer);
4665 if (!socket) {
4666 ERR("Failed to find consumer (%" PRIu32 ") socket",
4667 app->bits_per_long);
4668 ret = -1;
4669 goto end_unlock;
4670 }
4671
4672 /* Clear quiescent state. */
4673 switch (ua_sess->buffer_type) {
4674 case LTTNG_BUFFER_PER_PID:
4675 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
4676 ua_chan, node.node) {
4677 health_code_update();
4678 ret = consumer_clear_quiescent_channel(socket,
4679 ua_chan->key);
4680 if (ret) {
4681 ERR("Error clearing quiescent state for consumer channel");
4682 ret = -1;
4683 continue;
4684 }
4685 }
4686 break;
4687 case LTTNG_BUFFER_PER_UID:
4688 default:
4689 assert(0);
4690 ret = -1;
4691 break;
4692 }
4693
4694 health_code_update();
4695
4696 end_unlock:
4697 pthread_mutex_unlock(&ua_sess->lock);
4698
4699 end_not_compatible:
4700 rcu_read_unlock();
4701 health_code_update();
4702 return ret;
4703 }
4704
4705 /*
4706 * Clear quiescent state in each stream for all applications for a
4707 * specific UST session.
4708 * Called with UST session lock held.
4709 */
4710 static
4711 int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
4712
4713 {
4714 int ret = 0;
4715
4716 DBG("Clearing stream quiescent state for all ust apps");
4717
4718 rcu_read_lock();
4719
4720 switch (usess->buffer_type) {
4721 case LTTNG_BUFFER_PER_UID:
4722 {
4723 struct lttng_ht_iter iter;
4724 struct buffer_reg_uid *reg;
4725
4726 /*
4727 * Clear quiescent for all per UID buffers associated to
4728 * that session.
4729 */
4730 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4731 struct consumer_socket *socket;
4732 struct buffer_reg_channel *reg_chan;
4733
4734 /* Get associated consumer socket.*/
4735 socket = consumer_find_socket_by_bitness(
4736 reg->bits_per_long, usess->consumer);
4737 if (!socket) {
4738 /*
4739 * Ignore request if no consumer is found for
4740 * the session.
4741 */
4742 continue;
4743 }
4744
4745 cds_lfht_for_each_entry(reg->registry->channels->ht,
4746 &iter.iter, reg_chan, node.node) {
4747 /*
4748 * The following call will print error values so
4749 * the return code is of little importance
4750 * because whatever happens, we have to try them
4751 * all.
4752 */
4753 (void) consumer_clear_quiescent_channel(socket,
4754 reg_chan->consumer_key);
4755 }
4756 }
4757 break;
4758 }
4759 case LTTNG_BUFFER_PER_PID:
4760 {
4761 struct ust_app_session *ua_sess;
4762 struct lttng_ht_iter iter;
4763 struct ust_app *app;
4764
4765 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
4766 pid_n.node) {
4767 ua_sess = lookup_session_by_app(usess, app);
4768 if (ua_sess == NULL) {
4769 continue;
4770 }
4771 (void) ust_app_clear_quiescent_app_session(app,
4772 ua_sess);
4773 }
4774 break;
4775 }
4776 default:
4777 ret = -1;
4778 assert(0);
4779 break;
4780 }
4781
4782 rcu_read_unlock();
4783 health_code_update();
4784 return ret;
4785 }
4786
4787 /*
4788 * Destroy a specific UST session in apps.
4789 */
4790 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
4791 {
4792 int ret;
4793 struct ust_app_session *ua_sess;
4794 struct lttng_ht_iter iter;
4795 struct lttng_ht_node_u64 *node;
4796
4797 DBG("Destroy tracing for ust app pid %d", app->pid);
4798
4799 rcu_read_lock();
4800
4801 if (!app->compatible) {
4802 goto end;
4803 }
4804
4805 __lookup_session_by_app(usess, app, &iter);
4806 node = lttng_ht_iter_get_node_u64(&iter);
4807 if (node == NULL) {
4808 /* Session is being or is deleted. */
4809 goto end;
4810 }
4811 ua_sess = caa_container_of(node, struct ust_app_session, node);
4812
4813 health_code_update();
4814 destroy_app_session(app, ua_sess);
4815
4816 health_code_update();
4817
4818 /* Quiescent wait after stopping trace */
4819 pthread_mutex_lock(&app->sock_lock);
4820 ret = ustctl_wait_quiescent(app->sock);
4821 pthread_mutex_unlock(&app->sock_lock);
4822 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4823 ERR("UST app wait quiescent failed for app pid %d ret %d",
4824 app->pid, ret);
4825 }
4826 end:
4827 rcu_read_unlock();
4828 health_code_update();
4829 return 0;
4830 }
4831
4832 /*
4833 * Start tracing for the UST session.
4834 */
4835 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4836 {
4837 int ret = 0;
4838 struct lttng_ht_iter iter;
4839 struct ust_app *app;
4840
4841 DBG("Starting all UST traces");
4842
4843 rcu_read_lock();
4844
4845 /*
4846 * In a start-stop-start use-case, we need to clear the quiescent state
4847 * of each channel set by the prior stop command, thus ensuring that a
4848 * following stop or destroy is sure to grab a timestamp_end near those
4849 * operations, even if the packet is empty.
4850 */
4851 (void) ust_app_clear_quiescent_session(usess);
4852
4853 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4854 ret = ust_app_start_trace(usess, app);
4855 if (ret < 0) {
4856 /* Continue to next apps even on error */
4857 continue;
4858 }
4859 }
4860
4861 rcu_read_unlock();
4862
4863 return 0;
4864 }
4865
4866 /*
4867 * Start tracing for the UST session.
4868 * Called with UST session lock held.
4869 */
4870 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4871 {
4872 int ret = 0;
4873 struct lttng_ht_iter iter;
4874 struct ust_app *app;
4875
4876 DBG("Stopping all UST traces");
4877
4878 rcu_read_lock();
4879
4880 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4881 ret = ust_app_stop_trace(usess, app);
4882 if (ret < 0) {
4883 /* Continue to next apps even on error */
4884 continue;
4885 }
4886 }
4887
4888 (void) ust_app_flush_session(usess);
4889
4890 rcu_read_unlock();
4891
4892 return 0;
4893 }
4894
4895 /*
4896 * Destroy app UST session.
4897 */
4898 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4899 {
4900 int ret = 0;
4901 struct lttng_ht_iter iter;
4902 struct ust_app *app;
4903
4904 DBG("Destroy all UST traces");
4905
4906 rcu_read_lock();
4907
4908 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4909 ret = destroy_trace(usess, app);
4910 if (ret < 0) {
4911 /* Continue to next apps even on error */
4912 continue;
4913 }
4914 }
4915
4916 rcu_read_unlock();
4917
4918 return 0;
4919 }
4920
/*
 * Create the full tracing state (session, channels, contexts, events) for
 * one registered application, then start tracing in it if the UST session
 * is already active.
 *
 * On error, any partially-created app session is torn down.
 */
static
void ust_app_global_create(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_session *ua_sess = NULL;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	int is_created = 0;

	ret = create_ust_app_session(usess, app, &ua_sess, &is_created);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	if (!is_created) {
		/* App session already created. */
		goto end;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		/* Session was marked for deletion while we acquired the lock. */
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/*
	 * We can iterate safely here over all UST app session since the create ust
	 * app session above made a shadow copy of the UST global domain from the
	 * ltt ust session.
	 */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = do_create_channel(app, usess, ua_sess, ua_chan);
		if (ret < 0 && ret != -ENOTCONN) {
			/*
			 * Stop everything. On error, the application
			 * failed, no more file descriptor are available
			 * or ENOMEM so stopping here is the only thing
			 * we can do for now. The only exception is
			 * -ENOTCONN, which indicates that the application
			 * has exit.
			 */
			goto error_unlock;
		}

		/*
		 * Add context using the list so they are enabled in the same order the
		 * user added them.
		 */
		cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
			ret = create_ust_channel_context(ua_chan, ua_ctx, app);
			if (ret < 0) {
				goto error_unlock;
			}
		}


		/* For each events */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
			if (ret < 0) {
				goto error_unlock;
			}
		}
	}

	pthread_mutex_unlock(&ua_sess->lock);

	if (usess->active) {
		/* The session is already started: bring this app up to speed. */
		ret = ust_app_start_trace(usess, app);
		if (ret < 0) {
			goto error;
		}

		DBG2("UST trace started for app pid %d", app->pid);
	}
end:
	/* Everything went well at this point. */
	return;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
error:
	if (ua_sess) {
		/* Tear down whatever was created so far for this app. */
		destroy_app_session(app, ua_sess);
	}
	return;
}
5014
5015 static
5016 void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
5017 {
5018 struct ust_app_session *ua_sess;
5019
5020 ua_sess = lookup_session_by_app(usess, app);
5021 if (ua_sess == NULL) {
5022 return;
5023 }
5024 destroy_app_session(app, ua_sess);
5025 }
5026
5027 /*
5028 * Add channels/events from UST global domain to registered apps at sock.
5029 *
5030 * Called with session lock held.
5031 * Called with RCU read-side lock held.
5032 */
5033 void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
5034 {
5035 assert(usess);
5036
5037 DBG2("UST app global update for app sock %d for session id %" PRIu64,
5038 app->sock, usess->id);
5039
5040 if (!app->compatible) {
5041 return;
5042 }
5043
5044 if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
5045 ust_app_global_create(usess, app);
5046 } else {
5047 ust_app_global_destroy(usess, app);
5048 }
5049 }
5050
5051 /*
5052 * Called with session lock held.
5053 */
5054 void ust_app_global_update_all(struct ltt_ust_session *usess)
5055 {
5056 struct lttng_ht_iter iter;
5057 struct ust_app *app;
5058
5059 rcu_read_lock();
5060 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5061 ust_app_global_update(usess, app);
5062 }
5063 rcu_read_unlock();
5064 }
5065
5066 /*
5067 * Add context to a specific channel for global UST domain.
5068 */
5069 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
5070 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
5071 {
5072 int ret = 0;
5073 struct lttng_ht_node_str *ua_chan_node;
5074 struct lttng_ht_iter iter, uiter;
5075 struct ust_app_channel *ua_chan = NULL;
5076 struct ust_app_session *ua_sess;
5077 struct ust_app *app;
5078
5079 rcu_read_lock();
5080
5081 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5082 if (!app->compatible) {
5083 /*
5084 * TODO: In time, we should notice the caller of this error by
5085 * telling him that this is a version error.
5086 */
5087 continue;
5088 }
5089 ua_sess = lookup_session_by_app(usess, app);
5090 if (ua_sess == NULL) {
5091 continue;
5092 }
5093
5094 pthread_mutex_lock(&ua_sess->lock);
5095
5096 if (ua_sess->deleted) {
5097 pthread_mutex_unlock(&ua_sess->lock);
5098 continue;
5099 }
5100
5101 /* Lookup channel in the ust app session */
5102 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
5103 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
5104 if (ua_chan_node == NULL) {
5105 goto next_app;
5106 }
5107 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
5108 node);
5109 ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
5110 if (ret < 0) {
5111 goto next_app;
5112 }
5113 next_app:
5114 pthread_mutex_unlock(&ua_sess->lock);
5115 }
5116
5117 rcu_read_unlock();
5118 return ret;
5119 }
5120
5121 /*
5122 * Enable event for a channel from a UST session for a specific PID.
5123 */
5124 int ust_app_enable_event_pid(struct ltt_ust_session *usess,
5125 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
5126 {
5127 int ret = 0;
5128 struct lttng_ht_iter iter;
5129 struct lttng_ht_node_str *ua_chan_node;
5130 struct ust_app *app;
5131 struct ust_app_session *ua_sess;
5132 struct ust_app_channel *ua_chan;
5133 struct ust_app_event *ua_event;
5134
5135 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
5136
5137 rcu_read_lock();
5138
5139 app = ust_app_find_by_pid(pid);
5140 if (app == NULL) {
5141 ERR("UST app enable event per PID %d not found", pid);
5142 ret = -1;
5143 goto end;
5144 }
5145
5146 if (!app->compatible) {
5147 ret = 0;
5148 goto end;
5149 }
5150
5151 ua_sess = lookup_session_by_app(usess, app);
5152 if (!ua_sess) {
5153 /* The application has problem or is probably dead. */
5154 ret = 0;
5155 goto end;
5156 }
5157
5158 pthread_mutex_lock(&ua_sess->lock);
5159
5160 if (ua_sess->deleted) {
5161 ret = 0;
5162 goto end_unlock;
5163 }
5164
5165 /* Lookup channel in the ust app session */
5166 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
5167 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
5168 /* If the channel is not found, there is a code flow error */
5169 assert(ua_chan_node);
5170
5171 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
5172
5173 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
5174 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
5175 if (ua_event == NULL) {
5176 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5177 if (ret < 0) {
5178 goto end_unlock;
5179 }
5180 } else {
5181 ret = enable_ust_app_event(ua_sess, ua_event, app);
5182 if (ret < 0) {
5183 goto end_unlock;
5184 }
5185 }
5186
5187 end_unlock:
5188 pthread_mutex_unlock(&ua_sess->lock);
5189 end:
5190 rcu_read_unlock();
5191 return ret;
5192 }
5193
5194 /*
5195 * Receive registration and populate the given msg structure.
5196 *
5197 * On success return 0 else a negative value returned by the ustctl call.
5198 */
5199 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
5200 {
5201 int ret;
5202 uint32_t pid, ppid, uid, gid;
5203
5204 assert(msg);
5205
5206 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
5207 &pid, &ppid, &uid, &gid,
5208 &msg->bits_per_long,
5209 &msg->uint8_t_alignment,
5210 &msg->uint16_t_alignment,
5211 &msg->uint32_t_alignment,
5212 &msg->uint64_t_alignment,
5213 &msg->long_alignment,
5214 &msg->byte_order,
5215 msg->name);
5216 if (ret < 0) {
5217 switch (-ret) {
5218 case EPIPE:
5219 case ECONNRESET:
5220 case LTTNG_UST_ERR_EXITING:
5221 DBG3("UST app recv reg message failed. Application died");
5222 break;
5223 case LTTNG_UST_ERR_UNSUP_MAJOR:
5224 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
5225 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
5226 LTTNG_UST_ABI_MINOR_VERSION);
5227 break;
5228 default:
5229 ERR("UST app recv reg message failed with ret %d", ret);
5230 break;
5231 }
5232 goto error;
5233 }
5234 msg->pid = (pid_t) pid;
5235 msg->ppid = (pid_t) ppid;
5236 msg->uid = (uid_t) uid;
5237 msg->gid = (gid_t) gid;
5238
5239 error:
5240 return ret;
5241 }
5242
5243 /*
5244 * Return a ust app session object using the application object and the
5245 * session object descriptor has a key. If not found, NULL is returned.
5246 * A RCU read side lock MUST be acquired when calling this function.
5247 */
5248 static struct ust_app_session *find_session_by_objd(struct ust_app *app,
5249 int objd)
5250 {
5251 struct lttng_ht_node_ulong *node;
5252 struct lttng_ht_iter iter;
5253 struct ust_app_session *ua_sess = NULL;
5254
5255 assert(app);
5256
5257 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
5258 node = lttng_ht_iter_get_node_ulong(&iter);
5259 if (node == NULL) {
5260 DBG2("UST app session find by objd %d not found", objd);
5261 goto error;
5262 }
5263
5264 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
5265
5266 error:
5267 return ua_sess;
5268 }
5269
5270 /*
5271 * Return a ust app channel object using the application object and the channel
5272 * object descriptor has a key. If not found, NULL is returned. A RCU read side
5273 * lock MUST be acquired before calling this function.
5274 */
5275 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
5276 int objd)
5277 {
5278 struct lttng_ht_node_ulong *node;
5279 struct lttng_ht_iter iter;
5280 struct ust_app_channel *ua_chan = NULL;
5281
5282 assert(app);
5283
5284 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
5285 node = lttng_ht_iter_get_node_ulong(&iter);
5286 if (node == NULL) {
5287 DBG2("UST app channel find by objd %d not found", objd);
5288 goto error;
5289 }
5290
5291 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
5292
5293 error:
5294 return ua_chan;
5295 }
5296
5297 /*
5298 * Reply to a register channel notification from an application on the notify
5299 * socket. The channel metadata is also created.
5300 *
5301 * The session UST registry lock is acquired in this function.
5302 *
5303 * On success 0 is returned else a negative value.
5304 */
5305 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
5306 size_t nr_fields, struct ustctl_field *fields)
5307 {
5308 int ret, ret_code = 0;
5309 uint32_t chan_id, reg_count;
5310 uint64_t chan_reg_key;
5311 enum ustctl_channel_header type;
5312 struct ust_app *app;
5313 struct ust_app_channel *ua_chan;
5314 struct ust_app_session *ua_sess;
5315 struct ust_registry_session *registry;
5316 struct ust_registry_channel *chan_reg;
5317
5318 rcu_read_lock();
5319
5320 /* Lookup application. If not found, there is a code flow error. */
5321 app = find_app_by_notify_sock(sock);
5322 if (!app) {
5323 DBG("Application socket %d is being teardown. Abort event notify",
5324 sock);
5325 ret = 0;
5326 free(fields);
5327 goto error_rcu_unlock;
5328 }
5329
5330 /* Lookup channel by UST object descriptor. */
5331 ua_chan = find_channel_by_objd(app, cobjd);
5332 if (!ua_chan) {
5333 DBG("Application channel is being teardown. Abort event notify");
5334 ret = 0;
5335 free(fields);
5336 goto error_rcu_unlock;
5337 }
5338
5339 assert(ua_chan->session);
5340 ua_sess = ua_chan->session;
5341
5342 /* Get right session registry depending on the session buffer type. */
5343 registry = get_session_registry(ua_sess);
5344 assert(registry);
5345
5346 /* Depending on the buffer type, a different channel key is used. */
5347 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5348 chan_reg_key = ua_chan->tracing_channel_id;
5349 } else {
5350 chan_reg_key = ua_chan->key;
5351 }
5352
5353 pthread_mutex_lock(&registry->lock);
5354
5355 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
5356 assert(chan_reg);
5357
5358 if (!chan_reg->register_done) {
5359 reg_count = ust_registry_get_event_count(chan_reg);
5360 if (reg_count < 31) {
5361 type = USTCTL_CHANNEL_HEADER_COMPACT;
5362 } else {
5363 type = USTCTL_CHANNEL_HEADER_LARGE;
5364 }
5365
5366 chan_reg->nr_ctx_fields = nr_fields;
5367 chan_reg->ctx_fields = fields;
5368 chan_reg->header_type = type;
5369 } else {
5370 /* Get current already assigned values. */
5371 type = chan_reg->header_type;
5372 free(fields);
5373 /* Set to NULL so the error path does not do a double free. */
5374 fields = NULL;
5375 }
5376 /* Channel id is set during the object creation. */
5377 chan_id = chan_reg->chan_id;
5378
5379 /* Append to metadata */
5380 if (!chan_reg->metadata_dumped) {
5381 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
5382 if (ret_code) {
5383 ERR("Error appending channel metadata (errno = %d)", ret_code);
5384 goto reply;
5385 }
5386 }
5387
5388 reply:
5389 DBG3("UST app replying to register channel key %" PRIu64
5390 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
5391 ret_code);
5392
5393 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
5394 if (ret < 0) {
5395 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5396 ERR("UST app reply channel failed with ret %d", ret);
5397 } else {
5398 DBG3("UST app reply channel failed. Application died");
5399 }
5400 goto error;
5401 }
5402
5403 /* This channel registry registration is completed. */
5404 chan_reg->register_done = 1;
5405
5406 error:
5407 pthread_mutex_unlock(&registry->lock);
5408 error_rcu_unlock:
5409 rcu_read_unlock();
5410 if (ret) {
5411 free(fields);
5412 }
5413 return ret;
5414 }
5415
5416 /*
5417 * Add event to the UST channel registry. When the event is added to the
5418 * registry, the metadata is also created. Once done, this replies to the
5419 * application with the appropriate error code.
5420 *
5421 * The session UST registry lock is acquired in the function.
5422 *
5423 * On success 0 is returned else a negative value.
5424 */
5425 static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
5426 char *sig, size_t nr_fields, struct ustctl_field *fields,
5427 int loglevel_value, char *model_emf_uri)
5428 {
5429 int ret, ret_code;
5430 uint32_t event_id = 0;
5431 uint64_t chan_reg_key;
5432 struct ust_app *app;
5433 struct ust_app_channel *ua_chan;
5434 struct ust_app_session *ua_sess;
5435 struct ust_registry_session *registry;
5436
5437 rcu_read_lock();
5438
5439 /* Lookup application. If not found, there is a code flow error. */
5440 app = find_app_by_notify_sock(sock);
5441 if (!app) {
5442 DBG("Application socket %d is being teardown. Abort event notify",
5443 sock);
5444 ret = 0;
5445 free(sig);
5446 free(fields);
5447 free(model_emf_uri);
5448 goto error_rcu_unlock;
5449 }
5450
5451 /* Lookup channel by UST object descriptor. */
5452 ua_chan = find_channel_by_objd(app, cobjd);
5453 if (!ua_chan) {
5454 DBG("Application channel is being teardown. Abort event notify");
5455 ret = 0;
5456 free(sig);
5457 free(fields);
5458 free(model_emf_uri);
5459 goto error_rcu_unlock;
5460 }
5461
5462 assert(ua_chan->session);
5463 ua_sess = ua_chan->session;
5464
5465 registry = get_session_registry(ua_sess);
5466 assert(registry);
5467
5468 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5469 chan_reg_key = ua_chan->tracing_channel_id;
5470 } else {
5471 chan_reg_key = ua_chan->key;
5472 }
5473
5474 pthread_mutex_lock(&registry->lock);
5475
5476 /*
5477 * From this point on, this call acquires the ownership of the sig, fields
5478 * and model_emf_uri meaning any free are done inside it if needed. These
5479 * three variables MUST NOT be read/write after this.
5480 */
5481 ret_code = ust_registry_create_event(registry, chan_reg_key,
5482 sobjd, cobjd, name, sig, nr_fields, fields,
5483 loglevel_value, model_emf_uri, ua_sess->buffer_type,
5484 &event_id, app);
5485
5486 /*
5487 * The return value is returned to ustctl so in case of an error, the
5488 * application can be notified. In case of an error, it's important not to
5489 * return a negative error or else the application will get closed.
5490 */
5491 ret = ustctl_reply_register_event(sock, event_id, ret_code);
5492 if (ret < 0) {
5493 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5494 ERR("UST app reply event failed with ret %d", ret);
5495 } else {
5496 DBG3("UST app reply event failed. Application died");
5497 }
5498 /*
5499 * No need to wipe the create event since the application socket will
5500 * get close on error hence cleaning up everything by itself.
5501 */
5502 goto error;
5503 }
5504
5505 DBG3("UST registry event %s with id %" PRId32 " added successfully",
5506 name, event_id);
5507
5508 error:
5509 pthread_mutex_unlock(&registry->lock);
5510 error_rcu_unlock:
5511 rcu_read_unlock();
5512 return ret;
5513 }
5514
/*
 * Add an enum to the UST session registry. Once done, this replies to the
 * application with the appropriate error code.
 *
 * Ownership note: this function takes ownership of 'entries'; it is either
 * consumed by the registry or freed here. The caller must not touch it after
 * this call returns.
 *
 * The session UST registry lock is acquired within this function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_enum_ust_registry(int sock, int sobjd, char *name,
		struct ustctl_enum_entry *entries, size_t nr_entries)
{
	int ret = 0, ret_code;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	uint64_t enum_id = -1ULL;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		/* Return 0: an application being torn down is not an error. */
		DBG("Application socket %d is being torn down. Aborting enum registration",
				sock);
		free(entries);
		goto error_rcu_unlock;
	}

	/* Lookup session by UST object descriptor. */
	ua_sess = find_session_by_objd(app, sobjd);
	if (!ua_sess) {
		/* Return 0: a session being torn down is not an error. */
		DBG("Application session is being torn down. Aborting enum registration.");
		free(entries);
		goto error_rcu_unlock;
	}

	registry = get_session_registry(ua_sess);
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, the callee acquires the ownership of
	 * entries. The variable entries MUST NOT be read/written after
	 * call.
	 */
	ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
			entries, nr_entries, &enum_id);
	entries = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply enum failed with ret %d", ret);
		} else {
			DBG3("UST app reply enum failed. Application died");
		}
		/*
		 * No need to wipe the create enum since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry enum %s added successfully or already found", name);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
5594
/*
 * Handle application notification through the given notify socket.
 *
 * Dispatches on the received command: event, channel or enum registration.
 * Each branch receives dynamically-allocated payloads from ustctl and
 * transfers their ownership to the registry helper it calls.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	/* Read which notification command the application sent. */
	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel_value;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		/* sig, fields and model_emf_uri are allocated by this call. */
		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
				&loglevel_value, &sig, &nr_fields, &fields,
				&model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownership of these variables and transfers
		 * them to the called function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel_value, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		/* fields is allocated by this call. */
		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership is transferred to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean it up.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_ENUM:
	{
		int sobjd;
		char name[LTTNG_UST_SYM_NAME_LEN];
		size_t nr_entries;
		struct ustctl_enum_entry *entries;

		DBG2("UST app ustctl register enum received");

		/* entries is allocated by this call. */
		ret = ustctl_recv_register_enum(sock, &sobjd, name,
				&entries, &nr_entries);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv enum failed with ret %d", ret);
			} else {
				DBG3("UST app recv enum failed. Application died");
			}
			goto error;
		}

		/* Callee assumes ownership of entries */
		ret = add_enum_ust_registry(sock, sobjd, name,
				entries, nr_entries);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
5722
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whatever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	/* Holder handed to call_rcu so the fd is closed after a grace period. */
	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independently from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and if it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
5799
5800 /*
5801 * Destroy a ust app data structure and free its memory.
5802 */
5803 void ust_app_destroy(struct ust_app *app)
5804 {
5805 if (!app) {
5806 return;
5807 }
5808
5809 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5810 }
5811
5812 /*
5813 * Take a snapshot for a given UST session. The snapshot is sent to the given
5814 * output.
5815 *
5816 * Return 0 on success or else a negative value.
5817 */
5818 int ust_app_snapshot_record(struct ltt_ust_session *usess,
5819 struct snapshot_output *output, int wait,
5820 uint64_t nb_packets_per_stream)
5821 {
5822 int ret = 0;
5823 struct lttng_ht_iter iter;
5824 struct ust_app *app;
5825 char pathname[PATH_MAX];
5826
5827 assert(usess);
5828 assert(output);
5829
5830 rcu_read_lock();
5831
5832 switch (usess->buffer_type) {
5833 case LTTNG_BUFFER_PER_UID:
5834 {
5835 struct buffer_reg_uid *reg;
5836
5837 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5838 struct buffer_reg_channel *reg_chan;
5839 struct consumer_socket *socket;
5840
5841 /* Get consumer socket to use to push the metadata.*/
5842 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5843 usess->consumer);
5844 if (!socket) {
5845 ret = -EINVAL;
5846 goto error;
5847 }
5848
5849 memset(pathname, 0, sizeof(pathname));
5850 ret = snprintf(pathname, sizeof(pathname),
5851 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
5852 reg->uid, reg->bits_per_long);
5853 if (ret < 0) {
5854 PERROR("snprintf snapshot path");
5855 goto error;
5856 }
5857
5858 /* Add the UST default trace dir to path. */
5859 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5860 reg_chan, node.node) {
5861 ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
5862 output, 0, usess->uid, usess->gid, pathname, wait,
5863 nb_packets_per_stream);
5864 if (ret < 0) {
5865 goto error;
5866 }
5867 }
5868 ret = consumer_snapshot_channel(socket,
5869 reg->registry->reg.ust->metadata_key, output, 1,
5870 usess->uid, usess->gid, pathname, wait, 0);
5871 if (ret < 0) {
5872 goto error;
5873 }
5874 }
5875 break;
5876 }
5877 case LTTNG_BUFFER_PER_PID:
5878 {
5879 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5880 struct consumer_socket *socket;
5881 struct lttng_ht_iter chan_iter;
5882 struct ust_app_channel *ua_chan;
5883 struct ust_app_session *ua_sess;
5884 struct ust_registry_session *registry;
5885
5886 ua_sess = lookup_session_by_app(usess, app);
5887 if (!ua_sess) {
5888 /* Session not associated with this app. */
5889 continue;
5890 }
5891
5892 /* Get the right consumer socket for the application. */
5893 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5894 output->consumer);
5895 if (!socket) {
5896 ret = -EINVAL;
5897 goto error;
5898 }
5899
5900 /* Add the UST default trace dir to path. */
5901 memset(pathname, 0, sizeof(pathname));
5902 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
5903 ua_sess->path);
5904 if (ret < 0) {
5905 PERROR("snprintf snapshot path");
5906 goto error;
5907 }
5908
5909 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5910 ua_chan, node.node) {
5911 ret = consumer_snapshot_channel(socket, ua_chan->key, output,
5912 0, ua_sess->euid, ua_sess->egid, pathname, wait,
5913 nb_packets_per_stream);
5914 if (ret < 0) {
5915 goto error;
5916 }
5917 }
5918
5919 registry = get_session_registry(ua_sess);
5920 assert(registry);
5921 ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
5922 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
5923 if (ret < 0) {
5924 goto error;
5925 }
5926 }
5927 break;
5928 }
5929 default:
5930 assert(0);
5931 break;
5932 }
5933
5934 error:
5935 rcu_read_unlock();
5936 return ret;
5937 }
5938
5939 /*
5940 * Return the size taken by one more packet per stream.
5941 */
5942 uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
5943 uint64_t cur_nr_packets)
5944 {
5945 uint64_t tot_size = 0;
5946 struct ust_app *app;
5947 struct lttng_ht_iter iter;
5948
5949 assert(usess);
5950
5951 switch (usess->buffer_type) {
5952 case LTTNG_BUFFER_PER_UID:
5953 {
5954 struct buffer_reg_uid *reg;
5955
5956 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5957 struct buffer_reg_channel *reg_chan;
5958
5959 rcu_read_lock();
5960 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5961 reg_chan, node.node) {
5962 if (cur_nr_packets >= reg_chan->num_subbuf) {
5963 /*
5964 * Don't take channel into account if we
5965 * already grab all its packets.
5966 */
5967 continue;
5968 }
5969 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
5970 }
5971 rcu_read_unlock();
5972 }
5973 break;
5974 }
5975 case LTTNG_BUFFER_PER_PID:
5976 {
5977 rcu_read_lock();
5978 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5979 struct ust_app_channel *ua_chan;
5980 struct ust_app_session *ua_sess;
5981 struct lttng_ht_iter chan_iter;
5982
5983 ua_sess = lookup_session_by_app(usess, app);
5984 if (!ua_sess) {
5985 /* Session not associated with this app. */
5986 continue;
5987 }
5988
5989 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5990 ua_chan, node.node) {
5991 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
5992 /*
5993 * Don't take channel into account if we
5994 * already grab all its packets.
5995 */
5996 continue;
5997 }
5998 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
5999 }
6000 }
6001 rcu_read_unlock();
6002 break;
6003 }
6004 default:
6005 assert(0);
6006 break;
6007 }
6008
6009 return tot_size;
6010 }
6011
/*
 * Fetch runtime channel statistics for a per-UID buffer session. Depending on
 * the channel mode, either the lost packet count (overwrite mode, discarded
 * forced to 0) or the discarded event count (discard mode, lost forced to 0)
 * is queried from the consumer.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
		struct cds_list_head *buffer_reg_uid_list,
		struct consumer_output *consumer, uint64_t uchan_id,
		int overwrite, uint64_t *discarded, uint64_t *lost)
{
	uint64_t consumer_chan_key;
	int ret;

	/* Resolve the consumer-side key of the channel first. */
	ret = buffer_reg_uid_consumer_channel_key(buffer_reg_uid_list,
			ust_session_id, uchan_id, &consumer_chan_key);
	if (ret >= 0) {
		if (overwrite) {
			ret = consumer_get_lost_packets(ust_session_id,
					consumer_chan_key, consumer, lost);
			*discarded = 0;
		} else {
			ret = consumer_get_discarded_events(ust_session_id,
					consumer_chan_key, consumer, discarded);
			*lost = 0;
		}
	}

	return ret;
}
6040
6041 int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
6042 struct ltt_ust_channel *uchan,
6043 struct consumer_output *consumer, int overwrite,
6044 uint64_t *discarded, uint64_t *lost)
6045 {
6046 int ret = 0;
6047 struct lttng_ht_iter iter;
6048 struct lttng_ht_node_str *ua_chan_node;
6049 struct ust_app *app;
6050 struct ust_app_session *ua_sess;
6051 struct ust_app_channel *ua_chan;
6052
6053 rcu_read_lock();
6054 /*
6055 * Iterate over every registered applications, return when we
6056 * found one in the right session and channel.
6057 */
6058 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6059 struct lttng_ht_iter uiter;
6060
6061 ua_sess = lookup_session_by_app(usess, app);
6062 if (ua_sess == NULL) {
6063 continue;
6064 }
6065
6066 /* Get channel */
6067 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
6068 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6069 /* If the session is found for the app, the channel must be there */
6070 assert(ua_chan_node);
6071
6072 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
6073
6074 if (overwrite) {
6075 ret = consumer_get_lost_packets(usess->id, ua_chan->key,
6076 consumer, lost);
6077 *discarded = 0;
6078 goto end;
6079 } else {
6080 ret = consumer_get_discarded_events(usess->id,
6081 ua_chan->key, consumer, discarded);
6082 *lost = 0;
6083 goto end;
6084 }
6085 }
6086
6087 end:
6088 rcu_read_unlock();
6089 return ret;
6090 }
6091
6092 static
6093 int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
6094 struct ust_app *app)
6095 {
6096 int ret = 0;
6097 struct ust_app_session *ua_sess;
6098
6099 DBG("Regenerating the metadata for ust app pid %d", app->pid);
6100
6101 rcu_read_lock();
6102
6103 ua_sess = lookup_session_by_app(usess, app);
6104 if (ua_sess == NULL) {
6105 /* The session is in teardown process. Ignore and continue. */
6106 goto end;
6107 }
6108
6109 pthread_mutex_lock(&ua_sess->lock);
6110
6111 if (ua_sess->deleted) {
6112 goto end_unlock;
6113 }
6114
6115 pthread_mutex_lock(&app->sock_lock);
6116 ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
6117 pthread_mutex_unlock(&app->sock_lock);
6118
6119 end_unlock:
6120 pthread_mutex_unlock(&ua_sess->lock);
6121
6122 end:
6123 rcu_read_unlock();
6124 health_code_update();
6125 return ret;
6126 }
6127
6128 /*
6129 * Regenerate the statedump for each app in the session.
6130 */
6131 int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
6132 {
6133 int ret = 0;
6134 struct lttng_ht_iter iter;
6135 struct ust_app *app;
6136
6137 DBG("Regenerating the metadata for all UST apps");
6138
6139 rcu_read_lock();
6140
6141 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6142 if (!app->compatible) {
6143 continue;
6144 }
6145
6146 ret = ust_app_regenerate_statedump(usess, app);
6147 if (ret < 0) {
6148 /* Continue to the next app even on error */
6149 continue;
6150 }
6151 }
6152
6153 rcu_read_unlock();
6154
6155 return 0;
6156 }
This page took 0.203794 seconds and 4 git commands to generate.