SoW-2020-0002: Trace Hit Counters: trigger error reporting integration
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
3 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
4 *
5 * SPDX-License-Identifier: GPL-2.0-only
6 *
7 */
8
9 #define _LGPL_SOURCE
10 #include <errno.h>
11 #include <fcntl.h>
12 #include <inttypes.h>
13 #include <pthread.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <sys/mman.h>
18 #include <sys/stat.h>
19 #include <sys/types.h>
20 #include <unistd.h>
21 #include <urcu/compiler.h>
22 #include <signal.h>
23
24 #include <common/bytecode/bytecode.h>
25 #include <common/common.h>
26 #include <common/hashtable/utils.h>
27 #include <lttng/event-rule/event-rule.h>
28 #include <lttng/event-rule/event-rule-internal.h>
29 #include <lttng/event-rule/tracepoint.h>
30 #include <lttng/condition/condition.h>
31 #include <lttng/condition/event-rule-internal.h>
32 #include <lttng/condition/event-rule.h>
33 #include <lttng/trigger/trigger-internal.h>
34 #include <common/sessiond-comm/sessiond-comm.h>
35
36 #include "buffer-registry.h"
37 #include "condition-internal.h"
38 #include "fd-limit.h"
39 #include "health-sessiond.h"
40 #include "ust-app.h"
41 #include "ust-consumer.h"
42 #include "lttng-ust-ctl.h"
43 #include "lttng-ust-error.h"
44 #include "utils.h"
45 #include "session.h"
46 #include "lttng-sessiond.h"
47 #include "notification-thread-commands.h"
48 #include "rotate.h"
49 #include "event.h"
50 #include "trigger-error-accounting.h"
51
52
/*
 * Global application hash tables, keyed respectively by PID, by command
 * socket and by notify socket (see lookups below in this file).
 */
struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;

/* Forward declaration; defined later in this file. */
static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
67
68 /*
69 * Return the incremented value of next_channel_key.
70 */
71 static uint64_t get_next_channel_key(void)
72 {
73 uint64_t ret;
74
75 pthread_mutex_lock(&next_channel_key_lock);
76 ret = ++_next_channel_key;
77 pthread_mutex_unlock(&next_channel_key_lock);
78 return ret;
79 }
80
81 /*
82 * Return the atomically incremented value of next_session_id.
83 */
84 static uint64_t get_next_session_id(void)
85 {
86 uint64_t ret;
87
88 pthread_mutex_lock(&next_session_id_lock);
89 ret = ++_next_session_id;
90 pthread_mutex_unlock(&next_session_id_lock);
91 return ret;
92 }
93
/*
 * Copy channel attributes from the lttng_ust representation to the
 * ustctl consumer representation. The two structure layouts differ, so
 * a field-by-field copy is required.
 */
static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	/* Copy channel attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
	/* The blocking timeout is nested in a union on the UST side. */
	attr->blocking_timeout = uattr->u.s.blocking_timeout;
}
107
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes which are the event
 * name, the filter bytecode, the loglevel and the exclusion list.
 *
 * Return 1 on match, 0 otherwise.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exists, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exists, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}


	/* Match. */
	return 1;

no_match:
	return 0;
}
185
186 /*
187 * Unique add of an ust app event in the given ht. This uses the custom
188 * ht_match_ust_app_event match function and the event name as hash.
189 */
190 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
191 struct ust_app_event *event)
192 {
193 struct cds_lfht_node *node_ptr;
194 struct ust_app_ht_key key;
195 struct lttng_ht *ht;
196
197 assert(ua_chan);
198 assert(ua_chan->events);
199 assert(event);
200
201 ht = ua_chan->events;
202 key.name = event->attr.name;
203 key.filter = event->filter;
204 key.loglevel_type = event->attr.loglevel;
205 key.exclusion = event->exclusion;
206
207 node_ptr = cds_lfht_add_unique(ht->ht,
208 ht->hash_fct(event->node.key, lttng_ht_seed),
209 ht_match_ust_app_event, &key, &event->node.node);
210 assert(node_ptr == &event->node.node);
211 }
212
213 /*
214 * Close the notify socket from the given RCU head object. This MUST be called
215 * through a call_rcu().
216 */
217 static void close_notify_sock_rcu(struct rcu_head *head)
218 {
219 int ret;
220 struct ust_app_notify_sock_obj *obj =
221 caa_container_of(head, struct ust_app_notify_sock_obj, head);
222
223 /* Must have a valid fd here. */
224 assert(obj->fd >= 0);
225
226 ret = close(obj->fd);
227 if (ret) {
228 ERR("close notify sock %d RCU", obj->fd);
229 }
230 lttng_fd_put(LTTNG_FD_APPS, 1);
231
232 free(obj);
233 }
234
235 /*
236 * Return the session registry according to the buffer type of the given
237 * session.
238 *
239 * A registry per UID object MUST exists before calling this function or else
240 * it assert() if not found. RCU read side lock must be acquired.
241 */
242 static struct ust_registry_session *get_session_registry(
243 struct ust_app_session *ua_sess)
244 {
245 struct ust_registry_session *registry = NULL;
246
247 assert(ua_sess);
248
249 switch (ua_sess->buffer_type) {
250 case LTTNG_BUFFER_PER_PID:
251 {
252 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
253 if (!reg_pid) {
254 goto error;
255 }
256 registry = reg_pid->registry->reg.ust;
257 break;
258 }
259 case LTTNG_BUFFER_PER_UID:
260 {
261 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
262 ua_sess->tracing_id, ua_sess->bits_per_long,
263 lttng_credentials_get_uid(&ua_sess->real_credentials));
264 if (!reg_uid) {
265 goto error;
266 }
267 registry = reg_uid->registry->reg.ust;
268 break;
269 }
270 default:
271 assert(0);
272 };
273
274 error:
275 return registry;
276 }
277
278 /*
279 * Delete ust context safely. RCU read lock must be held before calling
280 * this function.
281 */
282 static
283 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
284 struct ust_app *app)
285 {
286 int ret;
287
288 assert(ua_ctx);
289
290 if (ua_ctx->obj) {
291 pthread_mutex_lock(&app->sock_lock);
292 ret = ustctl_release_object(sock, ua_ctx->obj);
293 pthread_mutex_unlock(&app->sock_lock);
294 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
295 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
296 sock, ua_ctx->obj->handle, ret);
297 }
298 free(ua_ctx->obj);
299 }
300 free(ua_ctx);
301 }
302
303 /*
304 * Delete ust app event safely. RCU read lock must be held before calling
305 * this function.
306 */
307 static
308 void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
309 struct ust_app *app)
310 {
311 int ret;
312
313 assert(ua_event);
314
315 free(ua_event->filter);
316 if (ua_event->exclusion != NULL)
317 free(ua_event->exclusion);
318 if (ua_event->obj != NULL) {
319 pthread_mutex_lock(&app->sock_lock);
320 ret = ustctl_release_object(sock, ua_event->obj);
321 pthread_mutex_unlock(&app->sock_lock);
322 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
323 ERR("UST app sock %d release event obj failed with ret %d",
324 sock, ret);
325 }
326 free(ua_event->obj);
327 }
328 free(ua_event);
329 }
330
331 /*
332 * Delete ust app token event_rule safely. RCU read lock must be held before calling
333 * this function. TODO: or does it????
334 */
335 static
336 void delete_ust_app_token_event_rule(int sock, struct ust_app_token_event_rule *ua_token,
337 struct ust_app *app)
338 {
339 int ret;
340
341 assert(ua_token);
342
343 if (ua_token->exclusion != NULL)
344 free(ua_token->exclusion);
345 if (ua_token->obj != NULL) {
346 pthread_mutex_lock(&app->sock_lock);
347 ret = ustctl_release_object(sock, ua_token->obj);
348 pthread_mutex_unlock(&app->sock_lock);
349 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
350 ERR("UST app sock %d release event obj failed with ret %d",
351 sock, ret);
352 }
353 free(ua_token->obj);
354 }
355 lttng_trigger_put(ua_token->trigger);
356 free(ua_token);
357 }
358
359 /*
360 * Release ust data object of the given stream.
361 *
362 * Return 0 on success or else a negative value.
363 */
364 static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
365 struct ust_app *app)
366 {
367 int ret = 0;
368
369 assert(stream);
370
371 if (stream->obj) {
372 pthread_mutex_lock(&app->sock_lock);
373 ret = ustctl_release_object(sock, stream->obj);
374 pthread_mutex_unlock(&app->sock_lock);
375 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
376 ERR("UST app sock %d release stream obj failed with ret %d",
377 sock, ret);
378 }
379 lttng_fd_put(LTTNG_FD_APPS, 2);
380 free(stream->obj);
381 }
382
383 return ret;
384 }
385
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	/* Best-effort release; the stream memory is freed regardless. */
	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}
399
400 /*
401 * We need to execute ht_destroy outside of RCU read-side critical
402 * section and outside of call_rcu thread, so we postpone its execution
403 * using ht_cleanup_push. It is simpler than to change the semantic of
404 * the many callers of delete_ust_app_session().
405 */
406 static
407 void delete_ust_app_channel_rcu(struct rcu_head *head)
408 {
409 struct ust_app_channel *ua_chan =
410 caa_container_of(head, struct ust_app_channel, rcu_head);
411
412 ht_cleanup_push(ua_chan->ctx);
413 ht_cleanup_push(ua_chan->events);
414 free(ua_chan);
415 }
416
/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * Only applies to per-CPU channels; other channel types are ignored.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	/* Takes a reference on the session; released via session_put() below. */
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	/* Overwrite mode loses packets; discard mode discards events. */
	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	/* Accumulate into the parent channel for later reporting. */
	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
}
482
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 *
 * Streams, contexts and events are torn down first, then the per-PID
 * registry bookkeeping, then the channel object itself; the channel
 * structure is freed after an RCU grace period.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key,
				sock >= 0);
		}
		/*
		 * A negative socket can be used by the caller when
		 * cleaning-up a ua_chan in an error path. Skip the
		 * accounting in this case.
		 */
		if (sock >= 0) {
			save_per_pid_lost_discarded_counters(ua_chan);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		/* EPIPE / EXITING simply mean the application went away. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	/* Free the channel structure after an RCU grace period. */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
560
561 int ust_app_register_done(struct ust_app *app)
562 {
563 int ret;
564
565 pthread_mutex_lock(&app->sock_lock);
566 ret = ustctl_register_done(app->sock);
567 pthread_mutex_unlock(&app->sock_lock);
568 return ret;
569 }
570
571 int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
572 {
573 int ret, sock;
574
575 if (app) {
576 pthread_mutex_lock(&app->sock_lock);
577 sock = app->sock;
578 } else {
579 sock = -1;
580 }
581 ret = ustctl_release_object(sock, data);
582 if (app) {
583 pthread_mutex_unlock(&app->sock_lock);
584 }
585 return ret;
586 }
587
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existance of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * WARNING: the registry lock is temporarily RELEASED around the push to
 * the consumer (see comment at push_data) and re-taken afterwards.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happens if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	/* Push only the portion not yet sent. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			/* metadata_str stays NULL: a zero-length push is sent. */
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectionnal communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
				new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	/* Only reached with ret_val == 0 (end) or a negative errno (error). */
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
723
/*
 * For a given application and session, push metadata to consumer.
 * Either sock or consumer is required : if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existance of registry. It also ensures existance
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		/* Metadata already closed; nothing can be pushed anymore. */
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	/* Note: may drop and re-take the registry lock internally. */
	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}
774
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Do not hold the registry lock while communicating with the consumerd, because
 * doing so causes inter-process deadlocks between consumerd and sessiond with
 * the metadata request notification.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;
	uint64_t metadata_key;
	bool registry_was_already_closed;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/*
	 * Snapshot the registry state under its lock; the consumer is
	 * contacted only after the lock has been released (see header).
	 */
	pthread_mutex_lock(&registry->lock);
	metadata_key = registry->metadata_key;
	registry_was_already_closed = registry->metadata_closed;
	if (metadata_key != 0) {
		/*
		 * Mark the registry closed up-front, before contacting the
		 * consumer. Even if the close command fails below (consumer
		 * not responding or not found), a second close must NOT be
		 * emitted for this registry.
		 */
		registry->metadata_closed = 1;
	}
	pthread_mutex_unlock(&registry->lock);

	if (metadata_key == 0 || registry_was_already_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto end;
	}

	ret = consumer_close_metadata(socket, metadata_key);
	if (ret < 0) {
		goto end;
	}

end:
	rcu_read_unlock();
	return ret;
}
835
836 /*
837 * We need to execute ht_destroy outside of RCU read-side critical
838 * section and outside of call_rcu thread, so we postpone its execution
839 * using ht_cleanup_push. It is simpler than to change the semantic of
840 * the many callers of delete_ust_app_session().
841 */
842 static
843 void delete_ust_app_session_rcu(struct rcu_head *head)
844 {
845 struct ust_app_session *ua_sess =
846 caa_container_of(head, struct ust_app_session, rcu_head);
847
848 ht_cleanup_push(ua_sess->channels);
849 free(ua_sess);
850 }
851
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 *
 * Pushes any pending metadata first, tears down all channels, removes
 * the per-PID buffer registry, releases the tracer-side session handle,
 * and finally frees the structure after an RCU grace period.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/* A session must only be deleted once. */
	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	/* Tear down every channel of this session. */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		/* EPIPE / EXITING simply mean the application went away. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	/* Free the session structure after an RCU grace period. */
	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
932
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;
	struct lttng_ht_iter iter;
	struct ust_app_token_event_rule *token;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/*
	 * Wipe token associated with the app.
	 *
	 * NOTE(review): app->sock is -1 at this point (reset above), so the
	 * token objects are released with a negative socket — presumably a
	 * deliberate local-only release since the app is being torn down;
	 * confirm, and note the saved 'sock' is used elsewhere in this
	 * function. Also confirm whether this cds_lfht iteration needs an
	 * rcu_read_lock() like the session teardown above.
	 */
	cds_lfht_for_each_entry(app->tokens_ht->ht, &iter.iter, token,
			node.node) {
		ret = lttng_ht_del(app->tokens_ht, &iter);
		assert(!ret);
		delete_ust_app_token_event_rule(app->sock, token, app);
	}

	/* Defer hash table destruction to the ht-cleanup thread. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);
	ht_cleanup_push(app->tokens_ht);

	/* This can happen if trigger setup failed. e.g killed app */
	if (app->token_communication.handle) {
		ustctl_release_object(sock, app->token_communication.handle);
		free(app->token_communication.handle);
	}

	lttng_pipe_destroy(app->token_communication.trigger_event_pipe);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}
1009
1010 /*
1011 * URCU intermediate call to delete an UST app.
1012 */
1013 static
1014 void delete_ust_app_rcu(struct rcu_head *head)
1015 {
1016 struct lttng_ht_node_ulong *node =
1017 caa_container_of(head, struct lttng_ht_node_ulong, head);
1018 struct ust_app *app =
1019 caa_container_of(node, struct ust_app, pid_n);
1020
1021 DBG3("Call RCU deleting app PID %d", app->pid);
1022 delete_ust_app(app);
1023 }
1024
1025 /*
1026 * Delete the session from the application ht and delete the data structure by
1027 * freeing every object inside and releasing them.
1028 *
1029 * The session list lock must be held by the caller.
1030 */
1031 static void destroy_app_session(struct ust_app *app,
1032 struct ust_app_session *ua_sess)
1033 {
1034 int ret;
1035 struct lttng_ht_iter iter;
1036
1037 assert(app);
1038 assert(ua_sess);
1039
1040 iter.iter.node = &ua_sess->node.node;
1041 ret = lttng_ht_del(app->sessions, &iter);
1042 if (ret) {
1043 /* Already scheduled for teardown. */
1044 goto end;
1045 }
1046
1047 /* Once deleted, free the data structure. */
1048 delete_ust_app_session(app->sock, ua_sess, app);
1049
1050 end:
1051 return;
1052 }
1053
1054 /*
1055 * Alloc new UST app session.
1056 */
1057 static
1058 struct ust_app_session *alloc_ust_app_session(void)
1059 {
1060 struct ust_app_session *ua_sess;
1061
1062 /* Init most of the default value by allocating and zeroing */
1063 ua_sess = zmalloc(sizeof(struct ust_app_session));
1064 if (ua_sess == NULL) {
1065 PERROR("malloc");
1066 goto error_free;
1067 }
1068
1069 ua_sess->handle = -1;
1070 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1071 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
1072 pthread_mutex_init(&ua_sess->lock, NULL);
1073
1074 return ua_sess;
1075
1076 error_free:
1077 return NULL;
1078 }
1079
1080 /*
1081 * Alloc new UST app channel.
1082 */
1083 static
1084 struct ust_app_channel *alloc_ust_app_channel(const char *name,
1085 struct ust_app_session *ua_sess,
1086 struct lttng_ust_channel_attr *attr)
1087 {
1088 struct ust_app_channel *ua_chan;
1089
1090 /* Init most of the default value by allocating and zeroing */
1091 ua_chan = zmalloc(sizeof(struct ust_app_channel));
1092 if (ua_chan == NULL) {
1093 PERROR("malloc");
1094 goto error;
1095 }
1096
1097 /* Setup channel name */
1098 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1099 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1100
1101 ua_chan->enabled = 1;
1102 ua_chan->handle = -1;
1103 ua_chan->session = ua_sess;
1104 ua_chan->key = get_next_channel_key();
1105 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1106 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1107 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
1108
1109 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
1110 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
1111
1112 /* Copy attributes */
1113 if (attr) {
1114 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
1115 ua_chan->attr.subbuf_size = attr->subbuf_size;
1116 ua_chan->attr.num_subbuf = attr->num_subbuf;
1117 ua_chan->attr.overwrite = attr->overwrite;
1118 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1119 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1120 ua_chan->attr.output = attr->output;
1121 ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
1122 }
1123 /* By default, the channel is a per cpu channel. */
1124 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1125
1126 DBG3("UST app channel %s allocated", ua_chan->name);
1127
1128 return ua_chan;
1129
1130 error:
1131 return NULL;
1132 }
1133
1134 /*
1135 * Allocate and initialize a UST app stream.
1136 *
1137 * Return newly allocated stream pointer or NULL on error.
1138 */
1139 struct ust_app_stream *ust_app_alloc_stream(void)
1140 {
1141 struct ust_app_stream *stream = NULL;
1142
1143 stream = zmalloc(sizeof(*stream));
1144 if (stream == NULL) {
1145 PERROR("zmalloc ust app stream");
1146 goto error;
1147 }
1148
1149 /* Zero could be a valid value for a handle so flag it to -1. */
1150 stream->handle = -1;
1151
1152 error:
1153 return stream;
1154 }
1155
1156 /*
1157 * Alloc new UST app event.
1158 */
1159 static
1160 struct ust_app_event *alloc_ust_app_event(char *name,
1161 struct lttng_ust_event *attr)
1162 {
1163 struct ust_app_event *ua_event;
1164
1165 /* Init most of the default value by allocating and zeroing */
1166 ua_event = zmalloc(sizeof(struct ust_app_event));
1167 if (ua_event == NULL) {
1168 PERROR("Failed to allocate ust_app_event structure");
1169 goto error;
1170 }
1171
1172 ua_event->enabled = 1;
1173 strncpy(ua_event->name, name, sizeof(ua_event->name));
1174 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1175 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
1176
1177 /* Copy attributes */
1178 if (attr) {
1179 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1180 }
1181
1182 DBG3("UST app event %s allocated", ua_event->name);
1183
1184 return ua_event;
1185
1186 error:
1187 return NULL;
1188 }
1189
1190 /*
1191 * Alloc new UST app token event rule.
1192 */
1193 static struct ust_app_token_event_rule *alloc_ust_app_token_event_rule(
1194 struct lttng_trigger *trigger)
1195 {
1196 struct ust_app_token_event_rule *ua_token;
1197 struct lttng_condition *condition = NULL;
1198 struct lttng_event_rule *event_rule = NULL;
1199
1200 ua_token = zmalloc(sizeof(struct ust_app_token_event_rule));
1201 if (ua_token == NULL) {
1202 PERROR("Failed to allocate ust_app_token_event_rule structure");
1203 goto error;
1204 }
1205
1206 /* Get reference of the trigger */
1207 /* TODO should this be like lttng_event_rule_get with a returned bool? */
1208 lttng_trigger_get(trigger);
1209
1210 ua_token->enabled = 1;
1211 ua_token->token = lttng_trigger_get_tracer_token(trigger);
1212 lttng_ht_node_init_u64(&ua_token->node, ua_token->token);
1213
1214 condition = lttng_trigger_get_condition(trigger);
1215 assert(condition);
1216 assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT);
1217
1218 assert(LTTNG_CONDITION_STATUS_OK == lttng_condition_event_rule_get_rule_mutable(condition, &event_rule));
1219 assert(event_rule);
1220
1221 ua_token->trigger = trigger;
1222 ua_token->filter = lttng_event_rule_get_filter_bytecode(event_rule);
1223 ua_token->exclusion = lttng_event_rule_generate_exclusions(event_rule);
1224 ua_token->error_counter_index = lttng_trigger_get_error_counter_index(trigger);
1225
1226 /* TODO put capture here? or later*/
1227
1228 DBG3("UST app token event rule %" PRIu64 " allocated", ua_token->token);
1229
1230 return ua_token;
1231
1232 error:
1233 return NULL;
1234 }
1235
1236 /*
1237 * Alloc new UST app context.
1238 */
1239 static
1240 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
1241 {
1242 struct ust_app_ctx *ua_ctx;
1243
1244 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
1245 if (ua_ctx == NULL) {
1246 goto error;
1247 }
1248
1249 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1250
1251 if (uctx) {
1252 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
1253 if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
1254 char *provider_name = NULL, *ctx_name = NULL;
1255
1256 provider_name = strdup(uctx->u.app_ctx.provider_name);
1257 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1258 if (!provider_name || !ctx_name) {
1259 free(provider_name);
1260 free(ctx_name);
1261 goto error;
1262 }
1263
1264 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1265 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1266 }
1267 }
1268
1269 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
1270 return ua_ctx;
1271 error:
1272 free(ua_ctx);
1273 return NULL;
1274 }
1275
1276 /*
1277 * Create a liblttng-ust filter bytecode from given bytecode.
1278 *
1279 * Return allocated filter or NULL on error.
1280 */
1281 static struct lttng_ust_filter_bytecode *
1282 create_ust_filter_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
1283 {
1284 struct lttng_ust_filter_bytecode *filter = NULL;
1285
1286 /* Copy filter bytecode */
1287 filter = zmalloc(sizeof(*filter) + orig_f->len);
1288 if (!filter) {
1289 PERROR("zmalloc alloc ust filter bytecode");
1290 goto error;
1291 }
1292
1293 assert(sizeof(struct lttng_bytecode) ==
1294 sizeof(struct lttng_ust_filter_bytecode));
1295 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1296 error:
1297 return filter;
1298 }
1299
1300 /*
1301 * Create a liblttng-ust capture bytecode from given bytecode.
1302 *
1303 * Return allocated filter or NULL on error.
1304 */
1305 static struct lttng_ust_capture_bytecode *
1306 create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
1307 {
1308 struct lttng_ust_capture_bytecode *capture = NULL;
1309
1310 /* Copy capture bytecode */
1311 capture = zmalloc(sizeof(*capture) + orig_f->len);
1312 if (!capture) {
1313 PERROR("zmalloc alloc ust capture bytecode");
1314 goto error;
1315 }
1316
1317 assert(sizeof(struct lttng_bytecode) ==
1318 sizeof(struct lttng_ust_capture_bytecode));
1319 memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
1320 error:
1321 return capture;
1322 }
1323
1324 /*
1325 * Find an ust_app using the sock and return it. RCU read side lock must be
1326 * held before calling this helper function.
1327 */
1328 struct ust_app *ust_app_find_by_sock(int sock)
1329 {
1330 struct lttng_ht_node_ulong *node;
1331 struct lttng_ht_iter iter;
1332
1333 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1334 node = lttng_ht_iter_get_node_ulong(&iter);
1335 if (node == NULL) {
1336 DBG2("UST app find by sock %d not found", sock);
1337 goto error;
1338 }
1339
1340 return caa_container_of(node, struct ust_app, sock_n);
1341
1342 error:
1343 return NULL;
1344 }
1345
1346 /*
1347 * Find an ust_app using the notify sock and return it. RCU read side lock must
1348 * be held before calling this helper function.
1349 */
1350 static struct ust_app *find_app_by_notify_sock(int sock)
1351 {
1352 struct lttng_ht_node_ulong *node;
1353 struct lttng_ht_iter iter;
1354
1355 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1356 &iter);
1357 node = lttng_ht_iter_get_node_ulong(&iter);
1358 if (node == NULL) {
1359 DBG2("UST app find by notify sock %d not found", sock);
1360 goto error;
1361 }
1362
1363 return caa_container_of(node, struct ust_app, notify_sock_n);
1364
1365 error:
1366 return NULL;
1367 }
1368
1369 /*
1370 * Lookup for an ust app event based on event name, filter bytecode and the
1371 * event loglevel.
1372 *
1373 * Return an ust_app_event object or NULL on error.
1374 */
1375 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1376 const char *name, const struct lttng_bytecode *filter,
1377 int loglevel_value,
1378 const struct lttng_event_exclusion *exclusion)
1379 {
1380 struct lttng_ht_iter iter;
1381 struct lttng_ht_node_str *node;
1382 struct ust_app_event *event = NULL;
1383 struct ust_app_ht_key key;
1384
1385 assert(name);
1386 assert(ht);
1387
1388 /* Setup key for event lookup. */
1389 key.name = name;
1390 key.filter = filter;
1391 key.loglevel_type = loglevel_value;
1392 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1393 key.exclusion = exclusion;
1394
1395 /* Lookup using the event name as hash and a custom match fct. */
1396 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1397 ht_match_ust_app_event, &key, &iter.iter);
1398 node = lttng_ht_iter_get_node_str(&iter);
1399 if (node == NULL) {
1400 goto end;
1401 }
1402
1403 event = caa_container_of(node, struct ust_app_event, node);
1404
1405 end:
1406 return event;
1407 }
1408
1409 /*
1410 * Lookup for an ust app tokens based on a token id.
1411 *
1412 * Return an ust_app_token_event_rule object or NULL on error.
1413 */
1414 static struct ust_app_token_event_rule *find_ust_app_token_event_rule(struct lttng_ht *ht,
1415 uint64_t token)
1416 {
1417 struct lttng_ht_iter iter;
1418 struct lttng_ht_node_u64 *node;
1419 struct ust_app_token_event_rule *token_event_rule = NULL;
1420
1421 assert(ht);
1422
1423 lttng_ht_lookup(ht, &token, &iter);
1424 node = lttng_ht_iter_get_node_u64(&iter);
1425 if (node == NULL) {
1426 DBG2("UST app token %" PRIu64 " not found", token);
1427 goto end;
1428 }
1429
1430 token_event_rule = caa_container_of(node, struct ust_app_token_event_rule, node);
1431 end:
1432 return token_event_rule;
1433 }
1434
1435 /*
1436 * Create the channel context on the tracer.
1437 *
1438 * Called with UST app session lock held.
1439 */
1440 static
1441 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1442 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1443 {
1444 int ret;
1445
1446 health_code_update();
1447
1448 pthread_mutex_lock(&app->sock_lock);
1449 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1450 ua_chan->obj, &ua_ctx->obj);
1451 pthread_mutex_unlock(&app->sock_lock);
1452 if (ret < 0) {
1453 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1454 ERR("UST app create channel context failed for app (pid: %d) "
1455 "with ret %d", app->pid, ret);
1456 } else {
1457 /*
1458 * This is normal behavior, an application can die during the
1459 * creation process. Don't report an error so the execution can
1460 * continue normally.
1461 */
1462 ret = 0;
1463 DBG3("UST app add context failed. Application is dead.");
1464 }
1465 goto error;
1466 }
1467
1468 ua_ctx->handle = ua_ctx->obj->handle;
1469
1470 DBG2("UST app context handle %d created successfully for channel %s",
1471 ua_ctx->handle, ua_chan->name);
1472
1473 error:
1474 health_code_update();
1475 return ret;
1476 }
1477
1478 /*
1479 * Set the filter on the tracer.
1480 */
1481 static int set_ust_filter(struct ust_app *app,
1482 const struct lttng_bytecode *bytecode,
1483 struct lttng_ust_object_data *ust_object)
1484 {
1485 int ret;
1486 struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
1487
1488 health_code_update();
1489
1490 ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
1491 if (!ust_bytecode) {
1492 ret = -LTTNG_ERR_NOMEM;
1493 goto error;
1494 }
1495 pthread_mutex_lock(&app->sock_lock);
1496 ret = ustctl_set_filter(app->sock, ust_bytecode,
1497 ust_object);
1498 pthread_mutex_unlock(&app->sock_lock);
1499 if (ret < 0) {
1500 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1501 ERR("UST app set filter failed for object %p of app (pid: %d) "
1502 "with ret %d", ust_object, app->pid, ret);
1503 } else {
1504 /*
1505 * This is normal behavior, an application can die during the
1506 * creation process. Don't report an error so the execution can
1507 * continue normally.
1508 */
1509 ret = 0;
1510 DBG3("UST app set filter. Application is dead.");
1511 }
1512 goto error;
1513 }
1514
1515 DBG2("UST filter set for object %p successfully", ust_object);
1516
1517 error:
1518 health_code_update();
1519 free(ust_bytecode);
1520 return ret;
1521 }
1522
1523 /*
1524 * Set a capture bytecode for the passed object.
1525 * The seqnum enforce the ordering at runtime and on reception.
1526 */
1527 static int set_ust_capture(struct ust_app *app,
1528 const struct lttng_bytecode *bytecode,
1529 unsigned int seqnum,
1530 struct lttng_ust_object_data *ust_object)
1531 {
1532 int ret;
1533 struct lttng_ust_capture_bytecode *ust_bytecode = NULL;
1534
1535 health_code_update();
1536
1537 ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
1538 if (!ust_bytecode) {
1539 ret = -LTTNG_ERR_NOMEM;
1540 goto error;
1541 }
1542
1543 /* Set the seqnum */
1544 ust_bytecode->seqnum = seqnum;
1545
1546 pthread_mutex_lock(&app->sock_lock);
1547 ret = ustctl_set_capture(app->sock, ust_bytecode,
1548 ust_object);
1549 pthread_mutex_unlock(&app->sock_lock);
1550 if (ret < 0) {
1551 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1552 ERR("UST app set capture failed for object %p of app (pid: %d) "
1553 "with ret %d", ust_object, app->pid, ret);
1554 } else {
1555 /*
1556 * This is normal behavior, an application can die during the
1557 * creation process. Don't report an error so the execution can
1558 * continue normally.
1559 */
1560 ret = 0;
1561 DBG3("UST app set capture. Application is dead.");
1562 }
1563 goto error;
1564 }
1565
1566 DBG2("UST capture set for object %p successfully", ust_object);
1567
1568 error:
1569 health_code_update();
1570 free(ust_bytecode);
1571 return ret;
1572 }
1573
1574 static
1575 struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
1576 struct lttng_event_exclusion *exclusion)
1577 {
1578 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1579 size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1580 LTTNG_UST_SYM_NAME_LEN * exclusion->count;
1581
1582 ust_exclusion = zmalloc(exclusion_alloc_size);
1583 if (!ust_exclusion) {
1584 PERROR("malloc");
1585 goto end;
1586 }
1587
1588 assert(sizeof(struct lttng_event_exclusion) ==
1589 sizeof(struct lttng_ust_event_exclusion));
1590 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1591 end:
1592 return ust_exclusion;
1593 }
1594
1595 /*
1596 * Set event exclusions on the tracer.
1597 */
1598 static int set_ust_exclusions(struct ust_app *app,
1599 struct lttng_event_exclusion *exclusions,
1600 struct lttng_ust_object_data *ust_object)
1601 {
1602 int ret;
1603 struct lttng_ust_event_exclusion *ust_exclusions = NULL;
1604
1605 assert(exclusions && exclusions->count > 0);
1606
1607 health_code_update();
1608
1609 ust_exclusions = create_ust_exclusion_from_exclusion(
1610 exclusions);
1611 if (!ust_exclusions) {
1612 ret = -LTTNG_ERR_NOMEM;
1613 goto error;
1614 }
1615 pthread_mutex_lock(&app->sock_lock);
1616 ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
1617 pthread_mutex_unlock(&app->sock_lock);
1618 if (ret < 0) {
1619 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1620 ERR("UST app exclusions failed for object %p of app (pid: %d) "
1621 "with ret %d", ust_object, app->pid, ret);
1622 } else {
1623 /*
1624 * This is normal behavior, an application can die during the
1625 * creation process. Don't report an error so the execution can
1626 * continue normally.
1627 */
1628 ret = 0;
1629 DBG3("UST app set exclusions failed. Application is dead.");
1630 }
1631 goto error;
1632 }
1633
1634 DBG2("UST exclusions set successfully for object %p", ust_object);
1635
1636 error:
1637 health_code_update();
1638 free(ust_exclusions);
1639 return ret;
1640 }
1641
1642 /*
1643 * Disable the specified event on to UST tracer for the UST session.
1644 */
1645 static int disable_ust_object(struct ust_app *app,
1646 struct lttng_ust_object_data *object)
1647 {
1648 int ret;
1649
1650 health_code_update();
1651
1652 pthread_mutex_lock(&app->sock_lock);
1653 ret = ustctl_disable(app->sock, object);
1654 pthread_mutex_unlock(&app->sock_lock);
1655 if (ret < 0) {
1656 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1657 ERR("UST app disable failed for object %p app (pid: %d) with ret %d",
1658 object, app->pid, ret);
1659 } else {
1660 /*
1661 * This is normal behavior, an application can die during the
1662 * creation process. Don't report an error so the execution can
1663 * continue normally.
1664 */
1665 ret = 0;
1666 DBG3("UST app disable event failed. Application is dead.");
1667 }
1668 goto error;
1669 }
1670
1671 DBG2("UST app object %p disabled successfully for app (pid: %d)",
1672 object, app->pid);
1673
1674 error:
1675 health_code_update();
1676 return ret;
1677 }
1678
1679 /*
1680 * Disable the specified channel on to UST tracer for the UST session.
1681 */
1682 static int disable_ust_channel(struct ust_app *app,
1683 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1684 {
1685 int ret;
1686
1687 health_code_update();
1688
1689 pthread_mutex_lock(&app->sock_lock);
1690 ret = ustctl_disable(app->sock, ua_chan->obj);
1691 pthread_mutex_unlock(&app->sock_lock);
1692 if (ret < 0) {
1693 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1694 ERR("UST app channel %s disable failed for app (pid: %d) "
1695 "and session handle %d with ret %d",
1696 ua_chan->name, app->pid, ua_sess->handle, ret);
1697 } else {
1698 /*
1699 * This is normal behavior, an application can die during the
1700 * creation process. Don't report an error so the execution can
1701 * continue normally.
1702 */
1703 ret = 0;
1704 DBG3("UST app disable channel failed. Application is dead.");
1705 }
1706 goto error;
1707 }
1708
1709 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1710 ua_chan->name, app->pid);
1711
1712 error:
1713 health_code_update();
1714 return ret;
1715 }
1716
1717 /*
1718 * Enable the specified channel on to UST tracer for the UST session.
1719 */
1720 static int enable_ust_channel(struct ust_app *app,
1721 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1722 {
1723 int ret;
1724
1725 health_code_update();
1726
1727 pthread_mutex_lock(&app->sock_lock);
1728 ret = ustctl_enable(app->sock, ua_chan->obj);
1729 pthread_mutex_unlock(&app->sock_lock);
1730 if (ret < 0) {
1731 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1732 ERR("UST app channel %s enable failed for app (pid: %d) "
1733 "and session handle %d with ret %d",
1734 ua_chan->name, app->pid, ua_sess->handle, ret);
1735 } else {
1736 /*
1737 * This is normal behavior, an application can die during the
1738 * creation process. Don't report an error so the execution can
1739 * continue normally.
1740 */
1741 ret = 0;
1742 DBG3("UST app enable channel failed. Application is dead.");
1743 }
1744 goto error;
1745 }
1746
1747 ua_chan->enabled = 1;
1748
1749 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1750 ua_chan->name, app->pid);
1751
1752 error:
1753 health_code_update();
1754 return ret;
1755 }
1756
1757 /*
1758 * Enable the specified event on to UST tracer for the UST session.
1759 */
1760 static int enable_ust_object(struct ust_app *app, struct lttng_ust_object_data *ust_object)
1761 {
1762 int ret;
1763
1764 health_code_update();
1765
1766 pthread_mutex_lock(&app->sock_lock);
1767 ret = ustctl_enable(app->sock, ust_object);
1768 pthread_mutex_unlock(&app->sock_lock);
1769 if (ret < 0) {
1770 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1771 ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
1772 ust_object, app->pid, ret);
1773 } else {
1774 /*
1775 * This is normal behavior, an application can die during the
1776 * creation process. Don't report an error so the execution can
1777 * continue normally.
1778 */
1779 ret = 0;
1780 DBG3("UST app enable failed. Application is dead.");
1781 }
1782 goto error;
1783 }
1784
1785 DBG2("UST app object %p enabled successfully for app (pid: %d)",
1786 ust_object, app->pid);
1787
1788 error:
1789 health_code_update();
1790 return ret;
1791 }
1792
1793 /*
1794 * Send channel and stream buffer to application.
1795 *
1796 * Return 0 on success. On error, a negative value is returned.
1797 */
1798 static int send_channel_pid_to_ust(struct ust_app *app,
1799 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1800 {
1801 int ret;
1802 struct ust_app_stream *stream, *stmp;
1803
1804 assert(app);
1805 assert(ua_sess);
1806 assert(ua_chan);
1807
1808 health_code_update();
1809
1810 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1811 app->sock);
1812
1813 /* Send channel to the application. */
1814 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1815 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1816 ret = -ENOTCONN; /* Caused by app exiting. */
1817 goto error;
1818 } else if (ret < 0) {
1819 goto error;
1820 }
1821
1822 health_code_update();
1823
1824 /* Send all streams to application. */
1825 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1826 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1827 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1828 ret = -ENOTCONN; /* Caused by app exiting. */
1829 goto error;
1830 } else if (ret < 0) {
1831 goto error;
1832 }
1833 /* We don't need the stream anymore once sent to the tracer. */
1834 cds_list_del(&stream->list);
1835 delete_ust_app_stream(-1, stream, app);
1836 }
1837 /* Flag the channel that it is sent to the application. */
1838 ua_chan->is_sent = 1;
1839
1840 error:
1841 health_code_update();
1842 return ret;
1843 }
1844
1845 /*
1846 * Create the specified event onto the UST tracer for a UST session.
1847 *
1848 * Should be called with session mutex held.
1849 */
1850 static
1851 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1852 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1853 {
1854 int ret = 0;
1855
1856 health_code_update();
1857
1858 /* Create UST event on tracer */
1859 pthread_mutex_lock(&app->sock_lock);
1860 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1861 &ua_event->obj);
1862 pthread_mutex_unlock(&app->sock_lock);
1863 if (ret < 0) {
1864 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1865 abort();
1866 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1867 ua_event->attr.name, app->pid, ret);
1868 } else {
1869 /*
1870 * This is normal behavior, an application can die during the
1871 * creation process. Don't report an error so the execution can
1872 * continue normally.
1873 */
1874 ret = 0;
1875 DBG3("UST app create event failed. Application is dead.");
1876 }
1877 goto error;
1878 }
1879
1880 ua_event->handle = ua_event->obj->handle;
1881
1882 DBG2("UST app event %s created successfully for pid:%d object: %p",
1883 ua_event->attr.name, app->pid, ua_event->obj);
1884
1885 health_code_update();
1886
1887 /* Set filter if one is present. */
1888 if (ua_event->filter) {
1889 ret = set_ust_filter(app, ua_event->filter, ua_event->obj);
1890 if (ret < 0) {
1891 goto error;
1892 }
1893 }
1894
1895 /* Set exclusions for the event */
1896 if (ua_event->exclusion) {
1897 ret = set_ust_exclusions(app, ua_event->exclusion, ua_event->obj);
1898 if (ret < 0) {
1899 goto error;
1900 }
1901 }
1902
1903 /* If event not enabled, disable it on the tracer */
1904 if (ua_event->enabled) {
1905 /*
1906 * We now need to explicitly enable the event, since it
1907 * is now disabled at creation.
1908 */
1909 ret = enable_ust_object(app, ua_event->obj);
1910 if (ret < 0) {
1911 /*
1912 * If we hit an EPERM, something is wrong with our enable call. If
1913 * we get an EEXIST, there is a problem on the tracer side since we
1914 * just created it.
1915 */
1916 switch (ret) {
1917 case -LTTNG_UST_ERR_PERM:
1918 /* Code flow problem */
1919 assert(0);
1920 case -LTTNG_UST_ERR_EXIST:
1921 /* It's OK for our use case. */
1922 ret = 0;
1923 break;
1924 default:
1925 break;
1926 }
1927 goto error;
1928 }
1929 }
1930
1931 error:
1932 health_code_update();
1933 return ret;
1934 }
1935
/*
 * Translate an event rule into the lttng_ust_trigger layout expected by
 * the tracer. 'trigger' is fully overwritten (zeroed first).
 */
static
void init_ust_trigger_from_event_rule(const struct lttng_event_rule *rule, struct lttng_ust_trigger *trigger)
{
	enum lttng_event_rule_status status;
	enum lttng_loglevel_type loglevel_type;
	enum lttng_ust_loglevel_type ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
	int loglevel = -1;
	const char *pattern;

	/* For now only LTTNG_EVENT_RULE_TYPE_TRACEPOINT are supported */
	assert(lttng_event_rule_get_type(rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);

	/* Zeroing also guarantees trigger->name stays NUL-terminated below. */
	memset(trigger, 0, sizeof(*trigger));

	if (lttng_event_rule_is_agent(rule)) {
		/*
		 * Special event for agents
		 * The actual meat of the event is in the filter that will be
		 * attached later on.
		 * Set the default values for the agent event.
		 */
		pattern = event_get_default_agent_ust_name(lttng_event_rule_get_domain_type(rule));
		loglevel = 0;
		ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
	} else {
		/* Pattern is mandatory for a tracepoint rule. */
		status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
		if (status != LTTNG_EVENT_RULE_STATUS_OK) {
			/* At this point this is a fatal error */
			assert(0);
		}

		status = lttng_event_rule_tracepoint_get_log_level_type(
				rule, &loglevel_type);
		if (status != LTTNG_EVENT_RULE_STATUS_OK) {
			/* At this point this is a fatal error */
			assert(0);
		}

		/* Map the sessiond loglevel type onto the UST one. */
		switch (loglevel_type) {
		case LTTNG_EVENT_LOGLEVEL_ALL:
			ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
			break;
		case LTTNG_EVENT_LOGLEVEL_RANGE:
			ust_loglevel_type = LTTNG_UST_LOGLEVEL_RANGE;
			break;
		case LTTNG_EVENT_LOGLEVEL_SINGLE:
			ust_loglevel_type = LTTNG_UST_LOGLEVEL_SINGLE;
			break;
		}

		/* The numeric loglevel only matters for RANGE/SINGLE. */
		if (loglevel_type != LTTNG_EVENT_LOGLEVEL_ALL) {
			status = lttng_event_rule_tracepoint_get_log_level(
					rule, &loglevel);
			assert(status == LTTNG_EVENT_RULE_STATUS_OK);
		}
	}

	trigger->instrumentation = LTTNG_UST_TRACEPOINT;
	strncpy(trigger->name, pattern, LTTNG_UST_SYM_NAME_LEN - 1);
	trigger->loglevel_type = ust_loglevel_type;
	trigger->loglevel = loglevel;
}
1998
1999 /*
2000 * Create the specified event rule token onto the UST tracer for a UST app.
2001 */
2002 static
2003 int create_ust_token_event_rule(struct ust_app *app, struct ust_app_token_event_rule *ua_token)
2004 {
2005 int ret = 0;
2006 struct lttng_ust_trigger trigger;
2007 struct lttng_condition *condition = NULL;
2008 struct lttng_event_rule *event_rule = NULL;
2009 unsigned int capture_bytecode_count = 0;
2010
2011 health_code_update();
2012 assert(app->token_communication.handle);
2013
2014 condition = lttng_trigger_get_condition(ua_token->trigger);
2015 assert(condition);
2016 assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT);
2017
2018 lttng_condition_event_rule_get_rule_mutable(condition, &event_rule);
2019 assert(event_rule);
2020 assert(lttng_event_rule_get_type(event_rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
2021 /* Should we also test for UST at this point, or do we trust all the
2022 * upper level? */
2023
2024 init_ust_trigger_from_event_rule(event_rule, &trigger);
2025
2026 trigger.id = ua_token->token;
2027 trigger.error_counter_index = ua_token->error_counter_index;
2028
2029 /* Create UST trigger on tracer */
2030 pthread_mutex_lock(&app->sock_lock);
2031 ret = ustctl_create_trigger(app->sock, &trigger, app->token_communication.handle, &ua_token->obj);
2032 pthread_mutex_unlock(&app->sock_lock);
2033 if (ret < 0) {
2034 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2035 abort();
2036 ERR("Error ustctl create trigger %s for app pid: %d with ret %d",
2037 trigger.name, app->pid, ret);
2038 } else {
2039 /*
2040 * This is normal behavior, an application can die during the
2041 * creation process. Don't report an error so the execution can
2042 * continue normally.
2043 */
2044 ret = 0;
2045 DBG3("UST app create event failed. Application is dead.");
2046 }
2047 goto error;
2048 }
2049
2050 ua_token->handle = ua_token->obj->handle;
2051
2052 DBG2("UST app event %s created successfully for pid:%d object: %p",
2053 trigger.name, app->pid, ua_token->obj);
2054
2055 health_code_update();
2056
2057 /* Set filter if one is present. */
2058 if (ua_token->filter) {
2059 ret = set_ust_filter(app, ua_token->filter, ua_token->obj);
2060 if (ret < 0) {
2061 goto error;
2062 }
2063 }
2064
2065 /* Set exclusions for the event */
2066 if (ua_token->exclusion) {
2067 ret = set_ust_exclusions(app, ua_token->exclusion, ua_token->obj);
2068 if (ret < 0) {
2069 goto error;
2070 }
2071 }
2072
2073 /* Set the capture bytecode
2074 * TODO: do we want to emulate what is done with exclusion and provide
2075 * and object with a count of capture bytecode? instead of multiple
2076 * call?
2077 * */
2078 capture_bytecode_count = lttng_trigger_get_capture_bytecode_count(ua_token->trigger);
2079 for (unsigned int i = 0; i < capture_bytecode_count; i++) {
2080 const struct lttng_bytecode *capture_bytecode = lttng_trigger_get_capture_bytecode_at_index(ua_token->trigger, i);
2081 ret = set_ust_capture(app, capture_bytecode, i, ua_token->obj);
2082 if (ret < 0) {
2083 goto error;
2084 }
2085 }
2086
2087 /*
2088 * We now need to explicitly enable the event, since it
2089 * is disabled at creation.
2090 */
2091 ret = enable_ust_object(app, ua_token->obj);
2092 if (ret < 0) {
2093 /*
2094 * If we hit an EPERM, something is wrong with our enable call. If
2095 * we get an EEXIST, there is a problem on the tracer side since we
2096 * just created it.
2097 */
2098 switch (ret) {
2099 case -LTTNG_UST_ERR_PERM:
2100 /* Code flow problem */
2101 assert(0);
2102 case -LTTNG_UST_ERR_EXIST:
2103 /* It's OK for our use case. */
2104 ret = 0;
2105 break;
2106 default:
2107 break;
2108 }
2109 goto error;
2110 }
2111 ua_token->enabled = true;
2112
2113 error:
2114 health_code_update();
2115 return ret;
2116 }
2117
2118 /*
2119 * Copy data between an UST app event and a LTT event.
2120 */
2121 static void shadow_copy_event(struct ust_app_event *ua_event,
2122 struct ltt_ust_event *uevent)
2123 {
2124 size_t exclusion_alloc_size;
2125
2126 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
2127 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
2128
2129 ua_event->enabled = uevent->enabled;
2130
2131 /* Copy event attributes */
2132 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
2133
2134 /* Copy filter bytecode */
2135 if (uevent->filter) {
2136 ua_event->filter = bytecode_copy(uevent->filter);
2137 /* Filter might be NULL here in case of ENONEM. */
2138 }
2139
2140 /* Copy exclusion data */
2141 if (uevent->exclusion) {
2142 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
2143 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
2144 ua_event->exclusion = zmalloc(exclusion_alloc_size);
2145 if (ua_event->exclusion == NULL) {
2146 PERROR("malloc");
2147 } else {
2148 memcpy(ua_event->exclusion, uevent->exclusion,
2149 exclusion_alloc_size);
2150 }
2151 }
2152 }
2153
2154 /*
2155 * Copy data between an UST app channel and a LTT channel.
2156 */
2157 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
2158 struct ltt_ust_channel *uchan)
2159 {
2160 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
2161
2162 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2163 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
2164
2165 ua_chan->tracefile_size = uchan->tracefile_size;
2166 ua_chan->tracefile_count = uchan->tracefile_count;
2167
2168 /* Copy event attributes since the layout is different. */
2169 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2170 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2171 ua_chan->attr.overwrite = uchan->attr.overwrite;
2172 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2173 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
2174 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
2175 ua_chan->attr.output = uchan->attr.output;
2176 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2177
2178 /*
2179 * Note that the attribute channel type is not set since the channel on the
2180 * tracing registry side does not have this information.
2181 */
2182
2183 ua_chan->enabled = uchan->enabled;
2184 ua_chan->tracing_channel_id = uchan->id;
2185
2186 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
2187 }
2188
/*
 * Copy data between a UST app session and a regular LTT session.
 *
 * Initializes ua_sess (ids, credentials, consumer output reference, trace
 * path and shm path) from the tracing session and the application's
 * properties. A reference is taken on usess->consumer; it is released on
 * the error path only.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct tm *timeinfo;
	char datetime[16];
	int ret;
	char tmp_shm_path[PATH_MAX];

	/*
	 * Timestamp used to build per-PID trace/shm paths.
	 * NOTE(review): localtime() can return NULL (e.g. EOVERFLOW) and
	 * strftime would then dereference it — confirm registration_time is
	 * always representable.
	 */
	timeinfo = localtime(&app->registration_time);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	/* Unique app-session id, distinct from the tracing session id. */
	ua_sess->id = get_next_session_id();
	/* Real credentials come from the app; effective from the session. */
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	/* Trace output path depends on the buffer ownership model. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
				datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_UID_PATH,
				lttng_credentials_get_uid(&ua_sess->real_credentials),
				app->bits_per_long);
		break;
	default:
		assert(0);
		goto error;
	}
	if (ret < 0) {
		PERROR("asprintf UST shadow copy session");
		assert(0);
		goto error;
	}

	/* Shared memory paths; strncpy results are explicitly terminated. */
	strncpy(ua_sess->root_shm_path, usess->root_shm_path,
			sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path,
			sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
		/* A session shm path is set: append a per-PID/UID suffix. */
		switch (ua_sess->buffer_type) {
		case LTTNG_BUFFER_PER_PID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
					app->name, app->pid, datetime);
			break;
		case LTTNG_BUFFER_PER_UID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_UID_PATH,
					app->uid, app->bits_per_long);
			break;
		default:
			assert(0);
			goto error;
		}
		if (ret < 0) {
			PERROR("sprintf UST shadow copy session");
			assert(0);
			goto error;
		}
		/* Bounded concatenation; result stays NUL-terminated. */
		strncat(ua_sess->shm_path, tmp_shm_path,
				sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
		ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	}
	return;

error:
	/* Drop the consumer reference taken above. */
	consumer_output_put(ua_sess->consumer);
}
2281
2282 /*
2283 * Lookup sesison wrapper.
2284 */
2285 static
2286 void __lookup_session_by_app(const struct ltt_ust_session *usess,
2287 struct ust_app *app, struct lttng_ht_iter *iter)
2288 {
2289 /* Get right UST app session from app */
2290 lttng_ht_lookup(app->sessions, &usess->id, iter);
2291 }
2292
2293 /*
2294 * Return ust app session from the app session hashtable using the UST session
2295 * id.
2296 */
2297 static struct ust_app_session *lookup_session_by_app(
2298 const struct ltt_ust_session *usess, struct ust_app *app)
2299 {
2300 struct lttng_ht_iter iter;
2301 struct lttng_ht_node_u64 *node;
2302
2303 __lookup_session_by_app(usess, app, &iter);
2304 node = lttng_ht_iter_get_node_u64(&iter);
2305 if (node == NULL) {
2306 goto error;
2307 }
2308
2309 return caa_container_of(node, struct ust_app_session, node);
2310
2311 error:
2312 return NULL;
2313 }
2314
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	/* Lookup and publication happen under the RCU read-side lock. */
	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
			ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists; just report it through regp. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
		app->bits_per_long, app->uint8_t_alignment,
		app->uint16_t_alignment, app->uint32_t_alignment,
		app->uint64_t_alignment, app->long_alignment,
		app->byte_order, app->version.major, app->version.minor,
		reg_pid->root_shm_path, reg_pid->shm_path,
		lttng_credentials_get_uid(&ua_sess->effective_credentials),
		lttng_credentials_get_gid(&ua_sess->effective_credentials),
		ua_sess->tracing_id,
		app->uid);
	if (ret < 0) {
		/*
		 * reg_pid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_pid_destroy(reg_pid);
		goto error;
	}

	/* Only publish the registry once fully initialized. */
	buffer_reg_pid_add(reg_pid);

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
2382
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	/* Lookup and publication happen under the RCU read-side lock. */
	rcu_read_lock();

	/* Per-UID registries are keyed on (session id, bitness, uid). */
	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists; just report it through regp. */
		goto end;
	}

	/*
	 * Initialize registry. The app argument is NULL: a per-UID registry
	 * is shared across applications and not tied to a single one.
	 */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_uid->root_shm_path,
			reg_uid->shm_path, usess->uid, usess->gid,
			ua_sess->tracing_id, app->uid);
	if (ret < 0) {
		/*
		 * reg_uid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_uid_destroy(reg_uid, NULL);
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	/* Only publish the registry once fully initialized. */
	buffer_reg_uid_add(reg_uid);

	DBG3("UST app buffer registry per UID created successfully");
end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
2451
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse the app session matching this tracing session, if any. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
			app->pid, usess->id);
		ua_sess = alloc_ust_app_session();
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Ensure the matching buffer registry (per-PID or per-UID) exists. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* A handle of -1 means no tracer-side session exists yet. */
	if (ua_sess->handle == -1) {
		/* The app socket is shared; serialize commands on it. */
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* On success, ustctl_create_session returns the handle. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
		lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
		lttng_ht_add_unique_ulong(app->ust_sessions_objd,
				&ua_sess->ust_objd_node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
2569
2570 /*
2571 * Match function for a hash table lookup of ust_app_ctx.
2572 *
2573 * It matches an ust app context based on the context type and, in the case
2574 * of perf counters, their name.
2575 */
2576 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2577 {
2578 struct ust_app_ctx *ctx;
2579 const struct lttng_ust_context_attr *key;
2580
2581 assert(node);
2582 assert(_key);
2583
2584 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2585 key = _key;
2586
2587 /* Context type */
2588 if (ctx->ctx.ctx != key->ctx) {
2589 goto no_match;
2590 }
2591
2592 switch(key->ctx) {
2593 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
2594 if (strncmp(key->u.perf_counter.name,
2595 ctx->ctx.u.perf_counter.name,
2596 sizeof(key->u.perf_counter.name))) {
2597 goto no_match;
2598 }
2599 break;
2600 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2601 if (strcmp(key->u.app_ctx.provider_name,
2602 ctx->ctx.u.app_ctx.provider_name) ||
2603 strcmp(key->u.app_ctx.ctx_name,
2604 ctx->ctx.u.app_ctx.ctx_name)) {
2605 goto no_match;
2606 }
2607 break;
2608 default:
2609 break;
2610 }
2611
2612 /* Match. */
2613 return 1;
2614
2615 no_match:
2616 return 0;
2617 }
2618
2619 /*
2620 * Lookup for an ust app context from an lttng_ust_context.
2621 *
2622 * Must be called while holding RCU read side lock.
2623 * Return an ust_app_ctx object or NULL on error.
2624 */
2625 static
2626 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2627 struct lttng_ust_context_attr *uctx)
2628 {
2629 struct lttng_ht_iter iter;
2630 struct lttng_ht_node_ulong *node;
2631 struct ust_app_ctx *app_ctx = NULL;
2632
2633 assert(uctx);
2634 assert(ht);
2635
2636 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2637 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2638 ht_match_ust_app_ctx, uctx, &iter.iter);
2639 node = lttng_ht_iter_get_node_ulong(&iter);
2640 if (!node) {
2641 goto end;
2642 }
2643
2644 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2645
2646 end:
2647 return app_ctx;
2648 }
2649
2650 /*
2651 * Create a context for the channel on the tracer.
2652 *
2653 * Called with UST app session lock held and a RCU read side lock.
2654 */
2655 static
2656 int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
2657 struct lttng_ust_context_attr *uctx,
2658 struct ust_app *app)
2659 {
2660 int ret = 0;
2661 struct ust_app_ctx *ua_ctx;
2662
2663 DBG2("UST app adding context to channel %s", ua_chan->name);
2664
2665 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2666 if (ua_ctx) {
2667 ret = -EEXIST;
2668 goto error;
2669 }
2670
2671 ua_ctx = alloc_ust_app_ctx(uctx);
2672 if (ua_ctx == NULL) {
2673 /* malloc failed */
2674 ret = -ENOMEM;
2675 goto error;
2676 }
2677
2678 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2679 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2680 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2681
2682 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2683 if (ret < 0) {
2684 goto error;
2685 }
2686
2687 error:
2688 return ret;
2689 }
2690
2691 /*
2692 * Enable on the tracer side a ust app event for the session and channel.
2693 *
2694 * Called with UST app session lock held.
2695 */
2696 static
2697 int enable_ust_app_event(struct ust_app_session *ua_sess,
2698 struct ust_app_event *ua_event, struct ust_app *app)
2699 {
2700 int ret;
2701
2702 ret = enable_ust_object(app, ua_event->obj);
2703 if (ret < 0) {
2704 goto error;
2705 }
2706
2707 ua_event->enabled = 1;
2708
2709 error:
2710 return ret;
2711 }
2712
2713 /*
2714 * Disable on the tracer side a ust app event for the session and channel.
2715 */
2716 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2717 struct ust_app_event *ua_event, struct ust_app *app)
2718 {
2719 int ret;
2720
2721 ret = disable_ust_object(app, ua_event->obj);
2722 if (ret < 0) {
2723 goto error;
2724 }
2725
2726 ua_event->enabled = 0;
2727
2728 error:
2729 return ret;
2730 }
2731
2732 /*
2733 * Lookup ust app channel for session and disable it on the tracer side.
2734 */
2735 static
2736 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2737 struct ust_app_channel *ua_chan, struct ust_app *app)
2738 {
2739 int ret;
2740
2741 ret = disable_ust_channel(app, ua_sess, ua_chan);
2742 if (ret < 0) {
2743 goto error;
2744 }
2745
2746 ua_chan->enabled = 0;
2747
2748 error:
2749 return ret;
2750 }
2751
2752 /*
2753 * Lookup ust app channel for session and enable it on the tracer side. This
2754 * MUST be called with a RCU read side lock acquired.
2755 */
2756 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2757 struct ltt_ust_channel *uchan, struct ust_app *app)
2758 {
2759 int ret = 0;
2760 struct lttng_ht_iter iter;
2761 struct lttng_ht_node_str *ua_chan_node;
2762 struct ust_app_channel *ua_chan;
2763
2764 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2765 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2766 if (ua_chan_node == NULL) {
2767 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2768 uchan->name, ua_sess->tracing_id);
2769 goto error;
2770 }
2771
2772 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2773
2774 ret = enable_ust_channel(app, ua_sess, ua_chan);
2775 if (ret < 0) {
2776 goto error;
2777 }
2778
2779 error:
2780 return ret;
2781 }
2782
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * Called with UST app session lock held.
 *
 * Return 0 on success or else a negative value.
 *
 * NOTE(review): the trace_archive_id parameter is not used in this body;
 * the consumer is asked with usess->current_trace_chunk instead — confirm
 * the parameter is still needed by callers.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry,
		uint64_t trace_archive_id)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry, usess->current_trace_chunk);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call wil populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

	/* Error unwinding: each label releases what the stage above acquired. */
error_destroy:
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2879
2880 /*
2881 * Duplicate the ust data object of the ust app stream and save it in the
2882 * buffer registry stream.
2883 *
2884 * Return 0 on success or else a negative value.
2885 */
2886 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2887 struct ust_app_stream *stream)
2888 {
2889 int ret;
2890
2891 assert(reg_stream);
2892 assert(stream);
2893
2894 /* Reserve the amount of file descriptor we need. */
2895 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2896 if (ret < 0) {
2897 ERR("Exhausted number of available FD upon duplicate stream");
2898 goto error;
2899 }
2900
2901 /* Duplicate object for stream once the original is in the registry. */
2902 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2903 reg_stream->obj.ust);
2904 if (ret < 0) {
2905 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2906 reg_stream->obj.ust, stream->obj, ret);
2907 lttng_fd_put(LTTNG_FD_APPS, 2);
2908 goto error;
2909 }
2910 stream->handle = stream->obj->handle;
2911
2912 error:
2913 return ret;
2914 }
2915
2916 /*
2917 * Duplicate the ust data object of the ust app. channel and save it in the
2918 * buffer registry channel.
2919 *
2920 * Return 0 on success or else a negative value.
2921 */
2922 static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2923 struct ust_app_channel *ua_chan)
2924 {
2925 int ret;
2926
2927 assert(reg_chan);
2928 assert(ua_chan);
2929
2930 /* Need two fds for the channel. */
2931 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2932 if (ret < 0) {
2933 ERR("Exhausted number of available FD upon duplicate channel");
2934 goto error_fd_get;
2935 }
2936
2937 /* Duplicate object for stream once the original is in the registry. */
2938 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2939 if (ret < 0) {
2940 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2941 reg_chan->obj.ust, ua_chan->obj, ret);
2942 goto error;
2943 }
2944 ua_chan->handle = ua_chan->obj->handle;
2945
2946 return 0;
2947
2948 error:
2949 lttng_fd_put(LTTNG_FD_APPS, 1);
2950 error_fd_get:
2951 return ret;
2952 }
2953
/*
 * For a given channel buffer registry, setup all streams of the given ust
 * application channel.
 *
 * Ownership of each stream's ust object is transferred to the registry;
 * the app-side stream entries are deleted as they are consumed.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_stream *stream, *stmp;

	assert(reg_chan);
	assert(ua_chan);

	DBG2("UST app setup buffer registry stream");

	/* Safe iteration: entries are removed from the list as we go. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		struct buffer_reg_stream *reg_stream;

		ret = buffer_reg_stream_create(&reg_stream);
		if (ret < 0) {
			goto error;
		}

		/*
		 * Keep original pointer and nullify it in the stream so the delete
		 * stream call does not release the object.
		 */
		reg_stream->obj.ust = stream->obj;
		stream->obj = NULL;
		buffer_reg_stream_add(reg_stream, reg_chan);

		/* We don't need the streams anymore. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}

error:
	return ret;
}
2997
/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. Important, the created object is NOT added to the session
 * registry hash table.
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(reg_chan);
	/* Mirror the app channel's consumer key and buffer geometry. */
	reg_chan->consumer_key = ua_chan->key;
	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
	reg_chan->num_subbuf = ua_chan->attr.num_subbuf;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, reg_chan);

	if (regp) {
		*regp = reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
3047
3048 /*
3049 * Setup buffer registry channel for the given session registry and application
3050 * channel object. If regp pointer is valid, it's set with the created object.
3051 *
3052 * Return 0 on success else a negative value.
3053 */
3054 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3055 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
3056 struct ust_app *app)
3057 {
3058 int ret;
3059
3060 assert(reg_sess);
3061 assert(reg_chan);
3062 assert(ua_chan);
3063 assert(ua_chan->obj);
3064
3065 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
3066
3067 /* Setup all streams for the registry. */
3068 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
3069 if (ret < 0) {
3070 goto error;
3071 }
3072
3073 reg_chan->obj.ust = ua_chan->obj;
3074 ua_chan->obj = NULL;
3075
3076 return 0;
3077
3078 error:
3079 buffer_reg_channel_remove(reg_sess, reg_chan);
3080 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
3081 return ret;
3082 }
3083
/*
 * Send buffer registry channel to the application.
 *
 * Duplicates the registry channel object for the app, sends the channel,
 * then duplicates and sends every registered stream. A -ENOTCONN return
 * indicates the application exited during the exchange.
 *
 * Return 0 on success else a negative value.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	/* Give the app its own copy of the registry's channel object. */
	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		/* Stack-local copy; the registry keeps the original object. */
		struct ust_app_stream stream;

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			/* Release the duplicated object before bailing out. */
			(void) release_ust_app_stream(-1, &stream, app);
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				ret = -ENOTCONN; /* Caused by app exiting. */
			}
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream, app);
	}
	/* Only mark the channel sent once every stream went through. */
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
3151
3152 /*
3153 * Create and send to the application the created buffers with per UID buffers.
3154 *
3155 * This MUST be called with a RCU read side lock acquired.
3156 * The session list lock and the session's lock must be acquired.
3157 *
3158 * Return 0 on success else a negative value.
3159 */
3160 static int create_channel_per_uid(struct ust_app *app,
3161 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3162 struct ust_app_channel *ua_chan)
3163 {
3164 int ret;
3165 struct buffer_reg_uid *reg_uid;
3166 struct buffer_reg_channel *reg_chan;
3167 struct ltt_session *session = NULL;
3168 enum lttng_error_code notification_ret;
3169 struct ust_registry_channel *chan_reg;
3170
3171 assert(app);
3172 assert(usess);
3173 assert(ua_sess);
3174 assert(ua_chan);
3175
3176 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
3177
3178 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
3179 /*
3180 * The session creation handles the creation of this global registry
3181 * object. If none can be find, there is a code flow problem or a
3182 * teardown race.
3183 */
3184 assert(reg_uid);
3185
3186 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
3187 reg_uid);
3188 if (reg_chan) {
3189 goto send_channel;
3190 }
3191
3192 /* Create the buffer registry channel object. */
3193 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
3194 if (ret < 0) {
3195 ERR("Error creating the UST channel \"%s\" registry instance",
3196 ua_chan->name);
3197 goto error;
3198 }
3199
3200 session = session_find_by_id(ua_sess->tracing_id);
3201 assert(session);
3202 assert(pthread_mutex_trylock(&session->lock));
3203 assert(session_trylock_list());
3204
3205 /*
3206 * Create the buffers on the consumer side. This call populates the
3207 * ust app channel object with all streams and data object.
3208 */
3209 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
3210 app->bits_per_long, reg_uid->registry->reg.ust,
3211 session->most_recent_chunk_id.value);
3212 if (ret < 0) {
3213 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3214 ua_chan->name);
3215
3216 /*
3217 * Let's remove the previously created buffer registry channel so
3218 * it's not visible anymore in the session registry.
3219 */
3220 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
3221 ua_chan->tracing_channel_id, false);
3222 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
3223 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
3224