/*
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#define _LGPL_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <signal.h>

#include <common/bytecode/bytecode.h>
#include <common/common.h>
#include <common/hashtable/utils.h>
#include <lttng/event-rule/event-rule.h>
#include <lttng/event-rule/event-rule-internal.h>
#include <lttng/event-rule/tracepoint.h>
#include <lttng/condition/condition.h>
#include <lttng/condition/event-rule-internal.h>
#include <lttng/condition/event-rule.h>
#include <lttng/trigger/trigger-internal.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "condition-internal.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "ust-app.h"
#include "ust-consumer.h"
#include "lttng-ust-ctl.h"
#include "lttng-ust-error.h"
#include "utils.h"
#include "session.h"
#include "lttng-sessiond.h"
#include "notification-thread-commands.h"
#include "rotate.h"
#include "event.h"
#include "trigger-error-accounting.h"

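/*
 * Registered applications, indexed by process ID (ust_app_ht), by
 * command socket (ust_app_ht_by_sock) and by notify socket
 * (ust_app_ht_by_notify_sock).
 */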
struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;

static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return the incremented value of next_channel_key.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}

/*
 * Return the atomically incremented value of next_session_id.
 */
static uint64_t get_next_session_id(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}

static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	/* Copy channel attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
	attr->blocking_timeout = uattr->u.s.blocking_timeout;
}

/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes: the event name,
 * the filter bytecode, the loglevel and the exclusions.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* If only one of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exist, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* If only one of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exist, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}

/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = event->attr.loglevel;
	key.exclusion = event->exclusion;

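	/*
	 * cds_lfht_add_unique() returns the conflicting node if an equal key
	 * is already present; inserting a duplicate event is a programming
	 * error, hence the assert below.
	 */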
	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	assert(node_ptr == &event->node.node);
}

/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
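	/* Return the closed fd to the sessiond's application fd accounting. */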
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}

/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exist before calling this function or else
 * it will assert if not found. The RCU read side lock must be acquired.
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long,
				lttng_credentials_get_uid(&ua_sess->real_credentials));
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		assert(0);
	}

error:
	return registry;
}

/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
		struct ust_app *app)
{
	int ret;

	assert(ua_ctx);

	if (ua_ctx->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
					sock, ua_ctx->obj->handle, ret);
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}

/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	assert(ua_event);

	free(ua_event->filter);
	if (ua_event->exclusion != NULL)
		free(ua_event->exclusion);
	if (ua_event->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_event->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release event obj failed with ret %d",
					sock, ret);
		}
		free(ua_event->obj);
	}
	free(ua_event);
}

/*
 * Delete ust app token event_rule safely. RCU read lock must be held before
 * calling this function. TODO: confirm that the RCU read lock is indeed
 * required here.
 */
static
void delete_ust_app_token_event_rule(int sock, struct ust_app_token_event_rule *ua_token,
		struct ust_app *app)
{
	int ret;

	assert(ua_token);

	if (ua_token->exclusion != NULL)
		free(ua_token->exclusion);
	if (ua_token->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_token->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release event obj failed with ret %d",
					sock, ret);
		}
		free(ua_token->obj);
	}
	lttng_trigger_put(ua_token->trigger);
	free(ua_token);
}

/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}

/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}

/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from the lttng list command and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

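	/*
	 * Lost/discarded counters only exist for per-CPU data channels;
	 * there is nothing to account for on other channel types.
	 */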
	if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
}

/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key,
					sock >= 0);
		}
		/*
		 * A negative socket can be used by the caller when
		 * cleaning-up a ua_chan in an error path. Skip the
		 * accounting in this case.
		 */
		if (sock >= 0) {
			save_per_pid_lost_discarded_counters(ua_chan);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}

int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	return ret;
}

int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		sock = -1;
	}
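	/*
	 * With no app, a negative socket is passed down so that only the
	 * local object data is released; there is no tracer to notify.
	 */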
	ret = ustctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}

/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happen if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
					new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}

/*
 * For a given application and session, push metadata to consumer.
 * The consumer socket used to send the metadata is retrieved from the
 * given consumer output.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of registry. It also ensures existence
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}

/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Do not hold the registry lock while communicating with the consumerd,
 * because doing so causes inter-process deadlocks between consumerd and
 * sessiond with the metadata request notification.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;
	uint64_t metadata_key;
	bool registry_was_already_closed;

	assert(registry);
	assert(consumer);

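	/*
	 * The RCU read-side lock protects the consumer socket looked up
	 * below for the duration of its use.
	 */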
	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);
	metadata_key = registry->metadata_key;
	registry_was_already_closed = registry->metadata_closed;
	if (metadata_key != 0) {
		/*
		 * Mark the metadata as closed. Even on error this means that
		 * the consumer is not responding or not found, so either way
		 * a second close should NOT be emitted for this registry.
		 */
		registry->metadata_closed = 1;
	}
	pthread_mutex_unlock(&registry->lock);

	if (metadata_key == 0 || registry_was_already_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto end;
	}

	ret = consumer_close_metadata(socket, metadata_key);
	if (ret < 0) {
		goto end;
	}

end:
	rcu_read_unlock();
	return ret;
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}

/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

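	/*
	 * A session must only be deleted once; flag it as deleted under its
	 * lock so concurrent users can bail out.
	 */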
	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flagged the metadata registry
		 * to close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}

/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;
	struct lttng_ht_iter iter;
	struct ust_app_token_event_rule *token;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
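	/*
	 * Invalidate the socket in the app structure so no other path keeps
	 * using it; the local copy is closed at the end of the teardown (see
	 * the comment above the close() call below).
	 */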
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Wipe tokens associated with the app */
	cds_lfht_for_each_entry(app->tokens_ht->ht, &iter.iter, token,
			node.node) {
		ret = lttng_ht_del(app->tokens_ht, &iter);
		assert(!ret);
		delete_ust_app_token_event_rule(sock, token, app);
	}

	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);
	ht_cleanup_push(app->tokens_ht);

	/* This can happen if trigger setup failed, e.g. with a killed app. */
	if (app->token_communication.handle) {
		ustctl_release_object(sock, app->token_communication.handle);
		free(app->token_communication.handle);
	}

	lttng_pipe_destroy(app->token_communication.trigger_event_pipe);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}

/*
 * URCU intermediate call to delete an UST app.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}

/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 *
 * The session list lock must be held by the caller.
 */
static void destroy_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(app);
	assert(ua_sess);

	iter.iter.node = &ua_sess->node.node;
	ret = lttng_ht_del(app->sessions, &iter);
	if (ret) {
		/* Already scheduled for teardown. */
		goto end;
	}

	/* Once deleted, free the data structure. */
	delete_ust_app_session(app->sock, ua_sess, app);

end:
	return;
}

/*
 * Alloc new UST app session.
 */
static
struct ust_app_session *alloc_ust_app_session(void)
{
	struct ust_app_session *ua_sess;

	/* Init most of the default values by allocating and zeroing */
	ua_sess = zmalloc(sizeof(struct ust_app_session));
	if (ua_sess == NULL) {
		PERROR("malloc");
		goto error_free;
	}

	ua_sess->handle = -1;
	ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
	pthread_mutex_init(&ua_sess->lock, NULL);

	return ua_sess;

error_free:
	return NULL;
}

/*
 * Alloc new UST app channel.
 */
static
struct ust_app_channel *alloc_ust_app_channel(const char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Init most of the default values by allocating and zeroing */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = 1;
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = attr->output;
		ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
	}
	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return NULL;
}

/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
{
	struct ust_app_stream *stream = NULL;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("zmalloc ust app stream");
		goto error;
	}

	/* Zero could be a valid value for a handle so set it to -1. */
	stream->handle = -1;

error:
	return stream;
}

/*
 * Alloc new UST app event.
 */
static
struct ust_app_event *alloc_ust_app_event(char *name,
		struct lttng_ust_event *attr)
{
	struct ust_app_event *ua_event;

	/* Init most of the default values by allocating and zeroing */
	ua_event = zmalloc(sizeof(struct ust_app_event));
	if (ua_event == NULL) {
		PERROR("Failed to allocate ust_app_event structure");
		goto error;
	}

	ua_event->enabled = 1;
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	if (attr) {
		memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
	}

	DBG3("UST app event %s allocated", ua_event->name);

	return ua_event;

error:
	return NULL;
}

/*
 * Alloc new UST app token event rule.
 */
static struct ust_app_token_event_rule *alloc_ust_app_token_event_rule(
		struct lttng_trigger *trigger)
{
	struct ust_app_token_event_rule *ua_token;
	struct lttng_condition *condition = NULL;
	struct lttng_event_rule *event_rule = NULL;

	ua_token = zmalloc(sizeof(struct ust_app_token_event_rule));
	if (ua_token == NULL) {
		PERROR("Failed to allocate ust_app_token_event_rule structure");
		goto error;
	}

	/* Get a reference to the trigger. */
	/* TODO: should this be like lttng_event_rule_get with a returned bool? */
	lttng_trigger_get(trigger);

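	/*
	 * The tracer token is the key under which this rule is published in
	 * the per-app tokens_ht hash table.
	 */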
	ua_token->enabled = 1;
	ua_token->token = lttng_trigger_get_tracer_token(trigger);
	lttng_ht_node_init_u64(&ua_token->node, ua_token->token);

	condition = lttng_trigger_get_condition(trigger);
	assert(condition);
	assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT);

	/*
	 * Do not call lttng_condition_event_rule_get_rule_mutable() from
	 * within assert(): the side effect would vanish with NDEBUG.
	 */
	{
		const enum lttng_condition_status status =
				lttng_condition_event_rule_get_rule_mutable(
						condition, &event_rule);

		assert(status == LTTNG_CONDITION_STATUS_OK);
	}
	assert(event_rule);

	ua_token->trigger = trigger;
	ua_token->filter = lttng_event_rule_get_filter_bytecode(event_rule);
	ua_token->exclusion = lttng_event_rule_generate_exclusions(event_rule);
	ua_token->error_counter_index = lttng_trigger_get_error_counter_index(trigger);

	/* TODO: set up captures here, or later? */

	DBG3("UST app token event rule %" PRIu64 " allocated", ua_token->token);

	return ua_token;

error:
	return NULL;
}

/*
 * Alloc new UST app context.
 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
		if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *ctx_name = NULL;

			provider_name = strdup(uctx->u.app_ctx.provider_name);
			ctx_name = strdup(uctx->u.app_ctx.ctx_name);
			if (!provider_name || !ctx_name) {
				free(provider_name);
				free(ctx_name);
				goto error;
			}

			ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
			ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
		}
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
	return ua_ctx;
error:
	free(ua_ctx);
	return NULL;
}

/*
 * Create a liblttng-ust filter bytecode from the given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_filter_bytecode *
create_ust_filter_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
	struct lttng_ust_filter_bytecode *filter = NULL;

	/* Copy filter bytecode. */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("zmalloc alloc ust filter bytecode");
		goto error;
	}

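	/*
	 * The generic and UST bytecode headers are expected to share the
	 * same layout; the assert below guards the direct memcpy of the
	 * header plus variable-length data.
	 */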
	assert(sizeof(struct lttng_bytecode) ==
			sizeof(struct lttng_ust_filter_bytecode));
	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
error:
	return filter;
}

/*
 * Create a liblttng-ust capture bytecode from the given bytecode.
 *
 * Return allocated capture bytecode or NULL on error.
 */
static struct lttng_ust_capture_bytecode *
create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
	struct lttng_ust_capture_bytecode *capture = NULL;

	/* Copy capture bytecode. */
	capture = zmalloc(sizeof(*capture) + orig_f->len);
	if (!capture) {
		PERROR("zmalloc alloc ust capture bytecode");
		goto error;
	}

	assert(sizeof(struct lttng_bytecode) ==
			sizeof(struct lttng_ust_capture_bytecode));
	memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
error:
	return capture;
}

/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, sock_n);

error:
	return NULL;
}

/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by notify sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, notify_sock_n);

error:
	return NULL;
}

/*
 * Look up an ust app event based on the event name, filter bytecode and the
 * event loglevel.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		const char *name, const struct lttng_bytecode *filter,
		int loglevel_value,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel_type = loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}

/*
 * Look up an ust app token event rule based on a token id.
 *
 * Return an ust_app_token_event_rule object or NULL on error.
 */
static struct ust_app_token_event_rule *find_ust_app_token_event_rule(struct lttng_ht *ht,
		uint64_t token)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct ust_app_token_event_rule *token_event_rule = NULL;

	assert(ht);

	lttng_ht_lookup(ht, &token, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		DBG2("UST app token %" PRIu64 " not found", token);
		goto end;
	}

	token_event_rule = caa_container_of(node, struct ust_app_token_event_rule, node);
end:
	return token_event_rule;
}

/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app create channel context failed for app (pid: %d) "
					"with ret %d", app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app add context failed. Application is dead.");
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}

/*
 * Set the filter on the tracer.
 */
static int set_ust_filter(struct ust_app *app,
		const struct lttng_bytecode *bytecode,
		struct lttng_ust_object_data *ust_object)
{
	int ret;
	struct lttng_ust_filter_bytecode *ust_bytecode = NULL;

	health_code_update();

	ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_filter(app->sock, ust_bytecode,
			ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app set filter failed for object %p of app (pid: %d) "
					"with ret %d", ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app set filter failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter set for object %p successfully", ust_object);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}

/*
 * Set a capture bytecode for the passed object.
 * The seqnum enforces the ordering at runtime and on reception.
 */
static int set_ust_capture(struct ust_app *app,
		const struct lttng_bytecode *bytecode,
		unsigned int seqnum,
		struct lttng_ust_object_data *ust_object)
{
	int ret;
	struct lttng_ust_capture_bytecode *ust_bytecode = NULL;

	health_code_update();

	ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}

	/* Set the seqnum. */
	ust_bytecode->seqnum = seqnum;

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_capture(app->sock, ust_bytecode,
			ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app set capture failed for object %p of app (pid: %d) "
					"with ret %d", ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app set capture failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST capture set for object %p successfully", ust_object);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}

static
struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
		struct lttng_event_exclusion *exclusion)
{
	struct lttng_ust_event_exclusion *ust_exclusion = NULL;
	size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
			LTTNG_UST_SYM_NAME_LEN * exclusion->count;

	ust_exclusion = zmalloc(exclusion_alloc_size);
	if (!ust_exclusion) {
		PERROR("malloc");
		goto end;
	}

	assert(sizeof(struct lttng_event_exclusion) ==
			sizeof(struct lttng_ust_event_exclusion));
	memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
end:
	return ust_exclusion;
}

/*
 * Set event exclusions on the tracer.
 */
static int set_ust_exclusions(struct ust_app *app,
		struct lttng_event_exclusion *exclusions,
		struct lttng_ust_object_data *ust_object)
{
	int ret;
	struct lttng_ust_event_exclusion *ust_exclusions = NULL;

	assert(exclusions && exclusions->count > 0);

	health_code_update();

	ust_exclusions = create_ust_exclusion_from_exclusion(
			exclusions);
	if (!ust_exclusions) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app exclusions failed for object %p of app (pid: %d) "
					"with ret %d", ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app set exclusions failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST exclusions set successfully for object %p", ust_object);

error:
	health_code_update();
	free(ust_exclusions);
	return ret;
}

/*
 * Disable the specified object on the UST tracer for the UST session.
 */
static int disable_ust_object(struct ust_app *app,
		struct lttng_ust_object_data *object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app disable failed for object %p app (pid: %d) with ret %d",
					object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p disabled successfully for app (pid: %d)",
			object, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Disable the specified channel on the UST tracer for the UST session.
 */
static int disable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable channel failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app channel %s disabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified channel on the UST tracer for the UST session.
 */
static int enable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable channel failed. Application is dead.");
		}
		goto error;
	}

	ua_chan->enabled = 1;

	DBG2("UST app channel %s enabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified object on the UST tracer for the UST session.
 */
static int enable_ust_object(struct ust_app *app, struct lttng_ust_object_data *ust_object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
					ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p enabled successfully for app (pid: %d)",
			ust_object, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Send channel and stream buffer to application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN;	/* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN;	/* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}

/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d object: %p",
			ua_event->attr.name, app->pid, ua_event->obj);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_filter(app, ua_event->filter, ua_event->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_exclusions(app, ua_event->exclusion, ua_event->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* If the event is meant to be enabled, enable it on the tracer. */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_object(app, ua_event->obj);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
1935
1936 static
1937 void init_ust_trigger_from_event_rule(const struct lttng_event_rule *rule, struct lttng_ust_trigger *trigger)
1938 {
1939 enum lttng_event_rule_status status;
1940 enum lttng_loglevel_type loglevel_type;
1941 enum lttng_ust_loglevel_type ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1942 int loglevel = -1;
1943 const char *pattern;
1944
1945 /* For now, only LTTNG_EVENT_RULE_TYPE_TRACEPOINT is supported. */
1946 assert(lttng_event_rule_get_type(rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
1947
1948 memset(trigger, 0, sizeof(*trigger));
1949
1950 if (lttng_event_rule_is_agent(rule)) {
1951 /*
1952 * Special event for agents: the actual meat of the event is in
1953 * the filter that will be attached later on.
1954 *
1955 * Set the default values for the agent event.
1956 */
1957 pattern = event_get_default_agent_ust_name(lttng_event_rule_get_domain_type(rule));
1958 loglevel = 0;
1959 ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1960 } else {
1961 status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
1962 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
1963 /* At this point this is a fatal error */
1964 assert(0);
1965 }
1966
1967 status = lttng_event_rule_tracepoint_get_log_level_type(
1968 rule, &loglevel_type);
1969 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
1970 /* At this point this is a fatal error */
1971 assert(0);
1972 }
1973
1974 switch (loglevel_type) {
1975 case LTTNG_EVENT_LOGLEVEL_ALL:
1976 ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1977 break;
1978 case LTTNG_EVENT_LOGLEVEL_RANGE:
1979 ust_loglevel_type = LTTNG_UST_LOGLEVEL_RANGE;
1980 break;
1981 case LTTNG_EVENT_LOGLEVEL_SINGLE:
1982 ust_loglevel_type = LTTNG_UST_LOGLEVEL_SINGLE;
1983 break;
1984 }
1985
1986 if (loglevel_type != LTTNG_EVENT_LOGLEVEL_ALL) {
1987 status = lttng_event_rule_tracepoint_get_log_level(
1988 rule, &loglevel);
1989 assert(status == LTTNG_EVENT_RULE_STATUS_OK);
1990 }
1991 }
1992
1993 trigger->instrumentation = LTTNG_UST_TRACEPOINT;
1994 strncpy(trigger->name, pattern, LTTNG_UST_SYM_NAME_LEN - 1);
1995 trigger->loglevel_type = ust_loglevel_type;
1996 trigger->loglevel = loglevel;
1997 }
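
/*
 * Illustrative mapping (assumed example values, for documentation only):
 * a tracepoint rule with pattern "my_provider:*" and a loglevel range at
 * LTTNG_LOGLEVEL_INFO would come out of this function as:
 *
 *	trigger->instrumentation = LTTNG_UST_TRACEPOINT;
 *	trigger->name            = "my_provider:*";
 *	trigger->loglevel_type   = LTTNG_UST_LOGLEVEL_RANGE;
 *	trigger->loglevel        = LTTNG_LOGLEVEL_INFO;
 */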
1998
1999 /*
2000 * Create the specified event rule token onto the UST tracer for a UST app.
2001 */
2002 static
2003 int create_ust_token_event_rule(struct ust_app *app, struct ust_app_token_event_rule *ua_token)
2004 {
2005 int ret = 0;
2006 struct lttng_ust_trigger trigger;
2007 struct lttng_condition *condition = NULL;
2008 struct lttng_event_rule *event_rule = NULL;
2009 unsigned int capture_bytecode_count = 0;
2010
2011 health_code_update();
2012 assert(app->token_communication.handle);
2013
2014 condition = lttng_trigger_get_condition(ua_token->trigger);
2015 assert(condition);
2016 assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT);
2017
2018 lttng_condition_event_rule_get_rule_mutable(condition, &event_rule);
2019 assert(event_rule);
2020 assert(lttng_event_rule_get_type(event_rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
2021 /* Should we also check for the UST domain here, or do we trust the
2022 * upper layers? */
2023
2024 init_ust_trigger_from_event_rule(event_rule, &trigger);
2025
2026 trigger.id = ua_token->token;
2027 trigger.error_counter_index = ua_token->error_counter_index;
2028
2029 /* Create UST trigger on tracer */
2030 pthread_mutex_lock(&app->sock_lock);
2031 ret = ustctl_create_trigger(app->sock, &trigger, app->token_communication.handle, &ua_token->obj);
2032 pthread_mutex_unlock(&app->sock_lock);
2033 if (ret < 0) {
2034 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2035 ERR("Error ustctl create trigger %s for app pid: %d with ret %d",
2036 trigger.name, app->pid, ret);
2037 abort();
2038 } else {
2039 /*
2040 * This is normal behavior, an application can die during the
2041 * creation process. Don't report an error so the execution can
2042 * continue normally.
2043 */
2044 ret = 0;
2045 DBG3("UST app create event failed. Application is dead.");
2046 }
2047 goto error;
2048 }
2049
2050 ua_token->handle = ua_token->obj->handle;
2051
2052 DBG2("UST app event %s created successfully for pid:%d object: %p",
2053 trigger.name, app->pid, ua_token->obj);
2054
2055 health_code_update();
2056
2057 /* Set filter if one is present. */
2058 if (ua_token->filter) {
2059 ret = set_ust_filter(app, ua_token->filter, ua_token->obj);
2060 if (ret < 0) {
2061 goto error;
2062 }
2063 }
2064
2065 /* Set exclusions for the event */
2066 if (ua_token->exclusion) {
2067 ret = set_ust_exclusions(app, ua_token->exclusion, ua_token->obj);
2068 if (ret < 0) {
2069 goto error;
2070 }
2071 }
2072
2073 /* Set the capture bytecodes.
2074 * TODO: do we want to emulate what is done with exclusions and provide
2075 * an object with a count of capture bytecodes, instead of multiple
2076 * calls?
2077 */
2078 capture_bytecode_count = lttng_trigger_get_capture_bytecode_count(ua_token->trigger);
2079 for (unsigned int i = 0; i < capture_bytecode_count; i++) {
2080 const struct lttng_bytecode *capture_bytecode = lttng_trigger_get_capture_bytecode_at_index(ua_token->trigger, i);
2081 ret = set_ust_capture(app, capture_bytecode, i, ua_token->obj);
2082 if (ret < 0) {
2083 goto error;
2084 }
2085 }
2086
2087 /*
2088 * We now need to explicitly enable the event, since it
2089 * is disabled at creation.
2090 */
2091 ret = enable_ust_object(app, ua_token->obj);
2092 if (ret < 0) {
2093 /*
2094 * If we hit an EPERM, something is wrong with our enable call. If
2095 * we get an EEXIST, there is a problem on the tracer side since we
2096 * just created it.
2097 */
2098 switch (ret) {
2099 case -LTTNG_UST_ERR_PERM:
2100 /* Code flow problem */
2101 assert(0);
2102 case -LTTNG_UST_ERR_EXIST:
2103 /* It's OK for our use case. */
2104 ret = 0;
2105 break;
2106 default:
2107 break;
2108 }
2109 goto error;
2110 }
2111 ua_token->enabled = true;
2112
2113 error:
2114 health_code_update();
2115 return ret;
2116 }
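
/*
 * Sketch of the sequence performed by create_ust_token_event_rule() above
 * (descriptive summary, not additional code):
 *
 *	ustctl_create_trigger()    create the tracer object, disabled
 *	set_ust_filter()           optional filter bytecode
 *	set_ust_exclusions()       optional exclusions
 *	set_ust_capture()          one call per capture bytecode
 *	enable_ust_object()        explicit enable once fully configured
 */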
2117
2118 /*
2119 * Copy data between an UST app event and a LTT event.
2120 */
2121 static void shadow_copy_event(struct ust_app_event *ua_event,
2122 struct ltt_ust_event *uevent)
2123 {
2124 size_t exclusion_alloc_size;
2125
2126 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
2127 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
2128
2129 ua_event->enabled = uevent->enabled;
2130
2131 /* Copy event attributes */
2132 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
2133
2134 /* Copy filter bytecode */
2135 if (uevent->filter) {
2136 ua_event->filter = bytecode_copy(uevent->filter);
2137 /* Filter might be NULL here in case of ENOMEM. */
2138 }
2139
2140 /* Copy exclusion data */
2141 if (uevent->exclusion) {
2142 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
2143 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
2144 ua_event->exclusion = zmalloc(exclusion_alloc_size);
2145 if (ua_event->exclusion == NULL) {
2146 PERROR("malloc");
2147 } else {
2148 memcpy(ua_event->exclusion, uevent->exclusion,
2149 exclusion_alloc_size);
2150 }
2151 }
2152 }
2153
2154 /*
2155 * Copy data between an UST app channel and a LTT channel.
2156 */
2157 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
2158 struct ltt_ust_channel *uchan)
2159 {
2160 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
2161
2162 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2163 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
2164
2165 ua_chan->tracefile_size = uchan->tracefile_size;
2166 ua_chan->tracefile_count = uchan->tracefile_count;
2167
2168 /* Copy event attributes since the layout is different. */
2169 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2170 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2171 ua_chan->attr.overwrite = uchan->attr.overwrite;
2172 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2173 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
2174 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
2175 ua_chan->attr.output = uchan->attr.output;
2176 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2177
2178 /*
2179 * Note that the attribute channel type is not set since the channel on the
2180 * tracing registry side does not have this information.
2181 */
2182
2183 ua_chan->enabled = uchan->enabled;
2184 ua_chan->tracing_channel_id = uchan->id;
2185
2186 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
2187 }
2188
2189 /*
2190 * Copy data between a UST app session and a regular LTT session.
2191 */
2192 static void shadow_copy_session(struct ust_app_session *ua_sess,
2193 struct ltt_ust_session *usess, struct ust_app *app)
2194 {
2195 struct tm *timeinfo;
2196 char datetime[16];
2197 int ret;
2198 char tmp_shm_path[PATH_MAX];
2199
2200 timeinfo = localtime(&app->registration_time);
2201 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
2202
2203 DBG2("Shadow copy of session handle %d", ua_sess->handle);
2204
2205 ua_sess->tracing_id = usess->id;
2206 ua_sess->id = get_next_session_id();
2207 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
2208 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
2209 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
2210 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
2211 ua_sess->buffer_type = usess->buffer_type;
2212 ua_sess->bits_per_long = app->bits_per_long;
2213
2214 /* Only one consumer object per session is possible. */
2215 consumer_output_get(usess->consumer);
2216 ua_sess->consumer = usess->consumer;
2217
2218 ua_sess->output_traces = usess->output_traces;
2219 ua_sess->live_timer_interval = usess->live_timer_interval;
2220 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
2221 &usess->metadata_attr);
2222
2223 switch (ua_sess->buffer_type) {
2224 case LTTNG_BUFFER_PER_PID:
2225 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
2226 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
2227 datetime);
2228 break;
2229 case LTTNG_BUFFER_PER_UID:
2230 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
2231 DEFAULT_UST_TRACE_UID_PATH,
2232 lttng_credentials_get_uid(&ua_sess->real_credentials),
2233 app->bits_per_long);
2234 break;
2235 default:
2236 assert(0);
2237 goto error;
2238 }
2239 if (ret < 0) {
2240 PERROR("asprintf UST shadow copy session");
2241 assert(0);
2242 goto error;
2243 }
2244
2245 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
2246 sizeof(ua_sess->root_shm_path));
2247 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
2248 strncpy(ua_sess->shm_path, usess->shm_path,
2249 sizeof(ua_sess->shm_path));
2250 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2251 if (ua_sess->shm_path[0]) {
2252 switch (ua_sess->buffer_type) {
2253 case LTTNG_BUFFER_PER_PID:
2254 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
2255 "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
2256 app->name, app->pid, datetime);
2257 break;
2258 case LTTNG_BUFFER_PER_UID:
2259 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
2260 "/" DEFAULT_UST_TRACE_UID_PATH,
2261 app->uid, app->bits_per_long);
2262 break;
2263 default:
2264 assert(0);
2265 goto error;
2266 }
2267 if (ret < 0) {
2268 PERROR("sprintf UST shadow copy session");
2269 assert(0);
2270 goto error;
2271 }
2272 strncat(ua_sess->shm_path, tmp_shm_path,
2273 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
2274 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2275 }
2276 return;
2277
2278 error:
2279 consumer_output_put(ua_sess->consumer);
2280 }
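
/*
 * Example of the resulting relative paths (hypothetical application
 * "myapp", pid 1234, uid 1000, 64-bit, assuming the stock default path
 * macros):
 *
 *	LTTNG_BUFFER_PER_PID: .../pid/myapp-1234-20201130-100000
 *	LTTNG_BUFFER_PER_UID: .../uid/1000/64-bit
 */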
2281
2282 /*
2283 * Lookup session wrapper.
2284 */
2285 static
2286 void __lookup_session_by_app(const struct ltt_ust_session *usess,
2287 struct ust_app *app, struct lttng_ht_iter *iter)
2288 {
2289 /* Get right UST app session from app */
2290 lttng_ht_lookup(app->sessions, &usess->id, iter);
2291 }
2292
2293 /*
2294 * Return ust app session from the app session hashtable using the UST session
2295 * id.
2296 */
2297 static struct ust_app_session *lookup_session_by_app(
2298 const struct ltt_ust_session *usess, struct ust_app *app)
2299 {
2300 struct lttng_ht_iter iter;
2301 struct lttng_ht_node_u64 *node;
2302
2303 __lookup_session_by_app(usess, app, &iter);
2304 node = lttng_ht_iter_get_node_u64(&iter);
2305 if (node == NULL) {
2306 goto error;
2307 }
2308
2309 return caa_container_of(node, struct ust_app_session, node);
2310
2311 error:
2312 return NULL;
2313 }
2314
2315 /*
2316 * Setup buffer registry per PID for the given session and application. If none
2317 * is found, a new one is created, added to the global registry and
2318 * initialized. If regp is valid, it's set with the newly created object.
2319 *
2320 * Return 0 on success or else a negative value.
2321 */
2322 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2323 struct ust_app *app, struct buffer_reg_pid **regp)
2324 {
2325 int ret = 0;
2326 struct buffer_reg_pid *reg_pid;
2327
2328 assert(ua_sess);
2329 assert(app);
2330
2331 rcu_read_lock();
2332
2333 reg_pid = buffer_reg_pid_find(ua_sess->id);
2334 if (!reg_pid) {
2335 /*
2336 * This is the create channel path meaning that if there is NO
2337 * registry available, we have to create one for this session.
2338 */
2339 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
2340 ua_sess->root_shm_path, ua_sess->shm_path);
2341 if (ret < 0) {
2342 goto error;
2343 }
2344 } else {
2345 goto end;
2346 }
2347
2348 /* Initialize registry. */
2349 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2350 app->bits_per_long, app->uint8_t_alignment,
2351 app->uint16_t_alignment, app->uint32_t_alignment,
2352 app->uint64_t_alignment, app->long_alignment,
2353 app->byte_order, app->version.major, app->version.minor,
2354 reg_pid->root_shm_path, reg_pid->shm_path,
2355 lttng_credentials_get_uid(&ua_sess->effective_credentials),
2356 lttng_credentials_get_gid(&ua_sess->effective_credentials),
2357 ua_sess->tracing_id,
2358 app->uid);
2359 if (ret < 0) {
2360 /*
2361 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2362 * destroy the buffer registry, because it is always expected
2363 * that if the buffer registry can be found, its ust registry is
2364 * non-NULL.
2365 */
2366 buffer_reg_pid_destroy(reg_pid);
2367 goto error;
2368 }
2369
2370 buffer_reg_pid_add(reg_pid);
2371
2372 DBG3("UST app buffer registry per PID created successfully");
2373
2374 end:
2375 if (regp) {
2376 *regp = reg_pid;
2377 }
2378 error:
2379 rcu_read_unlock();
2380 return ret;
2381 }
2382
2383 /*
2384 * Setup buffer registry per UID for the given session and application. If none
2385 * is found, a new one is created, added to the global registry and
2386 * initialized. If regp is valid, it's set with the newly created object.
2387 *
2388 * Return 0 on success or else a negative value.
2389 */
2390 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
2391 struct ust_app_session *ua_sess,
2392 struct ust_app *app, struct buffer_reg_uid **regp)
2393 {
2394 int ret = 0;
2395 struct buffer_reg_uid *reg_uid;
2396
2397 assert(usess);
2398 assert(app);
2399
2400 rcu_read_lock();
2401
2402 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2403 if (!reg_uid) {
2404 /*
2405 * This is the create channel path meaning that if there is NO
2406 * registry available, we have to create one for this session.
2407 */
2408 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
2409 LTTNG_DOMAIN_UST, &reg_uid,
2410 ua_sess->root_shm_path, ua_sess->shm_path);
2411 if (ret < 0) {
2412 goto error;
2413 }
2414 } else {
2415 goto end;
2416 }
2417
2418 /* Initialize registry. */
2419 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
2420 app->bits_per_long, app->uint8_t_alignment,
2421 app->uint16_t_alignment, app->uint32_t_alignment,
2422 app->uint64_t_alignment, app->long_alignment,
2423 app->byte_order, app->version.major,
2424 app->version.minor, reg_uid->root_shm_path,
2425 reg_uid->shm_path, usess->uid, usess->gid,
2426 ua_sess->tracing_id, app->uid);
2427 if (ret < 0) {
2428 /*
2429 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2430 * destroy the buffer registry, because it is always expected
2431 * that if the buffer registry can be found, its ust registry is
2432 * non-NULL.
2433 */
2434 buffer_reg_uid_destroy(reg_uid, NULL);
2435 goto error;
2436 }
2437 /* Add node to teardown list of the session. */
2438 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2439
2440 buffer_reg_uid_add(reg_uid);
2441
2442 DBG3("UST app buffer registry per UID created successfully");
2443 end:
2444 if (regp) {
2445 *regp = reg_uid;
2446 }
2447 error:
2448 rcu_read_unlock();
2449 return ret;
2450 }
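
/*
 * Summary of the two setup paths above: a per-PID registry is keyed on the
 * ust app session id and lives for a single application, while a per-UID
 * registry is keyed on (session id, bitness, uid) and is shared by every
 * application of that user, which is why it is also linked into the
 * session's buffer_reg_uid_list for teardown.
 */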
2451
2452 /*
2453 * Create a session on the tracer side for the given app.
2454 *
2455 * On success, ua_sess_ptr is populated with the session pointer or else left
2456 * untouched. If the session was created, is_created is set to 1. On error,
2457 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2458 * be NULL.
2459 *
2460 * Returns 0 on success or else a negative code which is either -ENOMEM or
2461 * -ENOTCONN, which is the default code if ustctl_create_session fails.
2462 */
2463 static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
2464 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2465 int *is_created)
2466 {
2467 int ret, created = 0;
2468 struct ust_app_session *ua_sess;
2469
2470 assert(usess);
2471 assert(app);
2472 assert(ua_sess_ptr);
2473
2474 health_code_update();
2475
2476 ua_sess = lookup_session_by_app(usess, app);
2477 if (ua_sess == NULL) {
2478 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
2479 app->pid, usess->id);
2480 ua_sess = alloc_ust_app_session();
2481 if (ua_sess == NULL) {
2482 /* Only malloc can fail here, so something is really wrong. */
2483 ret = -ENOMEM;
2484 goto error;
2485 }
2486 shadow_copy_session(ua_sess, usess, app);
2487 created = 1;
2488 }
2489
2490 switch (usess->buffer_type) {
2491 case LTTNG_BUFFER_PER_PID:
2492 /* Init local registry. */
2493 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
2494 if (ret < 0) {
2495 delete_ust_app_session(-1, ua_sess, app);
2496 goto error;
2497 }
2498 break;
2499 case LTTNG_BUFFER_PER_UID:
2500 /* Look for a global registry. If none exists, create one. */
2501 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
2502 if (ret < 0) {
2503 delete_ust_app_session(-1, ua_sess, app);
2504 goto error;
2505 }
2506 break;
2507 default:
2508 assert(0);
2509 ret = -EINVAL;
2510 goto error;
2511 }
2512
2513 health_code_update();
2514
2515 if (ua_sess->handle == -1) {
2516 pthread_mutex_lock(&app->sock_lock);
2517 ret = ustctl_create_session(app->sock);
2518 pthread_mutex_unlock(&app->sock_lock);
2519 if (ret < 0) {
2520 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2521 ERR("Creating session for app pid %d with ret %d",
2522 app->pid, ret);
2523 } else {
2524 DBG("UST app creating session failed. Application is dead");
2525 /*
2526 * This is normal behavior, an application can die during the
2527 * creation process. Don't report an error so the execution can
2528 * continue normally. This will get flagged ENOTCONN and the
2529 * caller will handle it.
2530 */
2531 ret = 0;
2532 }
2533 delete_ust_app_session(-1, ua_sess, app);
2534 if (ret != -ENOMEM) {
2535 /*
2536 * Tracer is probably gone or hit an internal error, so let's
2537 * behave as if it will soon unregister or is no longer usable.
2538 */
2539 ret = -ENOTCONN;
2540 }
2541 goto error;
2542 }
2543
2544 ua_sess->handle = ret;
2545
2546 /* Add ust app session to app's HT */
2547 lttng_ht_node_init_u64(&ua_sess->node,
2548 ua_sess->tracing_id);
2549 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
2550 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2551 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2552 &ua_sess->ust_objd_node);
2553
2554 DBG2("UST app session created successfully with handle %d", ret);
2555 }
2556
2557 *ua_sess_ptr = ua_sess;
2558 if (is_created) {
2559 *is_created = created;
2560 }
2561
2562 /* Everything went well. */
2563 ret = 0;
2564
2565 error:
2566 health_code_update();
2567 return ret;
2568 }
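
/*
 * A minimal usage sketch (hypothetical caller): both output parameters are
 * shown, although is_created may be NULL when the caller does not care.
 *
 *	struct ust_app_session *ua_sess;
 *	int created;
 *
 *	ret = find_or_create_ust_app_session(usess, app, &ua_sess, &created);
 *	if (ret < 0) {
 *		handle -ENOMEM or -ENOTCONN
 *	}
 */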
2569
2570 /*
2571 * Match function for a hash table lookup of ust_app_ctx.
2572 *
2573 * It matches an ust app context based on the context type and, in the case
2574 * of perf counters, their name.
2575 */
2576 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2577 {
2578 struct ust_app_ctx *ctx;
2579 const struct lttng_ust_context_attr *key;
2580
2581 assert(node);
2582 assert(_key);
2583
2584 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2585 key = _key;
2586
2587 /* Context type */
2588 if (ctx->ctx.ctx != key->ctx) {
2589 goto no_match;
2590 }
2591
2592 switch(key->ctx) {
2593 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
2594 if (strncmp(key->u.perf_counter.name,
2595 ctx->ctx.u.perf_counter.name,
2596 sizeof(key->u.perf_counter.name))) {
2597 goto no_match;
2598 }
2599 break;
2600 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2601 if (strcmp(key->u.app_ctx.provider_name,
2602 ctx->ctx.u.app_ctx.provider_name) ||
2603 strcmp(key->u.app_ctx.ctx_name,
2604 ctx->ctx.u.app_ctx.ctx_name)) {
2605 goto no_match;
2606 }
2607 break;
2608 default:
2609 break;
2610 }
2611
2612 /* Match. */
2613 return 1;
2614
2615 no_match:
2616 return 0;
2617 }
2618
2619 /*
2620 * Lookup for an ust app context from an lttng_ust_context.
2621 *
2622 * Must be called while holding RCU read side lock.
2623 * Return an ust_app_ctx object or NULL on error.
2624 */
2625 static
2626 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2627 struct lttng_ust_context_attr *uctx)
2628 {
2629 struct lttng_ht_iter iter;
2630 struct lttng_ht_node_ulong *node;
2631 struct ust_app_ctx *app_ctx = NULL;
2632
2633 assert(uctx);
2634 assert(ht);
2635
2636 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2637 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2638 ht_match_ust_app_ctx, uctx, &iter.iter);
2639 node = lttng_ht_iter_get_node_ulong(&iter);
2640 if (!node) {
2641 goto end;
2642 }
2643
2644 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2645
2646 end:
2647 return app_ctx;
2648 }
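
/*
 * Usage sketch (for illustration only; the RCU read side lock must be
 * held by the caller, as stated above):
 *
 *	rcu_read_lock();
 *	app_ctx = find_ust_app_context(ua_chan->ctx, uctx);
 *	if (!app_ctx) {
 *		context is not known for this channel yet
 *	}
 *	rcu_read_unlock();
 */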
2649
2650 /*
2651 * Create a context for the channel on the tracer.
2652 *
2653 * Called with UST app session lock held and a RCU read side lock.
2654 */
2655 static
2656 int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
2657 struct lttng_ust_context_attr *uctx,
2658 struct ust_app *app)
2659 {
2660 int ret = 0;
2661 struct ust_app_ctx *ua_ctx;
2662
2663 DBG2("UST app adding context to channel %s", ua_chan->name);
2664
2665 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2666 if (ua_ctx) {
2667 ret = -EEXIST;
2668 goto error;
2669 }
2670
2671 ua_ctx = alloc_ust_app_ctx(uctx);
2672 if (ua_ctx == NULL) {
2673 /* malloc failed */
2674 ret = -ENOMEM;
2675 goto error;
2676 }
2677
2678 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2679 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2680 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2681
2682 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2683 if (ret < 0) {
2684 goto error;
2685 }
2686
2687 error:
2688 return ret;
2689 }
2690
2691 /*
2692 * Enable on the tracer side a ust app event for the session and channel.
2693 *
2694 * Called with UST app session lock held.
2695 */
2696 static
2697 int enable_ust_app_event(struct ust_app_session *ua_sess,
2698 struct ust_app_event *ua_event, struct ust_app *app)
2699 {
2700 int ret;
2701
2702 ret = enable_ust_object(app, ua_event->obj);
2703 if (ret < 0) {
2704 goto error;
2705 }
2706
2707 ua_event->enabled = 1;
2708
2709 error:
2710 return ret;
2711 }
2712
2713 /*
2714 * Disable on the tracer side a ust app event for the session and channel.
2715 */
2716 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2717 struct ust_app_event *ua_event, struct ust_app *app)
2718 {
2719 int ret;
2720
2721 ret = disable_ust_object(app, ua_event->obj);
2722 if (ret < 0) {
2723 goto error;
2724 }
2725
2726 ua_event->enabled = 0;
2727
2728 error:
2729 return ret;
2730 }
2731
2732 /*
2733 * Lookup ust app channel for session and disable it on the tracer side.
2734 */
2735 static
2736 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2737 struct ust_app_channel *ua_chan, struct ust_app *app)
2738 {
2739 int ret;
2740
2741 ret = disable_ust_channel(app, ua_sess, ua_chan);
2742 if (ret < 0) {
2743 goto error;
2744 }
2745
2746 ua_chan->enabled = 0;
2747
2748 error:
2749 return ret;
2750 }
2751
2752 /*
2753 * Lookup ust app channel for session and enable it on the tracer side. This
2754 * MUST be called with a RCU read side lock acquired.
2755 */
2756 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2757 struct ltt_ust_channel *uchan, struct ust_app *app)
2758 {
2759 int ret = 0;
2760 struct lttng_ht_iter iter;
2761 struct lttng_ht_node_str *ua_chan_node;
2762 struct ust_app_channel *ua_chan;
2763
2764 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2765 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2766 if (ua_chan_node == NULL) {
2767 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2768 uchan->name, ua_sess->tracing_id);
2769 goto error;
2770 }
2771
2772 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2773
2774 ret = enable_ust_channel(app, ua_sess, ua_chan);
2775 if (ret < 0) {
2776 goto error;
2777 }
2778
2779 error:
2780 return ret;
2781 }
2782
2783 /*
2784 * Ask the consumer to create a channel and get it if successful.
2785 *
2786 * Called with UST app session lock held.
2787 *
2788 * Return 0 on success or else a negative value.
2789 */
2790 static int do_consumer_create_channel(struct ltt_ust_session *usess,
2791 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2792 int bitness, struct ust_registry_session *registry,
2793 uint64_t trace_archive_id)
2794 {
2795 int ret;
2796 unsigned int nb_fd = 0;
2797 struct consumer_socket *socket;
2798
2799 assert(usess);
2800 assert(ua_sess);
2801 assert(ua_chan);
2802 assert(registry);
2803
2804 rcu_read_lock();
2805 health_code_update();
2806
2807 /* Get the right consumer socket for the application. */
2808 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2809 if (!socket) {
2810 ret = -EINVAL;
2811 goto error;
2812 }
2813
2814 health_code_update();
2815
2816 /* Need one fd for the channel. */
2817 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2818 if (ret < 0) {
2819 ERR("Exhausted number of available FD upon create channel");
2820 goto error;
2821 }
2822
2823 /*
2824 * Ask the consumer to create the channel. The consumer will return
2825 * the number of streams we have to expect.
2826 */
2827 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2828 registry, usess->current_trace_chunk);
2829 if (ret < 0) {
2830 goto error_ask;
2831 }
2832
2833 /*
2834 * Compute the number of fds needed before receiving them. It must be 2 per
2835 * stream (2 being the default value here).
2836 */
2837 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2838
2839 /* Reserve the amount of file descriptor we need. */
2840 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2841 if (ret < 0) {
2842 ERR("Exhausted number of available FD upon create channel");
2843 goto error_fd_get_stream;
2844 }
2845
2846 health_code_update();
2847
2848 /*
2849 * Now get the channel from the consumer. This call will populate the stream
2850 * list of that channel and set the ust objects.
2851 */
2852 if (usess->consumer->enabled) {
2853 ret = ust_consumer_get_channel(socket, ua_chan);
2854 if (ret < 0) {
2855 goto error_destroy;
2856 }
2857 }
2858
2859 rcu_read_unlock();
2860 return 0;
2861
2862 error_destroy:
2863 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2864 error_fd_get_stream:
2865 /*
2866 * Initiate a destroy channel on the consumer since we had an error
2867 * handling it on our side. The return value is of no importance since we
2868 * already have a ret value set by the previous error that we need to
2869 * return.
2870 */
2871 (void) ust_consumer_destroy_channel(socket, ua_chan);
2872 error_ask:
2873 lttng_fd_put(LTTNG_FD_APPS, 1);
2874 error:
2875 health_code_update();
2876 rcu_read_unlock();
2877 return ret;
2878 }
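
/*
 * Worked example of the fd accounting above (assumed numbers): with 2 fds
 * per stream and a channel for which the consumer announced 4 expected
 * streams, the function reserves 1 fd for the channel plus 2 * 4 = 8 fds
 * for the streams, i.e. 9 fds in total against the LTTNG_FD_APPS limit,
 * all of which are released on the error paths.
 */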
2879
2880 /*
2881 * Duplicate the ust data object of the ust app stream and save it in the
2882 * buffer registry stream.
2883 *
2884 * Return 0 on success or else a negative value.
2885 */
2886 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2887 struct ust_app_stream *stream)
2888 {
2889 int ret;
2890
2891 assert(reg_stream);
2892 assert(stream);
2893
2894 /* Reserve the amount of file descriptor we need. */
2895 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2896 if (ret < 0) {
2897 ERR("Exhausted number of available FD upon duplicate stream");
2898 goto error;
2899 }
2900
2901 /* Duplicate object for stream once the original is in the registry. */
2902 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2903 reg_stream->obj.ust);
2904 if (ret < 0) {
2905 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2906 reg_stream->obj.ust, stream->obj, ret);
2907 lttng_fd_put(LTTNG_FD_APPS, 2);
2908 goto error;
2909 }
2910 stream->handle = stream->obj->handle;
2911
2912 error:
2913 return ret;
2914 }
2915
2916 /*
2917 * Duplicate the ust data object of the ust app channel and save it in the
2918 * buffer registry channel.
2919 *
2920 * Return 0 on success or else a negative value.
2921 */
2922 static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2923 struct ust_app_channel *ua_chan)
2924 {
2925 int ret;
2926
2927 assert(reg_chan);
2928 assert(ua_chan);
2929
2930 /* Need one fd for the channel. */
2931 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2932 if (ret < 0) {
2933 ERR("Exhausted number of available FD upon duplicate channel");
2934 goto error_fd_get;
2935 }
2936
2937 /* Duplicate object for stream once the original is in the registry. */
2938 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2939 if (ret < 0) {
2940 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2941 reg_chan->obj.ust, ua_chan->obj, ret);
2942 goto error;
2943 }
2944 ua_chan->handle = ua_chan->obj->handle;
2945
2946 return 0;
2947
2948 error:
2949 lttng_fd_put(LTTNG_FD_APPS, 1);
2950 error_fd_get:
2951 return ret;
2952 }
2953
2954 /*
2955 * For a given channel buffer registry, setup all streams of the given ust
2956 * application channel.
2957 *
2958 * Return 0 on success or else a negative value.
2959 */
2960 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2961 struct ust_app_channel *ua_chan,
2962 struct ust_app *app)
2963 {
2964 int ret = 0;
2965 struct ust_app_stream *stream, *stmp;
2966
2967 assert(reg_chan);
2968 assert(ua_chan);
2969
2970 DBG2("UST app setup buffer registry stream");
2971
2972 /* Send all streams to application. */
2973 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2974 struct buffer_reg_stream *reg_stream;
2975
2976 ret = buffer_reg_stream_create(&reg_stream);
2977 if (ret < 0) {
2978 goto error;
2979 }
2980
2981 /*
2982 * Keep original pointer and nullify it in the stream so the delete
2983 * stream call does not release the object.
2984 */
2985 reg_stream->obj.ust = stream->obj;
2986 stream->obj = NULL;
2987 buffer_reg_stream_add(reg_stream, reg_chan);
2988
2989 /* We don't need the streams anymore. */
2990 cds_list_del(&stream->list);
2991 delete_ust_app_stream(-1, stream, app);
2992 }
2993
2994 error:
2995 return ret;
2996 }
2997
2998 /*
2999 * Create a buffer registry channel for the given session registry and
3000 * application channel object. If regp pointer is valid, it's set with the
3001 * created object. Important, the created object is NOT added to the session
3002 * registry hash table.
3003 *
3004 * Return 0 on success else a negative value.
3005 */
3006 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3007 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
3008 {
3009 int ret;
3010 struct buffer_reg_channel *reg_chan = NULL;
3011
3012 assert(reg_sess);
3013 assert(ua_chan);
3014
3015 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
3016
3017 /* Create buffer registry channel. */
3018 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
3019 if (ret < 0) {
3020 goto error_create;
3021 }
3022 assert(reg_chan);
3023 reg_chan->consumer_key = ua_chan->key;
3024 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
3025 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
3026
3027 /* Create and add a channel registry to session. */
3028 ret = ust_registry_channel_add(reg_sess->reg.ust,
3029 ua_chan->tracing_channel_id);
3030 if (ret < 0) {
3031 goto error;
3032 }
3033 buffer_reg_channel_add(reg_sess, reg_chan);
3034
3035 if (regp) {
3036 *regp = reg_chan;
3037 }
3038
3039 return 0;
3040
3041 error:
3042 /* Safe because the registry channel object was not added to any HT. */
3043 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
3044 error_create:
3045 return ret;
3046 }
3047
3048 /*
3049 * Setup buffer registry channel for the given session registry and application
3050 * channel object. If regp pointer is valid, it's set with the created object.
3051 *
3052 * Return 0 on success else a negative value.
3053 */
3054 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3055 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
3056 struct ust_app *app)
3057 {
3058 int ret;
3059
3060 assert(reg_sess);
3061 assert(reg_chan);
3062 assert(ua_chan);
3063 assert(ua_chan->obj);
3064
3065 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
3066
3067 /* Setup all streams for the registry. */
3068 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
3069 if (ret < 0) {
3070 goto error;
3071 }
3072
3073 reg_chan->obj.ust = ua_chan->obj;
3074 ua_chan->obj = NULL;
3075
3076 return 0;
3077
3078 error:
3079 buffer_reg_channel_remove(reg_sess, reg_chan);
3080 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
3081 return ret;
3082 }
3083
3084 /*
3085 * Send buffer registry channel to the application.
3086 *
3087 * Return 0 on success else a negative value.
3088 */
3089 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
3090 struct ust_app *app, struct ust_app_session *ua_sess,
3091 struct ust_app_channel *ua_chan)
3092 {
3093 int ret;
3094 struct buffer_reg_stream *reg_stream;
3095
3096 assert(reg_chan);
3097 assert(app);
3098 assert(ua_sess);
3099 assert(ua_chan);
3100
3101 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
3102
3103 ret = duplicate_channel_object(reg_chan, ua_chan);
3104 if (ret < 0) {
3105 goto error;
3106 }
3107
3108 /* Send channel to the application. */
3109 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
3110 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3111 ret = -ENOTCONN; /* Caused by app exiting. */
3112 goto error;
3113 } else if (ret < 0) {
3114 goto error;
3115 }
3116
3117 health_code_update();
3118
3119 /* Send all streams to application. */
3120 pthread_mutex_lock(&reg_chan->stream_list_lock);
3121 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
3122 struct ust_app_stream stream;
3123
3124 ret = duplicate_stream_object(reg_stream, &stream);
3125 if (ret < 0) {
3126 goto error_stream_unlock;
3127 }
3128
3129 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
3130 if (ret < 0) {
3131 (void) release_ust_app_stream(-1, &stream, app);
3132 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3133 ret = -ENOTCONN; /* Caused by app exiting. */
3134 }
3135 goto error_stream_unlock;
3136 }
3137
3138 /*
3139 * The return value is not important here. This function will output an
3140 * error if needed.
3141 */
3142 (void) release_ust_app_stream(-1, &stream, app);
3143 }
3144 ua_chan->is_sent = 1;
3145
3146 error_stream_unlock:
3147 pthread_mutex_unlock(&reg_chan->stream_list_lock);
3148 error:
3149 return ret;
3150 }
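
/*
 * Note on ownership: unlike send_channel_pid_to_ust(), which consumes the
 * application's stream objects, this per-UID path duplicates the registry
 * objects for each application and releases each duplicate once sent, so
 * the buffer registry remains the owner of the original ust objects.
 */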
3151
3152 /*
3153 * Create and send to the application the created buffers with per UID buffers.
3154 *
3155 * This MUST be called with a RCU read side lock acquired.
3156 * The session list lock and the session's lock must be acquired.
3157 *
3158 * Return 0 on success else a negative value.
3159 */
3160 static int create_channel_per_uid(struct ust_app *app,
3161 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3162 struct ust_app_channel *ua_chan)
3163 {
3164 int ret;
3165 struct buffer_reg_uid *reg_uid;
3166 struct buffer_reg_channel *reg_chan;
3167 struct ltt_session *session = NULL;
3168 enum lttng_error_code notification_ret;
3169 struct ust_registry_channel *chan_reg;
3170
3171 assert(app);
3172 assert(usess);
3173 assert(ua_sess);
3174 assert(ua_chan);
3175
3176 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
3177
3178 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
3179 /*
3180 * The session creation handles the creation of this global registry
3181 * object. If none can be found, there is a code flow problem or a
3182 * teardown race.
3183 */
3184 assert(reg_uid);
3185
3186 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
3187 reg_uid);
3188 if (reg_chan) {
3189 goto send_channel;
3190 }
3191
3192 /* Create the buffer registry channel object. */
3193 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
3194 if (ret < 0) {
3195 ERR("Error creating the UST channel \"%s\" registry instance",
3196 ua_chan->name);
3197 goto error;
3198 }
3199
3200 session = session_find_by_id(ua_sess->tracing_id);
3201 assert(session);
3202 assert(pthread_mutex_trylock(&session->lock));
3203 assert(session_trylock_list());
3204
3205 /*
3206 * Create the buffers on the consumer side. This call populates the
3207 * ust app channel object with all streams and data object.
3208 */
3209 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
3210 app->bits_per_long, reg_uid->registry->reg.ust,
3211 session->most_recent_chunk_id.value);
3212 if (ret < 0) {
3213 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3214 ua_chan->name);
3215
3216 /*
3217 * Let's remove the previously created buffer registry channel so
3218 * it's not visible anymore in the session registry.
3219 */
3220 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
3221 ua_chan->tracing_channel_id, false);
3222 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
3223 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
3224 goto error;
3225 }
3226
3227 /*
3228 * Setup the streams and add it to the session registry.
3229 */
3230 ret = setup_buffer_reg_channel(reg_uid->registry,
3231 ua_chan, reg_chan, app);
3232 if (ret < 0) {
3233 ERR("Error setting up UST channel \"%s\"", ua_chan->name);
3234 goto error;
3235 }
3236
3237 /* Notify the notification subsystem of the channel's creation. */
3238 pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
3239 chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
3240 ua_chan->tracing_channel_id);
3241 assert(chan_reg);
3242 chan_reg->consumer_key = ua_chan->key;
3243 chan_reg = NULL;
3244 pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
3245
3246 notification_ret = notification_thread_command_add_channel(
3247 notification_thread_handle, session->name,
3248 lttng_credentials_get_uid(&ua_sess->effective_credentials),
3249 lttng_credentials_get_gid(&ua_sess->effective_credentials),
3250 ua_chan->name,
3251 ua_chan->key, LTTNG_DOMAIN_UST,
3252 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3253 if (notification_ret != LTTNG_OK) {
3254 ret = - (int) notification_ret;
3255 ERR("Failed to add channel to notification thread");
3256 goto error;
3257 }
3258
3259 send_channel:
3260 /* Send buffers to the application. */
3261 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
3262 if (ret < 0) {
3263 if (ret != -ENOTCONN) {
3264 ERR("Error sending channel to application");
3265 }
3266 goto error;
3267 }
3268
3269 error:
3270 if (session) {
3271 session_put(session);
3272 }
3273 return ret;
3274 }
3275
3276 /*
3277 * Create and send to the application the created buffers with per PID buffers.
3278 *
3279 * Called with UST app session lock held.
3280 * The session list lock and the session's lock must be acquired.
3281 *
3282 * Return 0 on success else a negative value.
3283 */
3284 static int create_channel_per_pid(struct ust_app *app,
3285 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3286 struct ust_app_channel *ua_chan)
3287 {
3288 int ret;
3289 struct ust_registry_session *registry;
3290 enum lttng_error_code cmd_ret;
3291 struct ltt_session *session = NULL;
3292 uint64_t chan_reg_key;
3293 struct ust_registry_channel *chan_reg;
3294
3295 assert(app);
3296 assert(usess);
3297 assert(ua_sess);
3298 assert(ua_chan);
3299
3300 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
3301
3302 rcu_read_lock();
3303
3304 registry = get_session_registry(ua_sess);
3305 /* The UST app session lock is held, registry shall not be null. */
3306 assert(registry);
3307
3308 /* Create and add a new channel registry to session. */
3309 ret = ust_registry_channel_add(registry, ua_chan->key);
3310 if (ret < 0) {
3311 ERR("Error creating the UST channel \"%s\" registry instance",
3312 ua_chan->name);
3313 goto error;
3314 }
3315
3316 session = session_find_by_id(ua_sess->tracing_id);
3317 assert(session);
3318
3319 assert(pthread_mutex_trylock(&session->lock));
3320 assert(session_trylock_list());
3321
3322 /* Create and get channel on the consumer side. */
3323 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
3324 app->bits_per_long, registry,
3325 session->most_recent_chunk_id.value);
3326 if (ret < 0) {
3327 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3328 ua_chan->name);
3329 goto error_remove_from_registry;
3330 }
3331
3332 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
3333 if (ret < 0) {
3334 if (ret != -ENOTCONN) {
3335 ERR("Error sending channel to application");
3336 }
3337 goto error_remove_from_registry;
3338 }
3339
3340 chan_reg_key = ua_chan->key;
3341 pthread_mutex_lock(&registry->lock);
3342 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
3343 assert(chan_reg);
3344 chan_reg->consumer_key = ua_chan->key;
3345 pthread_mutex_unlock(&registry->lock);
3346
3347 cmd_ret = notification_thread_command_add_channel(
3348 notification_thread_handle, session->name,
3349 lttng_credentials_get_uid(&ua_sess->effective_credentials),
3350 lttng_credentials_get_gid(&ua_sess->effective_credentials),
3351 ua_chan->name,
3352 ua_chan->key, LTTNG_DOMAIN_UST,
3353 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3354 if (cmd_ret != LTTNG_OK) {
3355 ret = - (int) cmd_ret;
3356 ERR("Failed to add channel to notification thread");
3357 goto error_remove_from_registry;
3358 }
3359
3360 error_remove_from_registry:
3361 if (ret) {
3362 ust_registry_channel_del_free(registry, ua_chan->key, false);
3363 }
3364 error:
3365 rcu_read_unlock();
3366 if (session) {
3367 session_put(session);
3368 }
3369 return ret;
3370 }
3371
3372 /*
3373 * From an already allocated ust app channel, create the channel buffers if
3374 * needed and send them to the application. This MUST be called with a RCU read
3375 * side lock acquired.
3376 *
3377 * Called with UST app session lock held.
3378 *
3379 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3380 * the application exited concurrently.
3381 */
3382 static int ust_app_channel_send(struct ust_app *app,
3383 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3384 struct ust_app_channel *ua_chan)
3385 {
3386 int ret;
3387
3388 assert(app);
3389 assert(usess);
3390 assert(usess->active);
3391 assert(ua_sess);
3392 assert(ua_chan);
3393
3394 /* Handle buffer type before sending the channel to the application. */
3395 switch (usess->buffer_type) {
3396 case LTTNG_BUFFER_PER_UID:
3397 {
3398 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3399 if (ret < 0) {
3400 goto error;
3401 }
3402 break;
3403 }
3404 case LTTNG_BUFFER_PER_PID:
3405 {
3406 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3407 if (ret < 0) {
3408 goto error;
3409 }
3410 break;
3411 }
3412 default:
3413 assert(0);
3414 ret = -EINVAL;
3415 goto error;
3416 }
3417
3418 /* Initialize ust objd object using the received handle and add it. */
3419 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3420 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
3421
3422 /* If channel is not enabled, disable it on the tracer */
3423 if (!ua_chan->enabled) {
3424 ret = disable_ust_channel(app, ua_sess, ua_chan);
3425 if (ret < 0) {
3426 goto error;
3427 }
3428 }
3429
3430 error:
3431 return ret;
3432 }
3433
3434 /*
3435 * Create UST app channel and return it through ua_chanp if not NULL.
3436 *
3437 * Called with UST app session lock and RCU read-side lock held.
3438 *
3439 * Return 0 on success or else a negative value.
3440 */
3441 static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
3442 struct ltt_ust_channel *uchan,
3443 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
3444 struct ust_app_channel **ua_chanp)
3445 {
3446 int ret = 0;
3447 struct lttng_ht_iter iter;
3448 struct lttng_ht_node_str *ua_chan_node;
3449 struct ust_app_channel *ua_chan;
3450
3451 /* Lookup channel in the ust app session */
3452 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3453 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
3454 if (ua_chan_node != NULL) {
3455 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3456 goto end;
3457 }
3458
3459 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
3460 if (ua_chan == NULL) {
3461 /* Only malloc can fail here */
3462 ret = -ENOMEM;
3463 goto error;
3464 }
3465 shadow_copy_channel(ua_chan, uchan);
3466
3467 /* Set channel type. */
3468 ua_chan->attr.type = type;
3469
3470 /* Only add the channel if successful on the tracer side. */
3471 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
3472 end:
3473 if (ua_chanp) {
3474 *ua_chanp = ua_chan;
3475 }
3476
3477 /* Everything went well. */
3478 return 0;
3479
3480 error:
3481 return ret;
3482 }
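
/*
 * A minimal combined sketch (hypothetical caller): allocate-or-lookup the
 * channel, then create/send its buffers; LTTNG_UST_CHAN_PER_CPU is only an
 * assumed example type.
 *
 *	struct ust_app_channel *ua_chan;
 *
 *	ret = ust_app_channel_allocate(ua_sess, uchan,
 *			LTTNG_UST_CHAN_PER_CPU, usess, &ua_chan);
 *	if (!ret) {
 *		ret = ust_app_channel_send(app, usess, ua_sess, ua_chan);
 *	}
 */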
3483
3484 /*
3485 * Create UST app event and create it on the tracer side.
3486 *
3487 * Called with ust app session mutex held.
3488 */
3489 static
3490 int create_ust_app_event(struct ust_app_session *ua_sess,
3491 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
3492 struct ust_app *app)
3493 {
3494 int ret = 0;
3495 struct ust_app_event *ua_event;
3496
3497 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3498 if (ua_event == NULL) {
3499 /* Only failure mode of alloc_ust_app_event(). */
3500 ret = -ENOMEM;
3501 goto end;
3502 }
3503 shadow_copy_event(ua_event, uevent);
3504
3505 /* Create it on the tracer side */
3506 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
3507 if (ret < 0) {
3508 /*
3509 * Not found previously means that it does not exist on the
3510 * tracer. If the application reports that the event existed,
3511 * it means there is a bug in the sessiond or lttng-ust
3512 * (or corruption, etc.)
3513 */
3514 if (ret == -LTTNG_UST_ERR_EXIST) {
3515 ERR("Tracer for application reported that an event being created already existed: "
3516 "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
3517 uevent->attr.name,
3518 app->pid, app->ppid, app->uid,
3519 app->gid);
3520 }
3521 goto error;
3522 }
3523
3524 add_unique_ust_app_event(ua_chan, ua_event);
3525
3526 DBG2("UST app create event %s for PID %d completed", ua_event->name,
3527 app->pid);
3528
3529 end:
3530 return ret;
3531
3532 error:
3533 /* Valid. Calling here is already in a read side lock */
3534 delete_ust_app_event(-1, ua_event, app);
3535 return ret;
3536 }
3537
3538 /*
3539 * Create UST app token event rule and create it on the tracer side.
3540 *
3541 * Called with ust app session mutex held.
3542 */
3543 static
3544 int create_ust_app_token_event_rule(struct lttng_trigger *trigger,
3545 struct ust_app *app)
3546 {
3547 int ret = 0;
3548 struct ust_app_token_event_rule *ua_token;
3549
3550 ua_token = alloc_ust_app_token_event_rule(trigger);
3551 if (ua_token == NULL) {
3552 ret = -ENOMEM;
3553 goto end;
3554 }
3555
3556 /* Create it on the tracer side */
3557 ret = create_ust_token_event_rule(app, ua_token);
3558 if (ret < 0) {
3559 /*
3560 * Not found previously means that it does not exist on the
3561 * tracer. If the application reports that the event existed,
3562 * it means there is a bug in the sessiond or lttng-ust
3563 * (or corruption, etc.)
3564 */
3565 if (ret == -LTTNG_UST_ERR_EXIST) {
3566 ERR("Tracer for application reported that a token event rule being created already existed: "
3567 "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
3568 lttng_trigger_get_tracer_token(trigger),
3569 app->pid, app->ppid, app->uid,
3570 app->gid);
3571 }
3572 goto error;
3573 }
3574
3575 lttng_ht_add_unique_u64(app->tokens_ht, &ua_token->node);
3576
3577 DBG2("UST app create token event rule %" PRIu64 " for PID %d completed", lttng_trigger_get_tracer_token(trigger),
3578 app->pid);
3579
3580 goto end;
3581
3582 error:
3583 /* Valid. Calling here is already in a read side lock */
3584 delete_ust_app_token_event_rule(-1, ua_token, app);
3585 end:
3586 return ret;
3587 }
3588
3589 /*
3590 * Create UST metadata and open it on the tracer side.
3591 *
3592 * Called with UST app session lock held and RCU read side lock.
3593 */
3594 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
3595 struct ust_app *app, struct consumer_output *consumer)
3596 {
3597 int ret = 0;
3598 struct ust_app_channel *metadata;
3599 struct consumer_socket *socket;
3600 struct ust_registry_session *registry;
3601 struct ltt_session *session = NULL;
3602
3603 assert(ua_sess);
3604 assert(app);
3605 assert(consumer);
3606
3607 registry = get_session_registry(ua_sess);
3608 /* The UST app session lock is held, registry shall not be null. */
3609 assert(registry);
3610
3611 pthread_mutex_lock(&registry->lock);
3612
3613 /* Metadata already exists for this registry or it was closed previously */
3614 if (registry->metadata_key || registry->metadata_closed) {
3615 ret = 0;
3616 goto error;
3617 }
3618
3619 /* Allocate UST metadata */
3620 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
3621 if (!metadata) {
3622 /* malloc() failed */
3623 ret = -ENOMEM;
3624 goto error;
3625 }
3626
3627 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
3628
3629 /* Need one fd for the channel. */
3630 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3631 if (ret < 0) {
3632 ERR("Exhausted number of available FD upon create metadata");
3633 goto error;
3634 }
3635
3636 /* Get the right consumer socket for the application. */
3637 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
3638 if (!socket) {
3639 ret = -EINVAL;
3640 goto error_consumer;
3641 }
3642
3643 /*
3644 * Keep metadata key so we can identify it on the consumer side. Assign it
3645 * to the registry *before* we ask the consumer so we avoid the race of the
3646 * consumer requesting the metadata and the ask_channel call on our side
3647 * did not returned yet.
3648 */
3649 registry->metadata_key = metadata->key;
3650
3651 session = session_find_by_id(ua_sess->tracing_id);
3652 assert(session);
3653
3654 assert(pthread_mutex_trylock(&session->lock));
3655 assert(session_trylock_list());
3656
3657 /*
3658 * Ask the consumer to create the metadata channel. The metadata object
3659 * will be created by the consumer and kept there. However, the stream is
3660 * never added or monitored until we do a first metadata push to the
3661 * consumer.
3662 */
3663 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
3664 registry, session->current_trace_chunk);
3665 if (ret < 0) {
3666 /* Nullify the metadata key so we don't try to close it later on. */
3667 registry->metadata_key = 0;
3668 goto error_consumer;
3669 }
3670
3671 /*
3672 * The setup command will make the metadata stream be sent to the relayd,
3673 * if applicable, and to the thread managing the metadata. This is important
3674 * because after this point, if an error occurs, the only way the stream
3675 * can be deleted is to be monitored in the consumer.
3676 */
3677 ret = consumer_setup_metadata(socket, metadata->key);
3678 if (ret < 0) {
3679 /* Nullify the metadata key so we don't try to close it later on. */
3680 registry->metadata_key = 0;
3681 goto error_consumer;
3682 }
3683
3684 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
3685 metadata->key, app->pid);
3686
3687 error_consumer:
3688 lttng_fd_put(LTTNG_FD_APPS, 1);
3689 delete_ust_app_channel(-1, metadata, app);
3690 error:
3691 pthread_mutex_unlock(&registry->lock);
3692 if (session) {
3693 session_put(session);
3694 }
3695 return ret;
3696 }
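
/*
 * Ordering note: registry->metadata_key is assigned before asking the
 * consumer so that a metadata request arriving during the ask can already
 * be matched to this channel; on failure it is reset to 0 so teardown does
 * not try to close a metadata channel that was never set up.
 */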
3697
3698 /*
3699 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3700 * acquired before calling this function.
3701 */
3702 struct ust_app *ust_app_find_by_pid(pid_t pid)
3703 {
3704 struct ust_app *app = NULL;
3705 struct lttng_ht_node_ulong *node;
3706 struct lttng_ht_iter iter;
3707
3708 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3709 node = lttng_ht_iter_get_node_ulong(&iter);
3710 if (node == NULL) {
3711 DBG2("UST app no found with pid %d", pid);
3712 goto error;
3713 }
3714
3715 DBG2("Found UST app by pid %d", pid);
3716
3717 app = caa_container_of(node, struct ust_app, pid_n);
3718
3719 error:
3720 return app;
3721 }
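
/*
 * Usage sketch (for illustration only; as stated above, the RCU read side
 * lock must be held across the lookup and any use of the returned app):
 *
 *	rcu_read_lock();
 *	app = ust_app_find_by_pid(pid);
 *	if (app) {
 *		use app
 *	}
 *	rcu_read_unlock();
 */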
3722
3723 /*
3724 * Allocate and init an UST app object using the registration information and
3725 * the command socket. This is called when the command socket connects to the
3726 * session daemon.
3727 *
3728 * The object is returned on success or else NULL.
3729 */
3730 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
3731 {
3732 struct ust_app *lta = NULL;
3733 struct lttng_pipe *trigger_event_source_pipe = NULL;
3734
3735 assert(msg);
3736 assert(sock >= 0);
3737
3738 DBG3("UST app creating application for socket %d", sock);
3739
3740 if ((msg->bits_per_long == 64 &&
3741 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3742 || (msg->bits_per_long == 32 &&
3743 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
3744 ERR("Registration failed: application \"%s\" (pid: %d) has "
3745 "%d-bit long, but no consumerd for this size is available.\n",
3746 msg->name, msg->pid, msg->bits_per_long);
3747 goto error;
3748 }
3749
3750 trigger_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
3751 if (!trigger_event_source_pipe) {
3752 PERROR("Open trigger pipe");
3753 goto error;
3754 }
3755
3756 lta = zmalloc(sizeof(struct ust_app));
3757 if (lta == NULL) {
3758 PERROR("malloc");
3759 goto error;
3760 }
3761
3762 lta->token_communication.trigger_event_pipe = trigger_event_source_pipe;
3763
3764 lta->ppid = msg->ppid;
3765 lta->uid = msg->uid;
3766 lta->gid = msg->gid;
3767
3768 lta->bits_per_long = msg->bits_per_long;
3769 lta->uint8_t_alignment = msg->uint8_t_alignment;
3770 lta->uint16_t_alignment = msg->uint16_t_alignment;
3771 lta->uint32_t_alignment = msg->uint32_t_alignment;
3772 lta->uint64_t_alignment = msg->uint64_t_alignment;
3773 lta->long_alignment = msg->long_alignment;
3774 lta->byte_order = msg->byte_order;
3775
3776 lta->v_major = msg->major;
3777 lta->v_minor = msg->minor;
3778 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3779 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3780 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3781 lta->notify_sock = -1;
3782 lta->tokens_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3783
3784 /* Copy name and make sure it's NULL terminated. */
3785 strncpy(lta->name, msg->name, sizeof(lta->name));
3786 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3787
3788 /*
3789 * The application's compatibility is checked when the registration
3790 * information is received, before this function is called. At this
3791 * point, the application is known to work with this session daemon.
3792 */
3793 lta->compatible = 1;
3794
3795 lta->pid = msg->pid;
3796 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
3797 lta->sock = sock;
3798 pthread_mutex_init(&lta->sock_lock, NULL);
3799 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
3800
3801 CDS_INIT_LIST_HEAD(&lta->teardown_head);
3802 error:
3803 return lta;
3804 }
3805
3806 /*
3807 * For a given application object, add it to every hash table.
3808 */
3809 void ust_app_add(struct ust_app *app)
3810 {
3811 assert(app);
3812 assert(app->notify_sock >= 0);
3813
3814 app->registration_time = time(NULL);
3815
3816 rcu_read_lock();
3817
3818 /*
3819 * On a re-registration, we want to kick out the previous registration of
3820 * that pid.
3821 */
3822 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
3823
3824 /*
3825 * The socket _should_ be unique until _we_ call close. So, an add_unique
3826 * is used for ust_app_ht_by_sock, which fails an assertion if the entry
3827 * was already in the table.
3828 */
3829 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
3830
3831 /* Add application to the notify socket hash table. */
3832 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3833 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
3834
3835 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
3836 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3837 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3838 app->v_minor);
3839
3840 rcu_read_unlock();
3841 }
3842
3843 /*
3844 * Set the application version into the object.
3845 *
3846 * Return 0 on success, else a negative value: either an errno code or an
3847 * LTTng-UST error code.
3848 */
3849 int ust_app_version(struct ust_app *app)
3850 {
3851 int ret;
3852
3853 assert(app);
3854
3855 pthread_mutex_lock(&app->sock_lock);
3856 ret = ustctl_tracer_version(app->sock, &app->version);
3857 pthread_mutex_unlock(&app->sock_lock);
3858 if (ret < 0) {
3859 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3860 ERR("UST app %d version failed with ret %d", app->sock, ret);
3861 } else {
3862 DBG3("UST app %d version failed. Application is dead", app->sock);
3863 }
3864 }
3865
3866 return ret;
3867 }
3868
3869 /*
3870 * Setup the base trigger group.
3871 *
3872 * Return 0 on success, else a negative value: either an errno code or an
3873 * LTTng-UST error code.
3874 */
3875 int ust_app_setup_trigger_group(struct ust_app *app)
3876 {
3877 int ret;
3878 int writefd;
3879 struct lttng_ust_object_data *group = NULL;
3880 enum lttng_error_code lttng_ret;
3881 enum trigger_error_accounting_status trigger_error_accounting_status;
3882
3883 assert(app);
3884
3885 /* Get the write side of the pipe */
3886 writefd = lttng_pipe_get_writefd(app->token_communication.trigger_event_pipe);
3887
3888 pthread_mutex_lock(&app->sock_lock);
3889 ret = ustctl_create_trigger_group(app->sock, writefd, &group);
3890 pthread_mutex_unlock(&app->sock_lock);
3891 if (ret < 0) {
3892 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3893 ERR("UST app %d create_trigger_group failed with ret %d, trigger pipe %d", app->sock, ret, writefd);
3894 } else {
3895 DBG("UST app %d create trigger group failed. Application is dead", app->sock);
3896 }
3897 goto end;
3898 }
3899
3900 lttng_ret = notification_thread_command_add_application(
3901 notification_thread_handle, lttng_pipe_get_readfd(app->token_communication.trigger_event_pipe), LTTNG_DOMAIN_UST);
3902 if (lttng_ret != LTTNG_OK) {
3903 /* TODO: proper error handling; the trigger group is not torn down here. */
3904 ret = -1;
3905 ERR("Failed to add application to notification thread");
3906 goto end;
3907 }
3908
3909 /* Assign handle only when the complete setup is valid */
3910 app->token_communication.handle = group;
3911
3912 trigger_error_accounting_status = trigger_error_accounting_register_app(app);
3913 if (trigger_error_accounting_status != TRIGGER_ERROR_ACCOUNTING_STATUS_OK) {
3914 ERR("Failed to setup trigger error accouting for app");
3915 ret = -1;
3916 goto end;
3917 }
3918
3920 end:
3921 return ret;
3922 }
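
/*
 * Minimal registration-flow sketch (hypothetical, compiled out), assuming
 * it is driven from the registration dispatch thread: create the app from
 * the received message, publish it in the hash tables once its notify
 * socket is known, then set up the trigger group so the app can report
 * event-rule hits over the trigger event pipe. The function and parameter
 * names below are illustrative only.
 */
#if 0
static int example_register_app(struct ust_register_msg *msg, int sock,
		int notify_sock)
{
	struct ust_app *app;

	app = ust_app_create(msg, sock);
	if (!app) {
		return -1;
	}

	/* ust_app_add() asserts that notify_sock is valid. */
	app->notify_sock = notify_sock;
	ust_app_add(app);

	return ust_app_setup_trigger_group(app);
}
#endif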
3923
3924 /*
3925 * Unregister app by removing it from the global traceable app list and freeing
3926 * the data struct.
3927 *
3928 * The socket is already closed at this point, so there is no need to close it.
3929 */
3930 void ust_app_unregister(int sock)
3931 {
3932 enum lttng_error_code ret_code;
3933 struct ust_app *lta;
3934 struct lttng_ht_node_ulong *node;
3935 struct lttng_ht_iter ust_app_sock_iter;
3936 struct lttng_ht_iter iter;
3937 struct ust_app_session *ua_sess;
3938 int ret;
3939
3940 rcu_read_lock();
3941
3942 /* Get the node reference for a call_rcu */
3943 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
3944 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
3945 assert(node);
3946
3947 lta = caa_container_of(node, struct ust_app, sock_n);
3948 DBG("PID %d unregistering with sock %d", lta->pid, sock);
3949
3950 /*
3951 * For per-PID buffers, perform "push metadata" and flush all
3952 * application streams before removing app from hash tables,
3953 * ensuring proper behavior of data_pending check.
3954 * Remove sessions so they are not visible during deletion.
3955 */
3956 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
3957 node.node) {
3958 struct ust_registry_session *registry;
3959
3960 ret = lttng_ht_del(lta->sessions, &iter);
3961 if (ret) {
3962 /* The session was already removed and is scheduled for teardown. */
3963 continue;
3964 }
3965
3966 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
3967 (void) ust_app_flush_app_session(lta, ua_sess);
3968 }
3969
3970 /*
3971 * Add session to list for teardown. This is safe since at this point we
3972 * are the only one using this list.
3973 */
3974 pthread_mutex_lock(&ua_sess->lock);
3975
3976 if (ua_sess->deleted) {
3977 pthread_mutex_unlock(&ua_sess->lock);
3978 continue;
3979 }
3980
3981 /*
3982 * Normally, this is done in the delete session process which is
3983 * executed in the call rcu below. However, upon unregistration we can't
3984 * afford to wait for the grace period before pushing data or else the
3985 * data pending feature can race between the unregistration and stop
3986 * command where the data pending command is sent *before* the grace
3987 * period ended.
3988 *
3989 * The close metadata below nullifies the metadata pointer in the
3990 * session so the delete session will NOT push/close a second time.
3991 */
3992 registry = get_session_registry(ua_sess);
3993 if (registry) {
3994 /* Push metadata for application before freeing the application. */
3995 (void) push_metadata(registry, ua_sess->consumer);
3996
3997 /*
3998 * Don't ask to close metadata for global per UID buffers. Close
3999 * metadata only on destroy trace session in this case. Also, the
4000 * previous push metadata could have flagged the metadata registry to
4001 * close so don't send a close command if closed.
4002 */
4003 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
4004 /* And ask to close it for this session registry. */
4005 (void) close_metadata(registry, ua_sess->consumer);
4006 }
4007 }
4008 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
4009
4010 pthread_mutex_unlock(&ua_sess->lock);
4011 }
4012
4013 /* Remove application from socket hash table */
4014 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
4015 assert(!ret);
4016
4017 /*
4018 * Remove application from notify hash table. The thread handling the
4019 * notify socket could have deleted the node so ignore on error because
4020 * either way it's valid. The close of that socket is handled by the
4021 * apps_notify_thread.
4022 */
4023 iter.iter.node = &lta->notify_sock_n.node;
4024 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4025
4026 /*
4027 * Ignore return value since the node might have been removed before by an
4028 * add replace during app registration because the PID can be reassigned by
4029 * the OS.
4030 */
4031 iter.iter.node = &lta->pid_n.node;
4032 ret = lttng_ht_del(ust_app_ht, &iter);
4033 if (ret) {
4034 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
4035 lta->pid);
4036 }
4037
4038 /* The trigger handle can be NULL in certain scenarios, such as a dead app. */
4039 if (lta->token_communication.handle) {
4040 int fd = lttng_pipe_get_readfd(
4041 lta->token_communication.trigger_event_pipe);
4042
4043 ret_code = notification_thread_command_remove_application(
4044 notification_thread_handle,
4045 fd);
4046 if (ret_code != LTTNG_OK) {
4047 ERR("Failed to remove application from notification thread");
4048 }
4049 }
4050
4051 /* Free memory */
4052 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
4053
4054 rcu_read_unlock();
4055 return;
4056 }
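
/*
 * Hypothetical dispatch-side usage (compiled out): on an error or hang-up
 * on the command socket, the socket is closed first, then the app is
 * unregistered using that same socket value as the lookup key. 'pollfd'
 * is an illustrative name for the command socket fd.
 */
#if 0
if (close(pollfd)) {
	PERROR("close");
}
ust_app_unregister(pollfd);
#endif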
4057
4058 /*
4059 * Fill events array with all events name of all registered apps.
4060 */
4061 int ust_app_list_events(struct lttng_event **events)
4062 {
4063 int ret, handle;
4064 size_t nbmem, count = 0;
4065 struct lttng_ht_iter iter;
4066 struct ust_app *app;
4067 struct lttng_event *tmp_event;
4068
4069 nbmem = UST_APP_EVENT_LIST_SIZE;
4070 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
4071 if (tmp_event == NULL) {
4072 PERROR("zmalloc ust app events");
4073 ret = -ENOMEM;
4074 goto error;
4075 }
4076
4077 rcu_read_lock();
4078
4079 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4080 struct lttng_ust_tracepoint_iter uiter;
4081
4082 health_code_update();
4083
4084 if (!app->compatible) {
4085 /*
4086 * TODO: In time, we should notify the caller of this error by
4087 * telling them that this is a version error.
4088 */
4089 continue;
4090 }
4091 pthread_mutex_lock(&app->sock_lock);
4092 handle = ustctl_tracepoint_list(app->sock);
4093 if (handle < 0) {
4094 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4095 ERR("UST app list events getting handle failed for app pid %d",
4096 app->pid);
4097 }
4098 pthread_mutex_unlock(&app->sock_lock);
4099 continue;
4100 }
4101
4102 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
4103 &uiter)) != -LTTNG_UST_ERR_NOENT) {
4104 /* Handle ustctl error. */
4105 if (ret < 0) {
4106 int release_ret;
4107
4108 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4109 ERR("UST app tp list get failed for app %d with ret %d",
4110 app->sock, ret);
4111 } else {
4112 DBG3("UST app tp list get failed. Application is dead");
4113 /*
4114 * This is normal behavior; an application can die during the
4115 * creation process. Don't report an error so the execution can
4116 * continue normally.
4117 */
4118 break;
4119 }
4120 free(tmp_event);
4121 release_ret = ustctl_release_handle(app->sock, handle);
4122 if (release_ret < 0 &&
4123 release_ret != -LTTNG_UST_ERR_EXITING &&
4124 release_ret != -EPIPE) {
4125 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4126 }
4127 pthread_mutex_unlock(&app->sock_lock);
4128 goto rcu_error;
4129 }
4130
4131 health_code_update();
4132 if (count >= nbmem) {
4133 /* In case the realloc fails, we free the memory */
4134 struct lttng_event *new_tmp_event;
4135 size_t new_nbmem;
4136
4137 new_nbmem = nbmem << 1;
4138 DBG2("Reallocating event list from %zu to %zu entries",
4139 nbmem, new_nbmem);
4140 new_tmp_event = realloc(tmp_event,
4141 new_nbmem * sizeof(struct lttng_event));
4142 if (new_tmp_event == NULL) {
4143 int release_ret;
4144
4145 PERROR("realloc ust app events");
4146 free(tmp_event);
4147 ret = -ENOMEM;
4148 release_ret = ustctl_release_handle(app->sock, handle);
4149 if (release_ret < 0 &&
4150 release_ret != -LTTNG_UST_ERR_EXITING &&
4151 release_ret != -EPIPE) {
4152 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4153 }
4154 pthread_mutex_unlock(&app->sock_lock);
4155 goto rcu_error;
4156 }
4157 /* Zero the new memory */
4158 memset(new_tmp_event + nbmem, 0,
4159 (new_nbmem - nbmem) * sizeof(struct lttng_event));
4160 nbmem = new_nbmem;
4161 tmp_event = new_tmp_event;
4162 }
4163 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
4164 tmp_event[count].loglevel = uiter.loglevel;
4165 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
4166 tmp_event[count].pid = app->pid;
4167 tmp_event[count].enabled = -1;
4168 count++;
4169 }
4170 ret = ustctl_release_handle(app->sock, handle);
4171 pthread_mutex_unlock(&app->sock_lock);
4172 if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4173 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4174 }
4175 }
4176
4177 ret = count;
4178 *events = tmp_event;
4179
4180 DBG2("UST app list events done (%zu events)", count);
4181
4182 rcu_error:
4183 rcu_read_unlock();
4184 error:
4185 health_code_update();
4186 return ret;
4187 }
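
/*
 * Hypothetical caller sketch (compiled out): on success, the return value
 * is the number of entries and the caller owns the allocated array, which
 * must be released with free().
 */
#if 0
struct lttng_event *events;
int nb_events = ust_app_list_events(&events);

if (nb_events >= 0) {
	int i;

	for (i = 0; i < nb_events; i++) {
		DBG("UST event %s (pid %d)", events[i].name, events[i].pid);
	}
	free(events);
}
#endif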
4188
4189 /*
4190 * Fill the fields array with all event fields of all registered apps.
4191 */
4192 int ust_app_list_event_fields(struct lttng_event_field **fields)
4193 {
4194 int ret, handle;
4195 size_t nbmem, count = 0;
4196 struct lttng_ht_iter iter;
4197 struct ust_app *app;
4198 struct lttng_event_field *tmp_event;
4199
4200 nbmem = UST_APP_EVENT_LIST_SIZE;
4201 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
4202 if (tmp_event == NULL) {
4203 PERROR("zmalloc ust app event fields");
4204 ret = -ENOMEM;
4205 goto error;
4206 }
4207
4208 rcu_read_lock();
4209
4210 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4211 struct lttng_ust_field_iter uiter;
4212
4213 health_code_update();
4214
4215 if (!app->compatible) {
4216 /*
4217 * TODO: In time, we should notify the caller of this error by
4218 * telling them that this is a version error.
4219 */
4220 continue;
4221 }
4222 pthread_mutex_lock(&app->sock_lock);
4223 handle = ustctl_tracepoint_field_list(app->sock);
4224 if (handle < 0) {
4225 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4226 ERR("UST app list field getting handle failed for app pid %d",
4227 app->pid);
4228 }
4229 pthread_mutex_unlock(&app->sock_lock);
4230 continue;
4231 }
4232
4233 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
4234 &uiter)) != -LTTNG_UST_ERR_NOENT) {
4235 /* Handle ustctl error. */
4236 if (ret < 0) {
4237 int release_ret;
4238
4239 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4240 ERR("UST app tp list field failed for app %d with ret %d",
4241 app->sock, ret);
4242 } else {
4243 DBG3("UST app tp list field failed. Application is dead");
4244 /*
4245 * This is normal behavior; an application can die during the
4246 * creation process. Don't report an error so the execution can
4247 * continue normally and move on to the next app.
4248 */
4249 break;
4250 }
4251 free(tmp_event);
4252 release_ret = ustctl_release_handle(app->sock, handle);
4253 pthread_mutex_unlock(&app->sock_lock);
4254 if (release_ret < 0 &&
4255 release_ret != -LTTNG_UST_ERR_EXITING &&
4256 release_ret != -EPIPE) {
4257 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4258 }
4259 goto rcu_error;
4260 }
4261
4262 health_code_update();
4263 if (count >= nbmem) {
4264 /* In case the realloc fails, we free the memory */
4265 struct lttng_event_field *new_tmp_event;
4266 size_t new_nbmem;
4267
4268 new_nbmem = nbmem << 1;
4269 DBG2("Reallocating event field list from %zu to %zu entries",
4270 nbmem, new_nbmem);
4271 new_tmp_event = realloc(tmp_event,
4272 new_nbmem * sizeof(struct lttng_event_field));
4273 if (new_tmp_event == NULL) {
4274 int release_ret;
4275
4276 PERROR("realloc ust app event fields");
4277 free(tmp_event);
4278 ret = -ENOMEM;
4279 release_ret = ustctl_release_handle(app->sock, handle);
4280 pthread_mutex_unlock(&app->sock_lock);
4281 if (release_ret < 0 &&
4282 release_ret != -LTTNG_UST_ERR_EXITING &&
4283 release_ret != -EPIPE) {
4284 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4285 }
4286 goto rcu_error;
4287 }
4288 /* Zero the new memory */
4289 memset(new_tmp_event + nbmem, 0,
4290 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
4291 nbmem = new_nbmem;
4292 tmp_event = new_tmp_event;
4293 }
4294
4295 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
4296 /* Mapping between these enums matches 1 to 1. */
4297 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
4298 tmp_event[count].nowrite = uiter.nowrite;
4299
4300 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
4301 tmp_event[count].event.loglevel = uiter.loglevel;
4302 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
4303 tmp_event[count].event.pid = app->pid;
4304 tmp_event[count].event.enabled = -1;
4305 count++;
4306 }
4307 ret = ustctl_release_handle(app->sock, handle);
4308 pthread_mutex_unlock(&app->sock_lock);
4309 if (ret < 0 &&
4310 ret != -LTTNG_UST_ERR_EXITING &&
4311 ret != -EPIPE) {
4312 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4313 }
4314 }
4315
4316 ret = count;
4317 *fields = tmp_event;
4318
4319 DBG2("UST app list event fields done (%zu events)", count);
4320
4321 rcu_error:
4322 rcu_read_unlock();
4323 error:
4324 health_code_update();
4325 return ret;
4326 }
4327
4328 /*
4329 * Free and clean all traceable apps of the global list.
4330 *
4331 * Should _NOT_ be called with RCU read-side lock held.
4332 */
4333 void ust_app_clean_list(void)
4334 {
4335 int ret;
4336 struct ust_app *app;
4337 struct lttng_ht_iter iter;
4338
4339 DBG2("UST app cleaning registered apps hash table");
4340
4341 rcu_read_lock();
4342
4343 /* Cleanup notify socket hash table */
4344 if (ust_app_ht_by_notify_sock) {
4345 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
4346 notify_sock_n.node) {
4347 struct cds_lfht_node *node;
4348 struct ust_app *app;
4349
4350 node = cds_lfht_iter_get_node(&iter.iter);
4351 if (!node) {
4352 continue;
4353 }
4354
4355 app = container_of(node, struct ust_app,
4356 notify_sock_n.node);
4357 ust_app_notify_sock_unregister(app->notify_sock);
4358 }
4359 }
4360
4361 if (ust_app_ht) {
4362 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4363 ret = lttng_ht_del(ust_app_ht, &iter);
4364 assert(!ret);
4365 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4366 }
4367 }
4368
4369 /* Cleanup socket hash table */
4370 if (ust_app_ht_by_sock) {
4371 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
4372 sock_n.node) {
4373 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
4374 assert(!ret);
4375 }
4376 }
4377
4378 rcu_read_unlock();
4379
4380 /* Destroy is done only when the ht is empty */
4381 if (ust_app_ht) {
4382 ht_cleanup_push(ust_app_ht);
4383 }
4384 if (ust_app_ht_by_sock) {
4385 ht_cleanup_push(ust_app_ht_by_sock);
4386 }
4387 if (ust_app_ht_by_notify_sock) {
4388 ht_cleanup_push(ust_app_ht_by_notify_sock);
4389 }
4390 }
4391
4392 /*
4393 * Init UST app hash table.
4394 */
4395 int ust_app_ht_alloc(void)
4396 {
4397 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4398 if (!ust_app_ht) {
4399 return -1;
4400 }
4401 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4402 if (!ust_app_ht_by_sock) {
4403 return -1;
4404 }
4405 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4406 if (!ust_app_ht_by_notify_sock) {
4407 return -1;
4408 }
4409 return 0;
4410 }
4411
4412 /*
4413 * For a specific UST session, disable the channel for all registered apps.
4414 */
4415 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
4416 struct ltt_ust_channel *uchan)
4417 {
4418 int ret = 0;
4419 struct lttng_ht_iter iter;
4420 struct lttng_ht_node_str *ua_chan_node;
4421 struct ust_app *app;
4422 struct ust_app_session *ua_sess;
4423 struct ust_app_channel *ua_chan;
4424
4425 assert(usess->active);
4426 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
4427 uchan->name, usess->id);
4428
4429 rcu_read_lock();
4430
4431 /* For every registered application */
4432 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4433 struct lttng_ht_iter uiter;
4434 if (!app->compatible) {
4435 /*
4436 * TODO: In time, we should notify the caller of this error by
4437 * telling them that this is a version error.
4438 */
4439 continue;
4440 }
4441 ua_sess = lookup_session_by_app(usess, app);
4442 if (ua_sess == NULL) {
4443 continue;
4444 }
4445
4446 /* Get channel */
4447 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4448 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4449 /* If the session is found for the app, the channel must be there */
4450 assert(ua_chan_node);
4451
4452 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4453 /* The channel must not be already disabled */
4454 assert(ua_chan->enabled == 1);
4455
4456 /* Disable channel onto application */
4457 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
4458 if (ret < 0) {
4459 /* XXX: We might want to report this error at some point... */
4460 continue;
4461 }
4462 }
4463
4464 rcu_read_unlock();
4465 return ret;
4466 }
4467
4468 /*
4469 * For a specific UST session, enable the channel for all registered apps.
4470 */
4471 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
4472 struct ltt_ust_channel *uchan)
4473 {
4474 int ret = 0;
4475 struct lttng_ht_iter iter;
4476 struct ust_app *app;
4477 struct ust_app_session *ua_sess;
4478
4479 assert(usess->active);
4480 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
4481 uchan->name, usess->id);
4482
4483 rcu_read_lock();
4484
4485 /* For every registered application */
4486 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4487 if (!app->compatible) {
4488 /*
4489 * TODO: In time, we should notify the caller of this error by
4490 * telling them that this is a version error.
4491 */
4492 continue;
4493 }
4494 ua_sess = lookup_session_by_app(usess, app);
4495 if (ua_sess == NULL) {
4496 continue;
4497 }
4498
4499 /* Enable channel onto application */
4500 ret = enable_ust_app_channel(ua_sess, uchan, app);
4501 if (ret < 0) {
4502 /* XXX: We might want to report this error at some point... */
4503 continue;
4504 }
4505 }
4506
4507 rcu_read_unlock();
4508 return ret;
4509 }
4510
4511 /*
4512 * Disable an event in a channel and for a specific session.
4513 */
4514 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4515 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4516 {
4517 int ret = 0;
4518 struct lttng_ht_iter iter, uiter;
4519 struct lttng_ht_node_str *ua_chan_node;
4520 struct ust_app *app;
4521 struct ust_app_session *ua_sess;
4522 struct ust_app_channel *ua_chan;
4523 struct ust_app_event *ua_event;
4524
4525 assert(usess->active);
4526 DBG("UST app disabling event %s for all apps in channel "
4527 "%s for session id %" PRIu64,
4528 uevent->attr.name, uchan->name, usess->id);
4529
4530 rcu_read_lock();
4531
4532 /* For all registered applications */
4533 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4534 if (!app->compatible) {
4535 /*
4536 * TODO: In time, we should notify the caller of this error by
4537 * telling them that this is a version error.
4538 */
4539 continue;
4540 }
4541 ua_sess = lookup_session_by_app(usess, app);
4542 if (ua_sess == NULL) {
4543 /* Next app */
4544 continue;
4545 }
4546
4547 /* Lookup channel in the ust app session */
4548 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4549 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4550 if (ua_chan_node == NULL) {
4551 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
4552 "Skipping", uchan->name, usess->id, app->pid);
4553 continue;
4554 }
4555 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4556
4557 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4558 uevent->filter, uevent->attr.loglevel,
4559 uevent->exclusion);
4560 if (ua_event == NULL) {
4561 DBG2("Event %s not found in channel %s for app pid %d."
4562 "Skipping", uevent->attr.name, uchan->name, app->pid);
4563 continue;
4564 }
4565
4566 ret = disable_ust_app_event(ua_sess, ua_event, app);
4567 if (ret < 0) {
4568 /* XXX: Report error someday... */
4569 continue;
4570 }
4571 }
4572
4573 rcu_read_unlock();
4574 return ret;
4575 }
4576
4577 /* The ua_sess lock must be held by the caller. */
4578 static
4579 int ust_app_channel_create(struct ltt_ust_session *usess,
4580 struct ust_app_session *ua_sess,
4581 struct ltt_ust_channel *uchan, struct ust_app *app,
4582 struct ust_app_channel **_ua_chan)
4583 {
4584 int ret = 0;
4585 struct ust_app_channel *ua_chan = NULL;
4586
4587 assert(ua_sess);
4588 ASSERT_LOCKED(ua_sess->lock);
4589
4590 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
4591 sizeof(uchan->name))) {
4592 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
4593 &uchan->attr);
4594 ret = 0;
4595 } else {
4596 struct ltt_ust_context *uctx = NULL;
4597
4598 /*
4599 * Create channel onto application and synchronize its
4600 * configuration.
4601 */
4602 ret = ust_app_channel_allocate(ua_sess, uchan,
4603 LTTNG_UST_CHAN_PER_CPU, usess,
4604 &ua_chan);
4605 if (ret < 0) {
4606 goto error;
4607 }
4608
4609 ret = ust_app_channel_send(app, usess,
4610 ua_sess, ua_chan);
4611 if (ret) {
4612 goto error;
4613 }
4614
4615 /* Add contexts. */
4616 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
4617 ret = create_ust_app_channel_context(ua_chan,
4618 &uctx->ctx, app);
4619 if (ret) {
4620 goto error;
4621 }
4622 }
4623 }
4624
4625 error:
4626 if (ret < 0) {
4627 switch (ret) {
4628 case -ENOTCONN:
4629 /*
4630 * The application's socket is not valid. Either a bad socket
4631 * or a timeout on it. We can't inform the caller that, for a
4632 * specific app, the session failed, so let's continue here.
4633 */
4634 ret = 0; /* Not an error. */
4635 break;
4636 case -ENOMEM:
4637 default:
4638 break;
4639 }
4640 }
4641
4642 if (ret == 0 && _ua_chan) {
4643 /*
4644 * Only return the application's channel on success. Note
4645 * that the channel can still be part of the application's
4646 * channel hashtable on error.
4647 */
4648 *_ua_chan = ua_chan;
4649 }
4650 return ret;
4651 }
4652
4653 /*
4654 * Enable event for a specific session and channel on the tracer.
4655 */
4656 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
4657 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4658 {
4659 int ret = 0;
4660 struct lttng_ht_iter iter, uiter;
4661 struct lttng_ht_node_str *ua_chan_node;
4662 struct ust_app *app;
4663 struct ust_app_session *ua_sess;
4664 struct ust_app_channel *ua_chan;
4665 struct ust_app_event *ua_event;
4666
4667 assert(usess->active);
4668 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
4669 uevent->attr.name, usess->id);
4670
4671 /*
4672 * NOTE: At this point, this function is called only if the session and
4673 * channel passed are already created for all apps and are also
4674 * enabled on the tracer.
4675 */
4676
4677 rcu_read_lock();
4678
4679 /* For all registered applications */
4680 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4681 if (!app->compatible) {
4682 /*
4683 * TODO: In time, we should notify the caller of this error by
4684 * telling them that this is a version error.
4685 */
4686 continue;
4687 }
4688 ua_sess = lookup_session_by_app(usess, app);
4689 if (!ua_sess) {
4690 /* The application has a problem or is probably dead. */
4691 continue;
4692 }
4693
4694 pthread_mutex_lock(&ua_sess->lock);
4695
4696 if (ua_sess->deleted) {
4697 pthread_mutex_unlock(&ua_sess->lock);
4698 continue;
4699 }
4700
4701 /* Lookup channel in the ust app session */
4702 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4703 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4704 /*
4705 * It is possible that the channel cannot be found if
4706 * the channel/event creation occurs concurrently with
4707 * an application exit.
4708 */
4709 if (!ua_chan_node) {
4710 pthread_mutex_unlock(&ua_sess->lock);
4711 continue;
4712 }
4713
4714 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4715
4716 /* Get event node */
4717 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4718 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
4719 if (ua_event == NULL) {
4720 DBG3("UST app enable event %s not found for app PID %d."
4721 "Skipping app", uevent->attr.name, app->pid);
4722 goto next_app;
4723 }
4724
4725 ret = enable_ust_app_event(ua_sess, ua_event, app);
4726 if (ret < 0) {
4727 pthread_mutex_unlock(&ua_sess->lock);
4728 goto error;
4729 }
4730 next_app:
4731 pthread_mutex_unlock(&ua_sess->lock);
4732 }
4733
4734 error:
4735 rcu_read_unlock();
4736 return ret;
4737 }
4738
4739 /*
4740 * For a specific existing UST session and UST channel, creates the event for
4741 * all registered apps.
4742 */
4743 int ust_app_create_event_glb(struct ltt_ust_session *usess,
4744 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4745 {
4746 int ret = 0;
4747 struct lttng_ht_iter iter, uiter;
4748 struct lttng_ht_node_str *ua_chan_node;
4749 struct ust_app *app;
4750 struct ust_app_session *ua_sess;
4751 struct ust_app_channel *ua_chan;
4752
4753 assert(usess->active);
4754 DBG("UST app creating event %s for all apps for session id %" PRIu64,
4755 uevent->attr.name, usess->id);
4756
4757 rcu_read_lock();
4758
4759 /* For all registered applications */
4760 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4761 if (!app->compatible) {
4762 /*
4763 * TODO: In time, we should notify the caller of this error by
4764 * telling them that this is a version error.
4765 */
4766 continue;
4767 }
4768 ua_sess = lookup_session_by_app(usess, app);
4769 if (!ua_sess) {
4770 /* The application has a problem or is probably dead. */
4771 continue;
4772 }
4773
4774 pthread_mutex_lock(&ua_sess->lock);
4775
4776 if (ua_sess->deleted) {
4777 pthread_mutex_unlock(&ua_sess->lock);
4778 continue;
4779 }
4780
4781 /* Lookup channel in the ust app session */
4782 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4783 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4784 /* If the channel is not found, there is a code flow error */
4785 assert(ua_chan_node);
4786
4787 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4788
4789 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4790 pthread_mutex_unlock(&ua_sess->lock);
4791 if (ret < 0) {
4792 if (ret != -LTTNG_UST_ERR_EXIST) {
4793 /* Possible value at this point: -ENOMEM. If so, we stop! */
4794 break;
4795 }
4796 DBG2("UST app event %s already exist on app PID %d",
4797 uevent->attr.name, app->pid);
4798 continue;
4799 }
4800 }
4801
4802 rcu_read_unlock();
4803 return ret;
4804 }
4805
4806 /*
4807 * Start tracing for a specific UST session and app.
4808 *
4809 * Called with the UST session lock held.
4811 */
4812 static
4813 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
4814 {
4815 int ret = 0;
4816 struct ust_app_session *ua_sess;
4817
4818 DBG("Starting tracing for ust app pid %d", app->pid);
4819
4820 rcu_read_lock();
4821
4822 if (!app->compatible) {
4823 goto end;
4824 }
4825
4826 ua_sess = lookup_session_by_app(usess, app);
4827 if (ua_sess == NULL) {
4828 /* The session is in the teardown process. Ignore and continue. */
4829 goto end;
4830 }
4831
4832 pthread_mutex_lock(&ua_sess->lock);
4833
4834 if (ua_sess->deleted) {
4835 pthread_mutex_unlock(&ua_sess->lock);
4836 goto end;
4837 }
4838
4839 if (ua_sess->enabled) {
4840 pthread_mutex_unlock(&ua_sess->lock);
4841 goto end;
4842 }
4843
4844 /* Upon restart, we skip the setup; it is already done. */
4845 if (ua_sess->started) {
4846 goto skip_setup;
4847 }
4848
4849 /*
4850 * Create the metadata for the application. This returns gracefully if
4851 * the metadata was already set for the session.
4852 */
4853 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
4854 if (ret < 0) {
4855 goto error_unlock;
4856 }
4857
4858 health_code_update();
4859
4860 skip_setup:
4861 /* This starts the UST tracing */
4862 pthread_mutex_lock(&app->sock_lock);
4863 ret = ustctl_start_session(app->sock, ua_sess->handle);
4864 pthread_mutex_unlock(&app->sock_lock);
4865 if (ret < 0) {
4866 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4867 ERR("Error starting tracing for app pid: %d (ret: %d)",
4868 app->pid, ret);
4869 } else {
4870 DBG("UST app start session failed. Application is dead.");
4871 /*
4872 * This is normal behavior, an application can die during the
4873 * creation process. Don't report an error so the execution can
4874 * continue normally.
4875 */
4876 pthread_mutex_unlock(&ua_sess->lock);
4877 goto end;
4878 }
4879 goto error_unlock;
4880 }
4881
4882 /* Indicate that the session has been started once */
4883 ua_sess->started = 1;
4884 ua_sess->enabled = 1;
4885
4886 pthread_mutex_unlock(&ua_sess->lock);
4887
4888 health_code_update();
4889
4890 /* Quiescent wait after starting trace */
4891 pthread_mutex_lock(&app->sock_lock);
4892 ret = ustctl_wait_quiescent(app->sock);
4893 pthread_mutex_unlock(&app->sock_lock);
4894 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4895 ERR("UST app wait quiescent failed for app pid %d ret %d",
4896 app->pid, ret);
4897 }
4898
4899 end:
4900 rcu_read_unlock();
4901 health_code_update();
4902 return 0;
4903
4904 error_unlock:
4905 pthread_mutex_unlock(&ua_sess->lock);
4906 rcu_read_unlock();
4907 health_code_update();
4908 return -1;
4909 }
4910
4911 /*
4912 * Stop tracing for a specific UST session and app.
4913 */
4914 static
4915 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
4916 {
4917 int ret = 0;
4918 struct ust_app_session *ua_sess;
4919 struct ust_registry_session *registry;
4920
4921 DBG("Stopping tracing for ust app pid %d", app->pid);
4922
4923 rcu_read_lock();
4924
4925 if (!app->compatible) {
4926 goto end_no_session;
4927 }
4928
4929 ua_sess = lookup_session_by_app(usess, app);
4930 if (ua_sess == NULL) {
4931 goto end_no_session;
4932 }
4933
4934 pthread_mutex_lock(&ua_sess->lock);
4935
4936 if (ua_sess->deleted) {
4937 pthread_mutex_unlock(&ua_sess->lock);
4938 goto end_no_session;
4939 }
4940
4941 /*
4942 * If started = 0, it means that stop trace has been called for a session
4943 * that was never started. It's possible since we can have a failed start
4944 * from either the application manager thread or the command thread. Simply
4945 * indicate that this is a stop error.
4946 */
4947 if (!ua_sess->started) {
4948 goto error_rcu_unlock;
4949 }
4950
4951 health_code_update();
4952
4953 /* This inhibits UST tracing */
4954 pthread_mutex_lock(&app->sock_lock);
4955 ret = ustctl_stop_session(app->sock, ua_sess->handle);
4956 pthread_mutex_unlock(&app->sock_lock);
4957 if (ret < 0) {
4958 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4959 ERR("Error stopping tracing for app pid: %d (ret: %d)",
4960 app->pid, ret);
4961 } else {
4962 DBG("UST app stop session failed. Application is dead.");
4963 /*
4964 * This is normal behavior, an application can die during the
4965 * creation process. Don't report an error so the execution can
4966 * continue normally.
4967 */
4968 goto end_unlock;
4969 }
4970 goto error_rcu_unlock;
4971 }
4972
4973 health_code_update();
4974 ua_sess->enabled = 0;
4975
4976 /* Quiescent wait after stopping trace */
4977 pthread_mutex_lock(&app->sock_lock);
4978 ret = ustctl_wait_quiescent(app->sock);
4979 pthread_mutex_unlock(&app->sock_lock);
4980 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4981 ERR("UST app wait quiescent failed for app pid %d ret %d",
4982 app->pid, ret);
4983 }
4984
4985 health_code_update();
4986
4987 registry = get_session_registry(ua_sess);
4988
4989 /* The UST app session is held; the registry shall not be null. */
4990 assert(registry);
4991
4992 /* Push metadata for application before freeing the application. */
4993 (void) push_metadata(registry, ua_sess->consumer);
4994
4995 end_unlock:
4996 pthread_mutex_unlock(&ua_sess->lock);
4997 end_no_session:
4998 rcu_read_unlock();
4999 health_code_update();
5000 return 0;
5001
5002 error_rcu_unlock:
5003 pthread_mutex_unlock(&ua_sess->lock);
5004 rcu_read_unlock();
5005 health_code_update();
5006 return -1;
5007 }
5008
5009 static
5010 int ust_app_flush_app_session(struct ust_app *app,
5011 struct ust_app_session *ua_sess)
5012 {
5013 int ret, retval = 0;
5014 struct lttng_ht_iter iter;
5015 struct ust_app_channel *ua_chan;
5016 struct consumer_socket *socket;
5017
5018 DBG("Flushing app session buffers for ust app pid %d", app->pid);
5019
5020 rcu_read_lock();
5021
5022 if (!app->compatible) {
5023 goto end_not_compatible;
5024 }
5025
5026 pthread_mutex_lock(&ua_sess->lock);
5027
5028 if (ua_sess->deleted) {
5029 goto end_deleted;
5030 }
5031
5032 health_code_update();
5033
5034 /* Find the consumer socket matching the app's bitness. */
5035 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5036 ua_sess->consumer);
5037
5038 /* Flush buffers. */
5039 switch (ua_sess->buffer_type) {
5040 case LTTNG_BUFFER_PER_PID:
5041 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
5042 node.node) {
5043 health_code_update();
5044 ret = consumer_flush_channel(socket, ua_chan->key);
5045 if (ret) {
5046 ERR("Error flushing consumer channel");
5047 retval = -1;
5048 continue;
5049 }
5050 }
5051 break;
5052 case LTTNG_BUFFER_PER_UID:
5053 default:
5054 assert(0);
5055 break;
5056 }
5057
5058 health_code_update();
5059
5060 end_deleted:
5061 pthread_mutex_unlock(&ua_sess->lock);
5062
5063 end_not_compatible:
5064 rcu_read_unlock();
5065 health_code_update();
5066 return retval;
5067 }
5068
5069 /*
5070 * Flush buffers for all applications for a specific UST session.
5071 * Called with UST session lock held.
5072 */
5073 static
5074 int ust_app_flush_session(struct ltt_ust_session *usess)
5076 {
5077 int ret = 0;
5078
5079 DBG("Flushing session buffers for all ust apps");
5080
5081 rcu_read_lock();
5082
5083 /* Flush buffers and push metadata. */
5084 switch (usess->buffer_type) {
5085 case LTTNG_BUFFER_PER_UID:
5086 {
5087 struct buffer_reg_uid *reg;
5088 struct lttng_ht_iter iter;
5089
5090 /* Flush all per-UID buffers associated with that session. */
5091 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5092 struct ust_registry_session *ust_session_reg;
5093 struct buffer_reg_channel *reg_chan;
5094 struct consumer_socket *socket;
5095
5096 /* Get the consumer socket to use to push the metadata. */
5097 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5098 usess->consumer);
5099 if (!socket) {
5100 /* Ignore request if no consumer is found for the session. */
5101 continue;
5102 }
5103
5104 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5105 reg_chan, node.node) {
5106 /*
5107 * The following call will print error values so the return
5108 * code is of little importance because whatever happens, we
5109 * have to try them all.
5110 */
5111 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
5112 }
5113
5114 ust_session_reg = reg->registry->reg.ust;
5115 /* Push metadata. */
5116 (void) push_metadata(ust_session_reg, usess->consumer);
5117 }
5118 break;
5119 }
5120 case LTTNG_BUFFER_PER_PID:
5121 {
5122 struct ust_app_session *ua_sess;
5123 struct lttng_ht_iter iter;
5124 struct ust_app *app;
5125
5126 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5127 ua_sess = lookup_session_by_app(usess, app);
5128 if (ua_sess == NULL) {
5129 continue;
5130 }
5131 (void) ust_app_flush_app_session(app, ua_sess);
5132 }
5133 break;
5134 }
5135 default:
5136 ret = -1;
5137 assert(0);
5138 break;
5139 }
5140
5141 rcu_read_unlock();
5142 health_code_update();
5143 return ret;
5144 }
5145
5146 static
5147 int ust_app_clear_quiescent_app_session(struct ust_app *app,
5148 struct ust_app_session *ua_sess)
5149 {
5150 int ret = 0;
5151 struct lttng_ht_iter iter;
5152 struct ust_app_channel *ua_chan;
5153 struct consumer_socket *socket;
5154
5155 DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
5156
5157 rcu_read_lock();
5158
5159 if (!app->compatible) {
5160 goto end_not_compatible;
5161 }
5162
5163 pthread_mutex_lock(&ua_sess->lock);
5164
5165 if (ua_sess->deleted) {
5166 goto end_unlock;
5167 }
5168
5169 health_code_update();
5170
5171 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5172 ua_sess->consumer);
5173 if (!socket) {
5174 ERR("Failed to find consumer (%" PRIu32 ") socket",
5175 app->bits_per_long);
5176 ret = -1;
5177 goto end_unlock;
5178 }
5179
5180 /* Clear quiescent state. */
5181 switch (ua_sess->buffer_type) {
5182 case LTTNG_BUFFER_PER_PID:
5183 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
5184 ua_chan, node.node) {
5185 health_code_update();
5186 ret = consumer_clear_quiescent_channel(socket,
5187 ua_chan->key);
5188 if (ret) {
5189 ERR("Error clearing quiescent state for consumer channel");
5190 ret = -1;
5191 continue;
5192 }
5193 }
5194 break;
5195 case LTTNG_BUFFER_PER_UID:
5196 default:
5197 assert(0);
5198 ret = -1;
5199 break;
5200 }
5201
5202 health_code_update();
5203
5204 end_unlock:
5205 pthread_mutex_unlock(&ua_sess->lock);
5206
5207 end_not_compatible:
5208 rcu_read_unlock();
5209 health_code_update();
5210 return ret;
5211 }
5212
5213 /*
5214 * Clear quiescent state in each stream for all applications for a
5215 * specific UST session.
5216 * Called with UST session lock held.
5217 */
5218 static
5219 int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
5221 {
5222 int ret = 0;
5223
5224 DBG("Clearing stream quiescent state for all ust apps");
5225
5226 rcu_read_lock();
5227
5228 switch (usess->buffer_type) {
5229 case LTTNG_BUFFER_PER_UID:
5230 {
5231 struct lttng_ht_iter iter;
5232 struct buffer_reg_uid *reg;
5233
5234 /*
5235 * Clear quiescent for all per UID buffers associated to
5236 * that session.
5237 */
5238 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5239 struct consumer_socket *socket;
5240 struct buffer_reg_channel *reg_chan;
5241
5242 /* Get the associated consumer socket. */
5243 socket = consumer_find_socket_by_bitness(
5244 reg->bits_per_long, usess->consumer);
5245 if (!socket) {
5246 /*
5247 * Ignore request if no consumer is found for
5248 * the session.
5249 */
5250 continue;
5251 }
5252
5253 cds_lfht_for_each_entry(reg->registry->channels->ht,
5254 &iter.iter, reg_chan, node.node) {
5255 /*
5256 * The following call will print error values so
5257 * the return code is of little importance
5258 * because whatever happens, we have to try them
5259 * all.
5260 */
5261 (void) consumer_clear_quiescent_channel(socket,
5262 reg_chan->consumer_key);
5263 }
5264 }
5265 break;
5266 }
5267 case LTTNG_BUFFER_PER_PID:
5268 {
5269 struct ust_app_session *ua_sess;
5270 struct lttng_ht_iter iter;
5271 struct ust_app *app;
5272
5273 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
5274 pid_n.node) {
5275 ua_sess = lookup_session_by_app(usess, app);
5276 if (ua_sess == NULL) {
5277 continue;
5278 }
5279 (void) ust_app_clear_quiescent_app_session(app,
5280 ua_sess);
5281 }
5282 break;
5283 }
5284 default:
5285 ret = -1;
5286 assert(0);
5287 break;
5288 }
5289
5290 rcu_read_unlock();
5291 health_code_update();
5292 return ret;
5293 }
5294
5295 /*
5296 * Destroy a specific UST session in apps.
5297 */
5298 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
5299 {
5300 int ret;
5301 struct ust_app_session *ua_sess;
5302 struct lttng_ht_iter iter;
5303 struct lttng_ht_node_u64 *node;
5304
5305 DBG("Destroy tracing for ust app pid %d", app->pid);
5306
5307 rcu_read_lock();
5308
5309 if (!app->compatible) {
5310 goto end;
5311 }
5312
5313 __lookup_session_by_app(usess, app, &iter);
5314 node = lttng_ht_iter_get_node_u64(&iter);
5315 if (node == NULL) {
5316 /* Session is being deleted or is already deleted. */
5317 goto end;
5318 }
5319 ua_sess = caa_container_of(node, struct ust_app_session, node);
5320
5321 health_code_update();
5322 destroy_app_session(app, ua_sess);
5323
5324 health_code_update();
5325
5326 /* Quiescent wait after stopping trace */
5327 pthread_mutex_lock(&app->sock_lock);
5328 ret = ustctl_wait_quiescent(app->sock);
5329 pthread_mutex_unlock(&app->sock_lock);
5330 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5331 ERR("UST app wait quiescent failed for app pid %d ret %d",
5332 app->pid, ret);
5333 }
5334 end:
5335 rcu_read_unlock();
5336 health_code_update();
5337 return 0;
5338 }
5339
5340 /*
5341 * Start tracing for the UST session.
5342 */
5343 int ust_app_start_trace_all(struct ltt_ust_session *usess)
5344 {
5345 struct lttng_ht_iter iter;
5346 struct ust_app *app;
5347
5348 DBG("Starting all UST traces");
5349
5350 /*
5351 * Even though the start trace might fail, flag this session active so
5352 * other applications coming in are started by default.
5353 */
5354 usess->active = 1;
5355
5356 rcu_read_lock();
5357
5358 /*
5359 * In a start-stop-start use-case, we need to clear the quiescent state
5360 * of each channel set by the prior stop command, thus ensuring that a
5361 * following stop or destroy is sure to grab a timestamp_end near those
5362 * operations, even if the packet is empty.
5363 */
5364 (void) ust_app_clear_quiescent_session(usess);
5365
5366 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5367 ust_app_global_update(usess, app);
5368 }
5369
5370 rcu_read_unlock();
5371
5372 return 0;
5373 }
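
/*
 * Start-stop-start scenario motivating the quiescent-state clear above
 * (illustrative lttng CLI sequence, not part of this file):
 *
 *   lttng start my-session
 *   lttng stop my-session    <- streams are flagged quiescent
 *   lttng start my-session   <- quiescent state must be cleared so that a
 *                               later stop or destroy grabs a fresh
 *                               timestamp_end, even for empty packets
 */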
5374
5375 /*
5376 * Stop tracing for the UST session.
5377 * Called with UST session lock held.
5378 */
5379 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
5380 {
5381 int ret = 0;
5382 struct lttng_ht_iter iter;
5383 struct ust_app *app;
5384
5385 DBG("Stopping all UST traces");
5386
5387 /*
5388 * Even though the stop trace might fail, flag this session inactive so
5389 * other applications coming in are not started by default.
5390 */
5391 usess->active = 0;
5392
5393 rcu_read_lock();
5394
5395 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5396 ret = ust_app_stop_trace(usess, app);
5397 if (ret < 0) {
5398 /* Continue to next apps even on error */
5399 continue;
5400 }
5401 }
5402
5403 (void) ust_app_flush_session(usess);
5404
5405 rcu_read_unlock();
5406
5407 return 0;
5408 }
5409
5410 /*
5411 * Destroy the UST session of every registered app.
5412 */
5413 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
5414 {
5415 int ret = 0;
5416 struct lttng_ht_iter iter;
5417 struct ust_app *app;
5418
5419 DBG("Destroy all UST traces");
5420
5421 rcu_read_lock();
5422
5423 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5424 ret = destroy_trace(usess, app);
5425 if (ret < 0) {
5426 /* Continue to next apps even on error */
5427 continue;
5428 }
5429 }
5430
5431 rcu_read_unlock();
5432
5433 return 0;
5434 }
5435
5436 /* The ua_sess lock must be held by the caller. */
5437 static
5438 int find_or_create_ust_app_channel(
5439 struct ltt_ust_session *usess,
5440 struct ust_app_session *ua_sess,
5441 struct ust_app *app,
5442 struct ltt_ust_channel *uchan,
5443 struct ust_app_channel **ua_chan)
5444 {
5445 int ret = 0;
5446 struct lttng_ht_iter iter;
5447 struct lttng_ht_node_str *ua_chan_node;
5448
5449 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
5450 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
5451 if (ua_chan_node) {
5452 *ua_chan = caa_container_of(ua_chan_node,
5453 struct ust_app_channel, node);
5454 goto end;
5455 }
5456
5457 ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
5458 if (ret) {
5459 goto end;
5460 }
5461 end:
5462 return ret;
5463 }
5464
5465 static
5466 int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
5467 struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
5468 struct ust_app *app)
5469 {
5470 int ret = 0;
5471 struct ust_app_event *ua_event = NULL;
5472
5473 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
5474 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
5475 if (!ua_event) {
5476 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5477 if (ret < 0) {
5478 goto end;
5479 }
5480 } else {
5481 if (ua_event->enabled != uevent->enabled) {
5482 ret = uevent->enabled ?
5483 enable_ust_app_event(ua_sess, ua_event, app) :
5484 disable_ust_app_event(ua_sess, ua_event, app);
5485 }
5486 }
5487
5488 end:
5489 return ret;
5490 }
5491
5492 static
5493 void ust_app_synchronize_tokens(struct ust_app *app)
5494 {
5495 int ret = 0;
5496 enum lttng_error_code ret_code;
5497 enum lttng_trigger_status t_status;
5498 struct lttng_ht_iter app_trigger_iter;
5499 struct lttng_triggers *triggers = NULL;
5500 struct ust_app_token_event_rule *token_event_rule_element;
5501 unsigned int count;
5502
5503 rcu_read_lock();
5504 /* TODO: is it necessary to protect against new triggers being added?
5505 * notification_trigger_tokens_ht is still the backing data structure
5506 * for this listing. Leave it there for now.
5507 */
5508 pthread_mutex_lock(&notification_trigger_tokens_ht_lock);
5509 ret_code = notification_thread_command_get_tokens(
5510 notification_thread_handle, &triggers);
5511 if (ret_code != LTTNG_OK) {
5512 ret = -1;
5513 goto end;
5514 }
5515
5516 assert(triggers);
5517
5518 t_status = lttng_triggers_get_count(triggers, &count);
5519 if (t_status != LTTNG_TRIGGER_STATUS_OK) {
5520 ret = -1;
5521 goto end;
5522 }
5523
5524 for (unsigned int i = 0; i < count; i++) {
5525 struct lttng_condition *condition;
5526 struct lttng_event_rule *event_rule;
5527 struct lttng_trigger *trigger;
5528 struct ust_app_token_event_rule *ua_token;
5529 uint64_t token;
5530
5531 trigger = lttng_triggers_get_pointer_of_index(triggers, i);
5532 assert(trigger);
5533
5534 /* TODO: error checking and type checking */
5535 token = lttng_trigger_get_tracer_token(trigger);
5536 condition = lttng_trigger_get_condition(trigger);
5537 (void) lttng_condition_event_rule_get_rule_mutable(condition, &event_rule);
5538
5539 if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
5540 /* Skip kernel-related triggers. */
5541 continue;
5542 }
5543
5544 /* Check whether this trigger's token is already known to the app. */
5545 ua_token = find_ust_app_token_event_rule(app->tokens_ht, token);
5546 if (!ua_token) {
5547 ret = create_ust_app_token_event_rule(trigger, app);
5548 if (ret < 0) {
5549 goto end;
5550 }
5551 }
5552 }
5553
5554 /* Remove all unknown triggers from the app.
5555 * TODO: find a better way than this; do it on the unregister command
5556 * and be specific about the token to remove instead of going over all
5557 * triggers known to the app. This is suboptimal.
5558 */
5559 cds_lfht_for_each_entry (app->tokens_ht->ht, &app_trigger_iter.iter,
5560 token_event_rule_element, node.node) {
5561 uint64_t token;
5562 bool found = false;
5563
5564 token = token_event_rule_element->token;
5565
5566 /*
5567 * Check if the app event trigger still exists on the
5568 * notification side.
5569 * TODO: might want to change the backing data struct of the
5570 * lttng_triggers object to allow quick lookup?
5571 * For the kernel domain, most of this can be removed once we
5572 * delete on a per-trigger basis.
5573 */
5574
5575 for (unsigned int i = 0; i < count; i++) {
5576 struct lttng_trigger *trigger;
5577 uint64_t inner_token;
5578
5579 trigger = lttng_triggers_get_pointer_of_index(
5580 triggers, i);
5581 assert(trigger);
5582
5583 inner_token = lttng_trigger_get_tracer_token(trigger);
5584
5585 if (inner_token == token) {
5586 found = true;
5587 break;
5588 }
5589 }
5590
5591 if (found) {
5592 /* Still valid */
5593 continue;
5594 }
5595
5596 /* TODO: this removal API is awkward; consider a cleaner interface. */
5597 /* The deletion must not live inside assert(): it has to run under NDEBUG. */
5598 ret = lttng_ht_del(app->tokens_ht, &app_trigger_iter);
5599 assert(!ret);
5598
5599 (void) disable_ust_object(app, token_event_rule_element->obj);
5600
5601 delete_ust_app_token_event_rule(app->sock, token_event_rule_element, app);
5602 }
5603 end:
5604 if (triggers) { lttng_triggers_destroy(triggers); }
5605 rcu_read_unlock();
5606 pthread_mutex_unlock(&notification_trigger_tokens_ht_lock);
5607 return;
5608 }
5609
5610 /*
5611 * The caller must ensure that the application is compatible and is tracked
5612 * by the process attribute trackers.
5613 */
5614 static
5615 void ust_app_synchronize(struct ltt_ust_session *usess,
5616 struct ust_app *app)
5617 {
5618 int ret = 0;
5619 struct cds_lfht_iter uchan_iter;
5620 struct ltt_ust_channel *uchan;
5621 struct ust_app_session *ua_sess = NULL;
5622
5623 /*
5624 * The application's configuration should only be synchronized for
5625 * active sessions.
5626 */
5627 assert(usess->active);
5628
5629 ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
5630 if (ret < 0) {
5631 /* Tracer is probably gone or ENOMEM. */
5632 goto error;
5633 }
5634 assert(ua_sess);
5635
5636 pthread_mutex_lock(&ua_sess->lock);
5637 if (ua_sess->deleted) {
5638 pthread_mutex_unlock(&ua_sess->lock);
5639 goto end;
5640 }
5641
5642 rcu_read_lock();
5643 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
5644 uchan, node.node) {
5645 struct ust_app_channel *ua_chan;
5646 struct cds_lfht_iter uevent_iter;
5647 struct ltt_ust_event *uevent;
5648
5649 /*
5650 * Search for a matching ust_app_channel. If none is found,
5651 * create it. Creating the channel will cause the ua_chan
5652 * structure to be allocated, the channel buffers to be
5653 * allocated (if necessary) and sent to the application, and
5654 * all enabled contexts will be added to the channel.
5655 */
5656 ret = find_or_create_ust_app_channel(usess, ua_sess,
5657 app, uchan, &ua_chan);
5658 if (ret) {
5659 /* Tracer is probably gone or ENOMEM. */
5660 goto error_unlock;
5661 }
5662
5663 if (!ua_chan) {
5664 /* ua_chan will be NULL for the metadata channel */
5665 continue;
5666 }
5667
5668 cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
5669 node.node) {
5670 ret = ust_app_channel_synchronize_event(ua_chan,
5671 uevent, ua_sess, app);
5672 if (ret) {
5673 goto error_unlock;
5674 }
5675 }
5676
5677 if (ua_chan->enabled != uchan->enabled) {
5678 ret = uchan->enabled ?
5679 enable_ust_app_channel(ua_sess, uchan, app) :
5680 disable_ust_app_channel(ua_sess, ua_chan, app);
5681 if (ret) {
5682 goto error_unlock;
5683 }
5684 }
5685 }
5686 rcu_read_unlock();
5687
5688 end:
5689 pthread_mutex_unlock(&ua_sess->lock);
5690 /* Everything went well at this point. */
5691 return;
5692
5693 error_unlock:
5694 rcu_read_unlock();
5695 pthread_mutex_unlock(&ua_sess->lock);
5696 error:
5697 if (ua_sess) {
5698 destroy_app_session(app, ua_sess);
5699 }
5700 return;
5701 }
5702
5703 static
5704 void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
5705 {
5706 struct ust_app_session *ua_sess;
5707
5708 ua_sess = lookup_session_by_app(usess, app);
5709 if (ua_sess == NULL) {
5710 return;
5711 }
5712 destroy_app_session(app, ua_sess);
5713 }
5714
5715 /*
5716 * Add channels/events from UST global domain to registered apps at sock.
5717 *
5718 * Called with session lock held.
5719 * Called with RCU read-side lock held.
5720 */
5721 void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
5722 {
5723 assert(usess);
5724 assert(usess->active);
5725
5726 DBG2("UST app global update for app sock %d for session id %" PRIu64,
5727 app->sock, usess->id);
5728
5729 if (!app->compatible) {
5730 return;
5731 }
5732 if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
5733 usess, app->pid) &&
5734 trace_ust_id_tracker_lookup(
5735 LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
5736 usess, app->uid) &&
5737 trace_ust_id_tracker_lookup(
5738 LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
5739 usess, app->gid)) {
5740 /*
5741 * Synchronize the application's internal tracing configuration
5742 * and start tracing.
5743 */
5744 ust_app_synchronize(usess, app);
5745 ust_app_start_trace(usess, app);
5746 } else {
5747 ust_app_global_destroy(usess, app);
5748 }
5749 }
5750
5751 void ust_app_global_update_tokens(struct ust_app *app)
5752 {
5753 DBG2("UST app global update token for app sock %d", app->sock);
5754
5755 if (!app->compatible) {
5756 return;
5757 }
5758 if (app->token_communication.handle == NULL) {
5759 WARN("UST app global update token for app sock %d skipped since communcation handle is null", app->sock);
5760 return;
5761 }
5762
5763 ust_app_synchronize_tokens(app);
5764 }
5765
5766 /*
5767 * Called with session lock held.
5768 */
5769 void ust_app_global_update_all(struct ltt_ust_session *usess)
5770 {
5771 struct lttng_ht_iter iter;
5772 struct ust_app *app;
5773
5774 rcu_read_lock();
5775 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5776 ust_app_global_update(usess, app);
5777 }
5778 rcu_read_unlock();
5779 }
5780
5781 void ust_app_global_update_all_tokens(void)
5782 {
5783 struct lttng_ht_iter iter;
5784 struct ust_app *app;
5785
5786 rcu_read_lock();
5787 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5788 ust_app_global_update_tokens(app);
5789 }
5790 rcu_read_unlock();
5791 }
5792
5793 void ust_app_update_trigger_error_count(struct lttng_trigger *trigger)
5794 {
5795 uint64_t error_count = 0;
5796 enum trigger_error_accounting_status status;
5797
5798 status = trigger_error_accounting_get_count(trigger, &error_count);
5799 if (status != TRIGGER_ERROR_ACCOUNTING_STATUS_OK) {
5800 ERR("Error getting trigger error count");
5801 }
5802
5803 lttng_trigger_set_error_count(trigger, error_count);
5804 }
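/*
 * Usage sketch (hypothetical caller, for illustration only): before a
 * trigger is serialized for a listing client, its error count can be
 * refreshed so the client sees up-to-date error accounting. Note that on
 * accounting failure above, error_count stays at 0 and that value is
 * still published. The getter named below is an assumption, not part of
 * this file.
 *
 *	struct lttng_trigger *trigger = ...;
 *
 *	ust_app_update_trigger_error_count(trigger);
 *	// A hypothetical lttng_trigger_get_error_count(trigger) would now
 *	// return the value read from the error accounting counters.
 */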
5805
5806 /*
5807 * Add context to a specific channel for global UST domain.
5808 */
5809 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
5810 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
5811 {
5812 int ret = 0;
5813 struct lttng_ht_node_str *ua_chan_node;
5814 struct lttng_ht_iter iter, uiter;
5815 struct ust_app_channel *ua_chan = NULL;
5816 struct ust_app_session *ua_sess;
5817 struct ust_app *app;
5818
5819 assert(usess->active);
5820
5821 rcu_read_lock();
5822 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5823 if (!app->compatible) {
5824 /*
5825 		 * TODO: In time, we should notify the caller of this
5826 		 * incompatibility by reporting it as a version error.
5827 */
5828 continue;
5829 }
5830 ua_sess = lookup_session_by_app(usess, app);
5831 if (ua_sess == NULL) {
5832 continue;
5833 }
5834
5835 pthread_mutex_lock(&ua_sess->lock);
5836
5837 if (ua_sess->deleted) {
5838 pthread_mutex_unlock(&ua_sess->lock);
5839 continue;
5840 }
5841
5842 /* Lookup channel in the ust app session */
5843 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
5844 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
5845 if (ua_chan_node == NULL) {
5846 goto next_app;
5847 }
5848 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
5849 node);
5850 ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
5851 if (ret < 0) {
5852 goto next_app;
5853 }
5854 next_app:
5855 pthread_mutex_unlock(&ua_sess->lock);
5856 }
5857
5858 rcu_read_unlock();
5859 return ret;
5860 }
5861
5862 /*
5863 * Receive registration and populate the given msg structure.
5864 *
5866  * On success, return 0; otherwise, return the negative value from the ustctl call.
5866 */
5867 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
5868 {
5869 int ret;
5870 uint32_t pid, ppid, uid, gid;
5871
5872 assert(msg);
5873
5874 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
5875 &pid, &ppid, &uid, &gid,
5876 &msg->bits_per_long,
5877 &msg->uint8_t_alignment,
5878 &msg->uint16_t_alignment,
5879 &msg->uint32_t_alignment,
5880 &msg->uint64_t_alignment,
5881 &msg->long_alignment,
5882 &msg->byte_order,
5883 msg->name);
5884 if (ret < 0) {
5885 switch (-ret) {
5886 case EPIPE:
5887 case ECONNRESET:
5888 case LTTNG_UST_ERR_EXITING:
5889 DBG3("UST app recv reg message failed. Application died");
5890 break;
5891 case LTTNG_UST_ERR_UNSUP_MAJOR:
5892 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
5893 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
5894 LTTNG_UST_ABI_MINOR_VERSION);
5895 break;
5896 default:
5897 ERR("UST app recv reg message failed with ret %d", ret);
5898 break;
5899 }
5900 goto error;
5901 }
5902 msg->pid = (pid_t) pid;
5903 msg->ppid = (pid_t) ppid;
5904 msg->uid = (uid_t) uid;
5905 msg->gid = (gid_t) gid;
5906
5907 error:
5908 return ret;
5909 }
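/*
 * A minimal sketch of the expected caller (the actual registration thread
 * lives elsewhere in the session daemon; names here are illustrative
 * assumptions):
 *
 *	struct ust_register_msg msg;
 *
 *	if (ust_app_recv_registration(app_sock, &msg) < 0) {
 *		// Application died or speaks an unsupported ABI; drop the
 *		// socket instead of registering the application.
 *		close(app_sock);
 *	} else {
 *		// msg.pid, msg.uid, msg.bits_per_long, etc. are now valid
 *		// and can be used to create the ust_app object.
 *	}
 */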
5910
5911 /*
5912 * Return a ust app session object using the application object and the
5913  * session object descriptor as a key. If not found, NULL is returned.
5914  * An RCU read-side lock MUST be acquired when calling this function.
5915 */
5916 static struct ust_app_session *find_session_by_objd(struct ust_app *app,
5917 int objd)
5918 {
5919 struct lttng_ht_node_ulong *node;
5920 struct lttng_ht_iter iter;
5921 struct ust_app_session *ua_sess = NULL;
5922
5923 assert(app);
5924
5925 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
5926 node = lttng_ht_iter_get_node_ulong(&iter);
5927 if (node == NULL) {
5928 DBG2("UST app session find by objd %d not found", objd);
5929 goto error;
5930 }
5931
5932 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
5933
5934 error:
5935 return ua_sess;
5936 }
5937
5938 /*
5939 * Return a ust app channel object using the application object and the channel
5940  * object descriptor as a key. If not found, NULL is returned. An RCU read-side
5941  * lock MUST be acquired before calling this function.
5942 */
5943 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
5944 int objd)
5945 {
5946 struct lttng_ht_node_ulong *node;
5947 struct lttng_ht_iter iter;
5948 struct ust_app_channel *ua_chan = NULL;
5949
5950 assert(app);
5951
5952 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
5953 node = lttng_ht_iter_get_node_ulong(&iter);
5954 if (node == NULL) {
5955 DBG2("UST app channel find by objd %d not found", objd);
5956 goto error;
5957 }
5958
5959 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
5960
5961 error:
5962 return ua_chan;
5963 }
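/*
 * Both lookup helpers above must run inside an RCU read-side critical
 * section, and the returned pointer must not be used past it. A minimal
 * sketch, assuming a valid app and channel object descriptor:
 *
 *	rcu_read_lock();
 *	ua_chan = find_channel_by_objd(app, cobjd);
 *	if (ua_chan) {
 *		// Use ua_chan only within the read-side section.
 *	}
 *	rcu_read_unlock();
 */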
5964
5965 /*
5966 * Reply to a register channel notification from an application on the notify
5967 * socket. The channel metadata is also created.
5968 *
5969 * The session UST registry lock is acquired in this function.
5970 *
5971  * On success, 0 is returned; otherwise, a negative value.
5972 */
5973 static int reply_ust_register_channel(int sock, int cobjd,
5974 size_t nr_fields, struct ustctl_field *fields)
5975 {
5976 int ret, ret_code = 0;
5977 uint32_t chan_id;
5978 uint64_t chan_reg_key;
5979 enum ustctl_channel_header type;
5980 struct ust_app *app;
5981 struct ust_app_channel *ua_chan;
5982 struct ust_app_session *ua_sess;
5983 struct ust_registry_session *registry;
5984 struct ust_registry_channel *chan_reg;
5985
5986 rcu_read_lock();
5987
5988 	/* Lookup application. If not found, the application is being torn down; this is not an error. */
5989 app = find_app_by_notify_sock(sock);
5990 if (!app) {
5991 DBG("Application socket %d is being torn down. Abort event notify",
5992 sock);
5993 ret = 0;
5994 goto error_rcu_unlock;
5995 }
5996
5997 /* Lookup channel by UST object descriptor. */
5998 ua_chan = find_channel_by_objd(app, cobjd);
5999 if (!ua_chan) {
6000 DBG("Application channel is being torn down. Abort event notify");
6001 ret = 0;
6002 goto error_rcu_unlock;
6003 }
6004
6005 assert(ua_chan->session);
6006 ua_sess = ua_chan->session;
6007
6008 /* Get right session registry depending on the session buffer type. */
6009 registry = get_session_registry(ua_sess);
6010 if (!registry) {
6011 DBG("Application session is being torn down. Abort event notify");
6012 ret = 0;
6013 goto error_rcu_unlock;
6014 	}
6015
6016 /* Depending on the buffer type, a different channel key is used. */
6017 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6018 chan_reg_key = ua_chan->tracing_channel_id;
6019 } else {
6020 chan_reg_key = ua_chan->key;
6021 }
6022
6023 pthread_mutex_lock(&registry->lock);
6024
6025 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
6026 assert(chan_reg);
6027
6028 if (!chan_reg->register_done) {
6029 /*
6030 * TODO: eventually use the registry event count for
6031 * this channel to better guess header type for per-pid
6032 * buffers.
6033 */
6034 type = USTCTL_CHANNEL_HEADER_LARGE;
6035 chan_reg->nr_ctx_fields = nr_fields;
6036 chan_reg->ctx_fields = fields;
6037 fields = NULL;
6038 chan_reg->header_type = type;
6039 } else {
6040 /* Get current already assigned values. */
6041 type = chan_reg->header_type;
6042 }
6043 /* Channel id is set during the object creation. */
6044 chan_id = chan_reg->chan_id;
6045
6046 /* Append to metadata */
6047 if (!chan_reg->metadata_dumped) {
6048 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
6049 if (ret_code) {
6050 ERR("Error appending channel metadata (errno = %d)", ret_code);
6051 goto reply;
6052 }
6053 }
6054
6055 reply:
6056 DBG3("UST app replying to register channel key %" PRIu64
6057 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
6058 ret_code);
6059
6060 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
6061 if (ret < 0) {
6062 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6063 ERR("UST app reply channel failed with ret %d", ret);
6064 } else {
6065 DBG3("UST app reply channel failed. Application died");
6066 }
6067 goto error;
6068 }
6069
6070 /* This channel registry registration is completed. */
6071 chan_reg->register_done = 1;
6072
6073 error:
6074 pthread_mutex_unlock(&registry->lock);
6075 error_rcu_unlock:
6076 rcu_read_unlock();
6077 free(fields);
6078 return ret;
6079 }
6080
6081 /*
6082 * Add event to the UST channel registry. When the event is added to the
6083 * registry, the metadata is also created. Once done, this replies to the
6084 * application with the appropriate error code.
6085 *
6086  * The session UST registry lock is acquired in this function.
6087  *
6088  * On success, 0 is returned; otherwise, a negative value.
6089 */
6090 static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
6091 char *sig, size_t nr_fields, struct ustctl_field *fields,
6092 int loglevel_value, char *model_emf_uri)
6093 {
6094 int ret, ret_code;
6095 uint32_t event_id = 0;
6096 uint64_t chan_reg_key;
6097 struct ust_app *app;
6098 struct ust_app_channel *ua_chan;
6099 struct ust_app_session *ua_sess;
6100 struct ust_registry_session *registry;
6101
6102 rcu_read_lock();
6103
6104 	/* Lookup application. If not found, the application is being torn down; this is not an error. */
6105 app = find_app_by_notify_sock(sock);
6106 if (!app) {
6107 DBG("Application socket %d is being torn down. Abort event notify",
6108 sock);
6109 ret = 0;
6110 goto error_rcu_unlock;
6111 }
6112
6113 /* Lookup channel by UST object descriptor. */
6114 ua_chan = find_channel_by_objd(app, cobjd);
6115 if (!ua_chan) {
6116 DBG("Application channel is being torn down. Abort event notify");
6117 ret = 0;
6118 goto error_rcu_unlock;
6119 }
6120
6121 assert(ua_chan->session);
6122 ua_sess = ua_chan->session;
6123
6124 registry = get_session_registry(ua_sess);
6125 if (!registry) {
6126 DBG("Application session is being torn down. Abort event notify");
6127 ret = 0;
6128 goto error_rcu_unlock;
6129 }
6130
6131 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6132 chan_reg_key = ua_chan->tracing_channel_id;
6133 } else {
6134 chan_reg_key = ua_chan->key;
6135 }
6136
6137 pthread_mutex_lock(&registry->lock);
6138
6139 /*
6140 	 * From this point on, this call acquires ownership of sig, fields
6141 	 * and model_emf_uri, meaning any required frees are done inside it. These
6142 	 * three variables MUST NOT be read or written after this point.
6143 */
6144 ret_code = ust_registry_create_event(registry, chan_reg_key,
6145 sobjd, cobjd, name, sig, nr_fields, fields,
6146 loglevel_value, model_emf_uri, ua_sess->buffer_type,
6147 &event_id, app);
6148 sig = NULL;
6149 fields = NULL;
6150 model_emf_uri = NULL;
6151
6152 /*
6153 	 * The return code is passed back through ustctl so that, on error, the
6154 	 * application can be notified. It is important not to return a negative
6155 	 * error here, or else the application will get closed.
6156 */
6157 ret = ustctl_reply_register_event(sock, event_id, ret_code);
6158 if (ret < 0) {
6159 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6160 ERR("UST app reply event failed with ret %d", ret);
6161 } else {
6162 DBG3("UST app reply event failed. Application died");
6163 }
6164 /*
6165 		 * No need to wipe the created event since the application socket will
6166 		 * get closed on error, cleaning up everything by itself.
6167 */
6168 goto error;
6169 }
6170
6171 DBG3("UST registry event %s with id %" PRId32 " added successfully",
6172 name, event_id);
6173
6174 error:
6175 pthread_mutex_unlock(&registry->lock);
6176 error_rcu_unlock:
6177 rcu_read_unlock();
6178 free(sig);
6179 free(fields);
6180 free(model_emf_uri);
6181 return ret;
6182 }
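/*
 * Note on the ownership convention above: the caller hands over
 * heap-allocated sig, fields and model_emf_uri and must treat those
 * pointers as dangling afterwards, since this function frees them on
 * every path. A conforming caller can defensively clear its copies, as
 * in this sketch (names illustrative):
 *
 *	ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig,
 *			nr_fields, fields, loglevel_value, model_emf_uri);
 *	sig = NULL;
 *	fields = NULL;
 *	model_emf_uri = NULL;
 */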
6183
6184 /*
6185 * Add enum to the UST session registry. Once done, this replies to the
6186 * application with the appropriate error code.
6187 *
6188 * The session UST registry lock is acquired within this function.
6189 *
6190  * On success, 0 is returned; otherwise, a negative value.
6191 */
6192 static int add_enum_ust_registry(int sock, int sobjd, char *name,
6193 struct ustctl_enum_entry *entries, size_t nr_entries)
6194 {
6195 int ret = 0, ret_code;
6196 struct ust_app *app;
6197 struct ust_app_session *ua_sess;
6198 struct ust_registry_session *registry;
6199 uint64_t enum_id = -1ULL;
6200
6201 rcu_read_lock();
6202
6203 	/* Lookup application. If not found, the application is being torn down; this is not an error. */
6204 app = find_app_by_notify_sock(sock);
6205 if (!app) {
6206 		/* Do not return an error; this is not an error case. */
6207 DBG("Application socket %d is being torn down. Aborting enum registration",
6208 sock);
6209 free(entries);
6210 goto error_rcu_unlock;
6211 }
6212
6213 /* Lookup session by UST object descriptor. */
6214 ua_sess = find_session_by_objd(app, sobjd);
6215 if (!ua_sess) {
6216 		/* Do not return an error; this is not an error case. */
6217 DBG("Application session is being torn down (session not found). Aborting enum registration.");
6218 free(entries);
6219 goto error_rcu_unlock;
6220 }
6221
6222 registry = get_session_registry(ua_sess);
6223 if (!registry) {
6224 DBG("Application session is being torn down (registry not found). Aborting enum registration.");
6225 free(entries);
6226 goto error_rcu_unlock;
6227 }
6228
6229 pthread_mutex_lock(&registry->lock);
6230
6231 /*
6232 	 * From this point on, the callee acquires ownership of
6233 	 * entries. The variable entries MUST NOT be read or written
6234 	 * after this call.
6235 */
6236 ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
6237 entries, nr_entries, &enum_id);
6238 entries = NULL;
6239
6240 /*
6241 	 * The return code is passed back through ustctl so that, on error, the
6242 	 * application can be notified. It is important not to return a negative
6243 	 * error here, or else the application will get closed.
6244 */
6245 ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
6246 if (ret < 0) {
6247 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6248 ERR("UST app reply enum failed with ret %d", ret);
6249 } else {
6250 DBG3("UST app reply enum failed. Application died");
6251 }
6252 /*
6253 		 * No need to wipe the created enum since the application socket will
6254 		 * get closed on error, cleaning up everything by itself.
6255 */
6256 goto error;
6257 }
6258
6259 DBG3("UST registry enum %s added successfully or already found", name);
6260
6261 error:
6262 pthread_mutex_unlock(&registry->lock);
6263 error_rcu_unlock:
6264 rcu_read_unlock();
6265 return ret;
6266 }
6267
6268 /*
6269 * Handle application notification through the given notify socket.
6270 *
6271 * Return 0 on success or else a negative value.
6272 */
6273 int ust_app_recv_notify(int sock)
6274 {
6275 int ret;
6276 enum ustctl_notify_cmd cmd;
6277
6278 DBG3("UST app receiving notify from sock %d", sock);
6279
6280 ret = ustctl_recv_notify(sock, &cmd);
6281 if (ret < 0) {
6282 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6283 ERR("UST app recv notify failed with ret %d", ret);
6284 } else {
6285 DBG3("UST app recv notify failed. Application died");
6286 }
6287 goto error;
6288 }
6289
6290 switch (cmd) {
6291 case USTCTL_NOTIFY_CMD_EVENT:
6292 {
6293 int sobjd, cobjd, loglevel_value;
6294 char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
6295 size_t nr_fields;
6296 struct ustctl_field *fields;
6297
6298 DBG2("UST app ustctl register event received");
6299
6300 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
6301 &loglevel_value, &sig, &nr_fields, &fields,
6302 &model_emf_uri);
6303 if (ret < 0) {
6304 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6305 ERR("UST app recv event failed with ret %d", ret);
6306 } else {
6307 DBG3("UST app recv event failed. Application died");
6308 }
6309 goto error;
6310 }
6311
6312 /*
6313 		 * Add the event to the UST registry coming from the notify socket. This
6314 		 * call will free sig, fields and model_emf_uri if needed; this
6315 		 * code path loses ownership of these variables and transfers them
6316 		 * to the called function.
6317 */
6318 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
6319 fields, loglevel_value, model_emf_uri);
6320 if (ret < 0) {
6321 goto error;
6322 }
6323
6324 break;
6325 }
6326 case USTCTL_NOTIFY_CMD_CHANNEL:
6327 {
6328 int sobjd, cobjd;
6329 size_t nr_fields;
6330 struct ustctl_field *fields;
6331
6332 DBG2("UST app ustctl register channel received");
6333
6334 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
6335 &fields);
6336 if (ret < 0) {
6337 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6338 ERR("UST app recv channel failed with ret %d", ret);
6339 } else {
6340 DBG3("UST app recv channel failed. Application died");
6341 }
6342 goto error;
6343 }
6344
6345 /*
6346 		 * Ownership of the fields is transferred to this function call, meaning
6347 		 * that they will be freed if needed. After this, it is invalid to access
6348 		 * fields or to clean them up.
6349 */
6350 ret = reply_ust_register_channel(sock, cobjd, nr_fields,
6351 fields);
6352 if (ret < 0) {
6353 goto error;
6354 }
6355
6356 break;
6357 }
6358 case USTCTL_NOTIFY_CMD_ENUM:
6359 {
6360 int sobjd;
6361 char name[LTTNG_UST_SYM_NAME_LEN];
6362 size_t nr_entries;
6363 struct ustctl_enum_entry *entries;
6364
6365 DBG2("UST app ustctl register enum received");
6366
6367 ret = ustctl_recv_register_enum(sock, &sobjd, name,
6368 &entries, &nr_entries);
6369 if (ret < 0) {
6370 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6371 ERR("UST app recv enum failed with ret %d", ret);
6372 } else {
6373 DBG3("UST app recv enum failed. Application died");
6374 }
6375 goto error;
6376 }
6377
6378 /* Callee assumes ownership of entries */
6379 ret = add_enum_ust_registry(sock, sobjd, name,
6380 entries, nr_entries);
6381 if (ret < 0) {
6382 goto error;
6383 }
6384
6385 break;
6386 }
6387 default:
6388 /* Should NEVER happen. */
6389 assert(0);
6390 }
6391
6392 error:
6393 return ret;
6394 }
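/*
 * Sketch of the expected driver loop (the actual notify thread lives
 * elsewhere in the session daemon; this is an illustration under that
 * assumption, with hypothetical polling details):
 *
 *	for (;;) {
 *		// Wait for activity on the set of notify sockets...
 *		if (revents & POLLIN) {
 *			if (ust_app_recv_notify(sock) < 0) {
 *				// Protocol error or application death:
 *				// unregister the socket so it is closed
 *				// after an RCU grace period.
 *				ust_app_notify_sock_unregister(sock);
 *			}
 *		}
 *	}
 */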
6395
6396 /*
6397 * Once the notify socket hangs up, this is called. First, it tries to find the
6398 * corresponding application. On failure, the call_rcu to close the socket is
6399 * executed. If an application is found, it tries to delete it from the notify
6400  * socket hash table. Whatever the result, it proceeds to the call_rcu.
6401  *
6402  * Note that an object needs to be allocated here; on ENOMEM failure, the
6403  * call_rcu is not done but the rest of the cleanup is.
6404 */
6405 void ust_app_notify_sock_unregister(int sock)
6406 {
6407 int err_enomem = 0;
6408 struct lttng_ht_iter iter;
6409 struct ust_app *app;
6410 struct ust_app_notify_sock_obj *obj;
6411
6412 assert(sock >= 0);
6413
6414 rcu_read_lock();
6415
6416 obj = zmalloc(sizeof(*obj));
6417 if (!obj) {
6418 /*
6419 * An ENOMEM is kind of uncool. If this strikes we continue the
6420 * procedure but the call_rcu will not be called. In this case, we
6421 * accept the fd leak rather than possibly creating an unsynchronized
6422 * state between threads.
6423 *
6424 * TODO: The notify object should be created once the notify socket is
6425 		 * registered and stored independently from the ust app object. The
6426 * tricky part is to synchronize the teardown of the application and
6427 * this notify object. Let's keep that in mind so we can avoid this
6428 * kind of shenanigans with ENOMEM in the teardown path.
6429 */
6430 err_enomem = 1;
6431 } else {
6432 obj->fd = sock;
6433 }
6434
6435 DBG("UST app notify socket unregister %d", sock);
6436
6437 /*
6438 * Lookup application by notify socket. If this fails, this means that the
6439 * hash table delete has already been done by the application
6440 * unregistration process so we can safely close the notify socket in a
6441 * call RCU.
6442 */
6443 app = find_app_by_notify_sock(sock);
6444 if (!app) {
6445 goto close_socket;
6446 }
6447
6448 iter.iter.node = &app->notify_sock_n.node;
6449
6450 /*
6451 	 * Whether the deletion below fails or succeeds, we have to close the
6452 	 * socket after a grace period, so we proceed to the call_rcu either
6453 	 * way. If the deletion is successful, the application is no longer
6454 	 * visible to other threads; if it fails, the application was already
6455 	 * deleted from the hash table, so in both cases we just have to close
6456 	 * the socket.
6457 */
6458 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
6459
6460 close_socket:
6461 rcu_read_unlock();
6462
6463 /*
6464 	 * Close the socket after a grace period to avoid it being reused before
6465 	 * the application object is freed, which could create a race between
6466 	 * threads trying to add a unique entry to the global hash table.
6467 */
6468 if (!err_enomem) {
6469 call_rcu(&obj->head, close_notify_sock_rcu);
6470 }
6471 }
6472
6473 /*
6474 * Destroy a ust app data structure and free its memory.
6475 */
6476 void ust_app_destroy(struct ust_app *app)
6477 {
6478 if (!app) {
6479 return;
6480 }
6481
6482 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
6483 }
6484
6485 /*
6486 * Take a snapshot for a given UST session. The snapshot is sent to the given
6487 * output.
6488 *
6489 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
6490 */
6491 enum lttng_error_code ust_app_snapshot_record(
6492 const struct ltt_ust_session *usess,
6493 const struct consumer_output *output, int wait,
6494 uint64_t nb_packets_per_stream)
6495 {
6496 int ret = 0;
6497 enum lttng_error_code status = LTTNG_OK;
6498 struct lttng_ht_iter iter;
6499 struct ust_app *app;
6500 char *trace_path = NULL;
6501
6502 assert(usess);
6503 assert(output);
6504
6505 rcu_read_lock();
6506
6507 switch (usess->buffer_type) {
6508 case LTTNG_BUFFER_PER_UID:
6509 {
6510 struct buffer_reg_uid *reg;
6511
6512 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6513 struct buffer_reg_channel *reg_chan;
6514 struct consumer_socket *socket;
6515 char pathname[PATH_MAX];
6516 size_t consumer_path_offset = 0;
6517
6518 if (!reg->registry->reg.ust->metadata_key) {
6519 /* Skip since no metadata is present */
6520 continue;
6521 }
6522
6523 			/* Get consumer socket to use to push the metadata. */
6524 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6525 usess->consumer);
6526 if (!socket) {
6527 status = LTTNG_ERR_INVALID;
6528 goto error;
6529 }
6530
6531 memset(pathname, 0, sizeof(pathname));
6532 ret = snprintf(pathname, sizeof(pathname),
6533 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
6534 reg->uid, reg->bits_per_long);
6535 if (ret < 0) {
6536 PERROR("snprintf snapshot path");
6537 status = LTTNG_ERR_INVALID;
6538 goto error;
6539 }
6540 			/* Free path allocated on previous iteration. */
6541 free(trace_path);
6542 trace_path = setup_channel_trace_path(usess->consumer, pathname,
6543 &consumer_path_offset);
6544 if (!trace_path) {
6545 status = LTTNG_ERR_INVALID;
6546 goto error;
6547 }
6548 /* Add the UST default trace dir to path. */
6549 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6550 reg_chan, node.node) {
6551 status = consumer_snapshot_channel(socket,
6552 reg_chan->consumer_key,
6553 output, 0, usess->uid,
6554 usess->gid, &trace_path[consumer_path_offset], wait,
6555 nb_packets_per_stream);
6556 if (status != LTTNG_OK) {
6557 goto error;
6558 }
6559 }
6560 status = consumer_snapshot_channel(socket,
6561 reg->registry->reg.ust->metadata_key, output, 1,
6562 usess->uid, usess->gid, &trace_path[consumer_path_offset],
6563 wait, 0);
6564 if (status != LTTNG_OK) {
6565 goto error;
6566 }
6567 }
6568 break;
6569 }
6570 case LTTNG_BUFFER_PER_PID:
6571 {
6572 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6573 struct consumer_socket *socket;
6574 struct lttng_ht_iter chan_iter;
6575 struct ust_app_channel *ua_chan;
6576 struct ust_app_session *ua_sess;
6577 struct ust_registry_session *registry;
6578 char pathname[PATH_MAX];
6579 size_t consumer_path_offset = 0;
6580
6581 ua_sess = lookup_session_by_app(usess, app);
6582 if (!ua_sess) {
6583 /* Session not associated with this app. */
6584 continue;
6585 }
6586
6587 /* Get the right consumer socket for the application. */
6588 socket = consumer_find_socket_by_bitness(app->bits_per_long,
6589 output);
6590 if (!socket) {
6591 status = LTTNG_ERR_INVALID;
6592 goto error;
6593 }
6594
6595 /* Add the UST default trace dir to path. */
6596 memset(pathname, 0, sizeof(pathname));
6597 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
6598 ua_sess->path);
6599 if (ret < 0) {
6600 status = LTTNG_ERR_INVALID;
6601 PERROR("snprintf snapshot path");
6602 goto error;
6603 }
6604 			/* Free path allocated on previous iteration. */
6605 free(trace_path);
6606 trace_path = setup_channel_trace_path(usess->consumer, pathname,
6607 &consumer_path_offset);
6608 if (!trace_path) {
6609 status = LTTNG_ERR_INVALID;
6610 goto error;
6611 }
6612 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6613 ua_chan, node.node) {
6614 status = consumer_snapshot_channel(socket,
6615 ua_chan->key, output, 0,
6616 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6617 lttng_credentials_get_gid(&ua_sess->effective_credentials),
6618 &trace_path[consumer_path_offset], wait,
6619 nb_packets_per_stream);
6620 switch (status) {
6621 case LTTNG_OK:
6622 break;
6623 case LTTNG_ERR_CHAN_NOT_FOUND:
6624 continue;
6625 default:
6626 goto error;
6627 }
6628 }
6629
6630 registry = get_session_registry(ua_sess);
6631 if (!registry) {
6632 DBG("Application session is being torn down. Skip application.");
6633 continue;
6634 }
6635 status = consumer_snapshot_channel(socket,
6636 registry->metadata_key, output, 1,
6637 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6638 lttng_credentials_get_gid(&ua_sess->effective_credentials),
6639 &trace_path[consumer_path_offset], wait, 0);
6640 switch (status) {
6641 case LTTNG_OK:
6642 break;
6643 case LTTNG_ERR_CHAN_NOT_FOUND:
6644 continue;
6645 default:
6646 goto error;
6647 }
6648 }
6649 break;
6650 }
6651 default:
6652 assert(0);
6653 break;
6654 }
6655
6656 error:
6657 free(trace_path);
6658 rcu_read_unlock();
6659 return status;
6660 }
6661
6662 /*
6663 * Return the size taken by one more packet per stream.
6664 */
6665 uint64_t ust_app_get_size_one_more_packet_per_stream(
6666 const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
6667 {
6668 uint64_t tot_size = 0;
6669 struct ust_app *app;
6670 struct lttng_ht_iter iter;
6671
6672 assert(usess);
6673
6674 switch (usess->buffer_type) {
6675 case LTTNG_BUFFER_PER_UID:
6676 {
6677 struct buffer_reg_uid *reg;
6678
6679 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6680 struct buffer_reg_channel *reg_chan;
6681
6682 rcu_read_lock();
6683 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6684 reg_chan, node.node) {
6685 if (cur_nr_packets >= reg_chan->num_subbuf) {
6686 /*
6687 					 * Don't take the channel into account if we
6688 					 * already grabbed all of its packets.
6689 */
6690 continue;
6691 }
6692 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
6693 }
6694 rcu_read_unlock();
6695 }
6696 break;
6697 }
6698 case LTTNG_BUFFER_PER_PID:
6699 {
6700 rcu_read_lock();
6701 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6702 struct ust_app_channel *ua_chan;
6703 struct ust_app_session *ua_sess;
6704 struct lttng_ht_iter chan_iter;
6705
6706 ua_sess = lookup_session_by_app(usess, app);
6707 if (!ua_sess) {
6708 /* Session not associated with this app. */
6709 continue;
6710 }
6711
6712 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6713 ua_chan, node.node) {
6714 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
6715 /*
6716 					 * Don't take the channel into account if we
6717 					 * already grabbed all of its packets.
6718 */
6719 continue;
6720 }
6721 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
6722 }
6723 }
6724 rcu_read_unlock();
6725 break;
6726 }
6727 default:
6728 assert(0);
6729 break;
6730 }
6731
6732 return tot_size;
6733 }
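/*
 * Worked example with illustrative numbers: a channel with 4 streams and
 * a sub-buffer size of 4096 bytes contributes 4096 * 4 = 16384 bytes to
 * tot_size when one more packet per stream is requested, provided
 * cur_nr_packets is still below that channel's num_subbuf. Channels
 * whose packets are already fully accounted for contribute nothing.
 */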
6734
6735 int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
6736 struct cds_list_head *buffer_reg_uid_list,
6737 struct consumer_output *consumer, uint64_t uchan_id,
6738 int overwrite, uint64_t *discarded, uint64_t *lost)
6739 {
6740 int ret;
6741 uint64_t consumer_chan_key;
6742
6743 *discarded = 0;
6744 *lost = 0;
6745
6746 ret = buffer_reg_uid_consumer_channel_key(
6747 buffer_reg_uid_list, uchan_id, &consumer_chan_key);
6748 if (ret < 0) {
6749 /* Not found */
6750 ret = 0;
6751 goto end;
6752 }
6753
6754 if (overwrite) {
6755 ret = consumer_get_lost_packets(ust_session_id,
6756 consumer_chan_key, consumer, lost);
6757 } else {
6758 ret = consumer_get_discarded_events(ust_session_id,
6759 consumer_chan_key, consumer, discarded);
6760 }
6761
6762 end:
6763 return ret;
6764 }
6765
6766 int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
6767 struct ltt_ust_channel *uchan,
6768 struct consumer_output *consumer, int overwrite,
6769 uint64_t *discarded, uint64_t *lost)
6770 {
6771 int ret = 0;
6772 struct lttng_ht_iter iter;
6773 struct lttng_ht_node_str *ua_chan_node;
6774 struct ust_app *app;
6775 struct ust_app_session *ua_sess;
6776 struct ust_app_channel *ua_chan;
6777
6778 *discarded = 0;
6779 *lost = 0;
6780
6781 rcu_read_lock();
6782 /*
6783 	 * Iterate over all registered applications. Sum the counters of
6784 	 * every application containing the requested session and channel.
6785 */
6786 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6787 struct lttng_ht_iter uiter;
6788
6789 ua_sess = lookup_session_by_app(usess, app);
6790 if (ua_sess == NULL) {
6791 continue;
6792 }
6793
6794 /* Get channel */
6795 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
6796 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6797 /* If the session is found for the app, the channel must be there */
6798 assert(ua_chan_node);
6799
6800 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
6801
6802 if (overwrite) {
6803 uint64_t _lost;
6804
6805 ret = consumer_get_lost_packets(usess->id, ua_chan->key,
6806 consumer, &_lost);
6807 if (ret < 0) {
6808 break;
6809 }
6810 (*lost) += _lost;
6811 } else {
6812 uint64_t _discarded;
6813
6814 ret = consumer_get_discarded_events(usess->id,
6815 ua_chan->key, consumer, &_discarded);
6816 if (ret < 0) {
6817 break;
6818 }
6819 (*discarded) += _discarded;
6820 }
6821 }
6822
6823 rcu_read_unlock();
6824 return ret;
6825 }
6826
6827 static
6828 int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
6829 struct ust_app *app)
6830 {
6831 int ret = 0;
6832 struct ust_app_session *ua_sess;
6833
6834 DBG("Regenerating the metadata for ust app pid %d", app->pid);
6835
6836 rcu_read_lock();
6837
6838 ua_sess = lookup_session_by_app(usess, app);
6839 if (ua_sess == NULL) {
6840 /* The session is in teardown process. Ignore and continue. */
6841 goto end;
6842 }
6843
6844 pthread_mutex_lock(&ua_sess->lock);
6845
6846 if (ua_sess->deleted) {
6847 goto end_unlock;
6848 }
6849
6850 pthread_mutex_lock(&app->sock_lock);
6851 ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
6852 pthread_mutex_unlock(&app->sock_lock);
6853
6854 end_unlock:
6855 pthread_mutex_unlock(&ua_sess->lock);
6856
6857 end:
6858 rcu_read_unlock();
6859 health_code_update();
6860 return ret;
6861 }
6862
6863 /*
6864 * Regenerate the statedump for each app in the session.
6865 */
6866 int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
6867 {
6868 int ret = 0;
6869 struct lttng_ht_iter iter;
6870 struct ust_app *app;
6871
6872 DBG("Regenerating the metadata for all UST apps");
6873
6874 rcu_read_lock();
6875
6876 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6877 if (!app->compatible) {
6878 continue;
6879 }
6880
6881 ret = ust_app_regenerate_statedump(usess, app);
6882 if (ret < 0) {
6883 /* Continue to the next app even on error */
6884 continue;
6885 }
6886 }
6887
6888 rcu_read_unlock();
6889
6890 return 0;
6891 }
6892
6893 /*
6894 * Rotate all the channels of a session.
6895 *
6896 * Return LTTNG_OK on success or else an LTTng error code.
6897 */
6898 enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
6899 {
6900 int ret;
6901 enum lttng_error_code cmd_ret = LTTNG_OK;
6902 struct lttng_ht_iter iter;
6903 struct ust_app *app;
6904 struct ltt_ust_session *usess = session->ust_session;
6905
6906 assert(usess);
6907
6908 rcu_read_lock();
6909
6910 switch (usess->buffer_type) {
6911 case LTTNG_BUFFER_PER_UID:
6912 {
6913 struct buffer_reg_uid *reg;
6914
6915 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6916 struct buffer_reg_channel *reg_chan;
6917 struct consumer_socket *socket;
6918
6919 if (!reg->registry->reg.ust->metadata_key) {
6920 /* Skip since no metadata is present */
6921 continue;
6922 }
6923
6924 			/* Get consumer socket to use to push the metadata. */
6925 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6926 usess->consumer);
6927 if (!socket) {
6928 cmd_ret = LTTNG_ERR_INVALID;
6929 goto error;
6930 }
6931
6932 /* Rotate the data channels. */
6933 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6934 reg_chan, node.node) {
6935 ret = consumer_rotate_channel(socket,
6936 reg_chan->consumer_key,
6937 usess->uid, usess->gid,
6938 usess->consumer,
6939 /* is_metadata_channel */ false);
6940 if (ret < 0) {
6941 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
6942 goto error;
6943 }
6944 }
6945
6946 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
6947
6948 ret = consumer_rotate_channel(socket,
6949 reg->registry->reg.ust->metadata_key,
6950 usess->uid, usess->gid,
6951 usess->consumer,
6952 /* is_metadata_channel */ true);
6953 if (ret < 0) {
6954 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
6955 goto error;
6956 }
6957 }
6958 break;
6959 }
6960 case LTTNG_BUFFER_PER_PID:
6961 {
6962 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6963 struct consumer_socket *socket;
6964 struct lttng_ht_iter chan_iter;
6965 struct ust_app_channel *ua_chan;
6966 struct ust_app_session *ua_sess;
6967 struct ust_registry_session *registry;
6968
6969 ua_sess = lookup_session_by_app(usess, app);
6970 if (!ua_sess) {
6971 /* Session not associated with this app. */
6972 continue;
6973 }
6974
6975 /* Get the right consumer socket for the application. */
6976 socket = consumer_find_socket_by_bitness(app->bits_per_long,
6977 usess->consumer);
6978 if (!socket) {
6979 cmd_ret = LTTNG_ERR_INVALID;
6980 goto error;
6981 }
6982
6983 registry = get_session_registry(ua_sess);
6984 if (!registry) {
6985 DBG("Application session is being torn down. Skip application.");
6986 continue;
6987 }
6988
6989 /* Rotate the data channels. */
6990 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6991 ua_chan, node.node) {
6992 ret = consumer_rotate_channel(socket,
6993 ua_chan->key,
6994 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6995 lttng_credentials_get_gid(&ua_sess->effective_credentials),
6996 ua_sess->consumer,
6997 /* is_metadata_channel */ false);
6998 if (ret < 0) {
6999 /* Per-PID buffer and application going away. */
7000 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7001 continue;
7002 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7003 goto error;
7004 }
7005 }
7006
7007 /* Rotate the metadata channel. */
7008 (void) push_metadata(registry, usess->consumer);
7009 ret = consumer_rotate_channel(socket,
7010 registry->metadata_key,
7011 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7012 lttng_credentials_get_gid(&ua_sess->effective_credentials),
7013 ua_sess->consumer,
7014 /* is_metadata_channel */ true);
7015 if (ret < 0) {
7016 /* Per-PID buffer and application going away. */
7017 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7018 continue;
7019 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7020 goto error;
7021 }
7022 }
7023 break;
7024 }
7025 default:
7026 assert(0);
7027 break;
7028 }
7029
7030 cmd_ret = LTTNG_OK;
7031
7032 error:
7033 rcu_read_unlock();
7034 return cmd_ret;
7035 }
7036
7037 enum lttng_error_code ust_app_create_channel_subdirectories(
7038 const struct ltt_ust_session *usess)
7039 {
7040 enum lttng_error_code ret = LTTNG_OK;
7041 struct lttng_ht_iter iter;
7042 enum lttng_trace_chunk_status chunk_status;
7043 char *pathname_index;
7044 int fmt_ret;
7045
7046 assert(usess->current_trace_chunk);
7047 rcu_read_lock();
7048
7049 switch (usess->buffer_type) {
7050 case LTTNG_BUFFER_PER_UID:
7051 {
7052 struct buffer_reg_uid *reg;
7053
7054 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7055 fmt_ret = asprintf(&pathname_index,
7056 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
7057 reg->uid, reg->bits_per_long);
7058 if (fmt_ret < 0) {
7059 ERR("Failed to format channel index directory");
7060 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7061 goto error;
7062 }
7063
7064 /*
7065 * Create the index subdirectory which will take care
7066 * of implicitly creating the channel's path.
7067 */
7068 chunk_status = lttng_trace_chunk_create_subdirectory(
7069 usess->current_trace_chunk,
7070 pathname_index);
7071 free(pathname_index);
7072 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7073 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7074 goto error;
7075 }
7076 }
7077 break;
7078 }
7079 case LTTNG_BUFFER_PER_PID:
7080 {
7081 struct ust_app *app;
7082
7083 /*
7084 * Create the toplevel ust/ directory in case no apps are running.
7085 */
7086 chunk_status = lttng_trace_chunk_create_subdirectory(
7087 usess->current_trace_chunk,
7088 DEFAULT_UST_TRACE_DIR);
7089 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7090 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7091 goto error;
7092 }
7093
7094 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
7095 pid_n.node) {
7096 struct ust_app_session *ua_sess;
7097 struct ust_registry_session *registry;
7098
7099 ua_sess = lookup_session_by_app(usess, app);
7100 if (!ua_sess) {
7101 /* Session not associated with this app. */
7102 continue;
7103 }
7104
7105 registry = get_session_registry(ua_sess);
7106 if (!registry) {
7107 DBG("Application session is being torn down. Skip application.");
7108 continue;
7109 }
7110
7111 fmt_ret = asprintf(&pathname_index,
7112 DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
7113 ua_sess->path);
7114 if (fmt_ret < 0) {
7115 ERR("Failed to format channel index directory");
7116 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7117 goto error;
7118 }
7119 /*
7120 * Create the index subdirectory which will take care
7121 * of implicitly creating the channel's path.
7122 */
7123 chunk_status = lttng_trace_chunk_create_subdirectory(
7124 usess->current_trace_chunk,
7125 pathname_index);
7126 free(pathname_index);
7127 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7128 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7129 goto error;
7130 }
7131 }
7132 break;
7133 }
7134 default:
7135 abort();
7136 }
7137
7138 ret = LTTNG_OK;
7139 error:
7140 rcu_read_unlock();
7141 return ret;
7142 }
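/*
 * For reference, and assuming the stock values of DEFAULT_UST_TRACE_DIR
 * ("ust"), DEFAULT_UST_TRACE_UID_PATH and DEFAULT_INDEX_DIR ("index"),
 * the directories created above look like this sketch:
 *
 *	per-UID buffers: ust/uid/<uid>/<bits_per_long>-bit/index/
 *	per-PID buffers: ust/<application session path>/index/
 *
 * Creating the index subdirectory implicitly creates the channel path
 * leading up to it.
 */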
7143
7144 /*
7145 * Clear all the channels of a session.
7146 *
7147 * Return LTTNG_OK on success or else an LTTng error code.
7148 */
7149 enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
7150 {
7151 int ret;
7152 enum lttng_error_code cmd_ret = LTTNG_OK;
7153 struct lttng_ht_iter iter;
7154 struct ust_app *app;
7155 struct ltt_ust_session *usess = session->ust_session;
7156
7157 assert(usess);
7158
7159 rcu_read_lock();
7160
7161 if (usess->active) {
7162 ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
7163 cmd_ret = LTTNG_ERR_FATAL;
7164 goto end;
7165 }
7166
7167 switch (usess->buffer_type) {
7168 case LTTNG_BUFFER_PER_UID:
7169 {
7170 struct buffer_reg_uid *reg;
7171
7172 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7173 struct buffer_reg_channel *reg_chan;
7174 struct consumer_socket *socket;
7175
7176 			/* Get consumer socket to use to push the metadata. */
7177 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7178 usess->consumer);
7179 if (!socket) {
7180 cmd_ret = LTTNG_ERR_INVALID;
7181 goto error_socket;
7182 }
7183
7184 /* Clear the data channels. */
7185 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
7186 reg_chan, node.node) {
7187 ret = consumer_clear_channel(socket,
7188 reg_chan->consumer_key);
7189 if (ret < 0) {
7190 goto error;
7191 }
7192 }
7193
7194 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
7195
7196 /*
7197 * Clear the metadata channel.
7198 			 * The metadata channel is not cleared per se, but we still need to
7199 			 * perform a rotation operation on it behind the scenes.
7200 */
7201 ret = consumer_clear_channel(socket,
7202 reg->registry->reg.ust->metadata_key);
7203 if (ret < 0) {
7204 goto error;
7205 }
7206 }
7207 break;
7208 }
7209 case LTTNG_BUFFER_PER_PID:
7210 {
7211 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7212 struct consumer_socket *socket;
7213 struct lttng_ht_iter chan_iter;
7214 struct ust_app_channel *ua_chan;
7215 struct ust_app_session *ua_sess;
7216 struct ust_registry_session *registry;
7217
7218 ua_sess = lookup_session_by_app(usess, app);
7219 if (!ua_sess) {
7220 /* Session not associated with this app. */
7221 continue;
7222 }
7223
7224 /* Get the right consumer socket for the application. */
7225 socket = consumer_find_socket_by_bitness(app->bits_per_long,
7226 usess->consumer);
7227 if (!socket) {
7228 cmd_ret = LTTNG_ERR_INVALID;
7229 goto error_socket;
7230 }
7231
7232 registry = get_session_registry(ua_sess);
7233 if (!registry) {
7234 DBG("Application session is being torn down. Skip application.");
7235 continue;
7236 }
7237
7238 /* Clear the data channels. */
7239 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
7240 ua_chan, node.node) {
7241 ret = consumer_clear_channel(socket, ua_chan->key);
7242 if (ret < 0) {
7243 /* Per-PID buffer and application going away. */
7244 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7245 continue;
7246 }
7247 goto error;
7248 }
7249 }
7250
7251 (void) push_metadata(registry, usess->consumer);
7252
7253 /*
7254 * Clear the metadata channel.
7255 			 * The metadata channel is not cleared per se, but we still need to
7256 			 * perform a rotation operation on it behind the scenes.
7257 */
7258 ret = consumer_clear_channel(socket, registry->metadata_key);
7259 if (ret < 0) {
7260 /* Per-PID buffer and application going away. */
7261 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7262 continue;
7263 }
7264 goto error;
7265 }
7266 }
7267 break;
7268 }
7269 default:
7270 assert(0);
7271 break;
7272 }
7273
7274 cmd_ret = LTTNG_OK;
7275 goto end;
7276
7277 error:
7278 switch (-ret) {
7279 case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
7280 cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
7281 break;
7282 default:
7283 cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
7284 }
7285
7286 error_socket:
7287 end:
7288 rcu_read_unlock();
7289 return cmd_ret;
7290 }
7291
7292 /*
7293 * This function skips the metadata channel as the begin/end timestamps of a
7294 * metadata packet are useless.
7295 *
7296 * Moreover, opening a packet after a "clear" will cause problems for live
7297 * sessions as it will introduce padding that was not part of the first trace
7298 * chunk. The relay daemon expects the content of the metadata stream of
7299 * successive metadata trace chunks to be strict supersets of one another.
7300 *
7301 * For example, flushing a packet at the beginning of the metadata stream of
7302 * a trace chunk resulting from a "clear" session command will cause the
7303 * size of the metadata stream of the new trace chunk to not match the size of
7304 * the metadata stream of the original chunk. This will confuse the relay
7305 * daemon as the same "offset" in a metadata stream will no longer point
7306 * to the same content.
7307 */
7308 enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
7309 {
7310 enum lttng_error_code ret = LTTNG_OK;
7311 struct lttng_ht_iter iter;
7312 struct ltt_ust_session *usess = session->ust_session;
7313
7314 assert(usess);
7315
7316 rcu_read_lock();
7317
7318 switch (usess->buffer_type) {
7319 case LTTNG_BUFFER_PER_UID:
7320 {
7321 struct buffer_reg_uid *reg;
7322
7323 cds_list_for_each_entry (
7324 reg, &usess->buffer_reg_uid_list, lnode) {
7325 struct buffer_reg_channel *reg_chan;
7326 struct consumer_socket *socket;
7327
7328 socket = consumer_find_socket_by_bitness(
7329 reg->bits_per_long, usess->consumer);
7330 if (!socket) {
7331 ret = LTTNG_ERR_FATAL;
7332 goto error;
7333 }
7334
7335 cds_lfht_for_each_entry(reg->registry->channels->ht,
7336 &iter.iter, reg_chan, node.node) {
7337 const int open_ret =
7338 consumer_open_channel_packets(
7339 socket,
7340 reg_chan->consumer_key);
7341
7342 if (open_ret < 0) {
7343 ret = LTTNG_ERR_UNK;
7344 goto error;
7345 }
7346 }
7347 }
7348 break;
7349 }
7350 case LTTNG_BUFFER_PER_PID:
7351 {
7352 struct ust_app *app;
7353
7354 cds_lfht_for_each_entry (
7355 ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7356 struct consumer_socket *socket;
7357 struct lttng_ht_iter chan_iter;
7358 struct ust_app_channel *ua_chan;
7359 struct ust_app_session *ua_sess;
7360 struct ust_registry_session *registry;
7361
7362 ua_sess = lookup_session_by_app(usess, app);
7363 if (!ua_sess) {
7364 /* Session not associated with this app. */
7365 continue;
7366 }
7367
7368 /* Get the right consumer socket for the application. */
7369 socket = consumer_find_socket_by_bitness(
7370 app->bits_per_long, usess->consumer);
7371 if (!socket) {
7372 ret = LTTNG_ERR_FATAL;
7373 goto error;
7374 }
7375
7376 registry = get_session_registry(ua_sess);
7377 if (!registry) {
7378 DBG("Application session is being torn down. Skip application.");
7379 continue;
7380 }
7381
7382 cds_lfht_for_each_entry(ua_sess->channels->ht,
7383 &chan_iter.iter, ua_chan, node.node) {
7384 const int open_ret =
7385 consumer_open_channel_packets(
7386 socket,
7387 ua_chan->key);
7388
7389 if (open_ret < 0) {
7390 /*
7391 * Per-PID buffer and application going
7392 * away.
7393 */
7394 if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7395 continue;
7396 }
7397
7398 ret = LTTNG_ERR_UNK;
7399 goto error;
7400 }
7401 }
7402 }
7403 break;
7404 }
7405 default:
7406 abort();
7407 break;
7408 }
7409
7410 error:
7411 rcu_read_unlock();
7412 return ret;
7413 }