/*
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#define _LGPL_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <signal.h>

#include <common/bytecode/bytecode.h>
#include <common/compat/errno.h>
#include <common/common.h>
#include <common/hashtable/utils.h>
#include <lttng/event-rule/event-rule.h>
#include <lttng/event-rule/event-rule-internal.h>
#include <lttng/event-rule/tracepoint.h>
#include <lttng/condition/condition.h>
#include <lttng/condition/event-rule-matches-internal.h>
#include <lttng/condition/event-rule-matches.h>
#include <lttng/trigger/trigger-internal.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "condition-internal.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "ust-app.h"
#include "ust-consumer.h"
#include "lttng-ust-ctl.h"
#include "lttng-ust-error.h"
#include "utils.h"
#include "session.h"
#include "lttng-sessiond.h"
#include "notification-thread-commands.h"
#include "rotate.h"
#include "event.h"
#include "event-notifier-error-accounting.h"

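/*
 * Registered applications, indexed by PID, by command socket and by
 * notification socket. All three tables reference the same ust_app
 * objects; readers access them under the RCU read-side lock.
 */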
struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;

static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return the incremented value of next_channel_key.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}

/*
 * Return the atomically incremented value of next_session_id.
 */
static uint64_t get_next_session_id(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}

static void copy_channel_attr_to_ustctl(
		struct lttng_ust_ctl_consumer_channel_attr *attr,
		struct lttng_ust_abi_channel_attr *uattr)
{
	/* Copy event attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
	attr->blocking_timeout = uattr->u.s.blocking_timeout;
}

/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes: the event name, the
 * filter bytecode, the loglevel and the exclusions.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exist; check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exist; check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}

/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = event->attr.loglevel;
	key.exclusion = event->exclusion;

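	/*
	 * cds_lfht_add_unique() returns the matching node if an equal one is
	 * already present; asserting that it returned our own node verifies
	 * that no duplicate event existed in the hash table.
	 */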
	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	assert(node_ptr == &event->node.node);
}

/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
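	/* Release the fd-limit slot that was reserved for this notify socket. */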
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}

/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exist before calling this function or else
 * NULL is returned. The RCU read side lock must be acquired.
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long,
				lttng_credentials_get_uid(&ua_sess->real_credentials));
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		assert(0);
	};

error:
	return registry;
}

/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
		struct ust_app *app)
{
	int ret;

	assert(ua_ctx);

	if (ua_ctx->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
					sock, ua_ctx->obj->handle, ret);
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}

/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	assert(ua_event);

	free(ua_event->filter);
	if (ua_event->exclusion != NULL) {
		free(ua_event->exclusion);
	}
	if (ua_event->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_event->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release event obj failed with ret %d",
					sock, ret);
		}
		free(ua_event->obj);
	}
	free(ua_event);
}

/*
 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
 * through a call_rcu().
 */
static
void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
{
	struct ust_app_event_notifier_rule *obj = caa_container_of(
			head, struct ust_app_event_notifier_rule, rcu_head);

	free(obj);
}

/*
 * Delete ust app event notifier rule safely.
 */
static void delete_ust_app_event_notifier_rule(int sock,
		struct ust_app_event_notifier_rule *ua_event_notifier_rule,
		struct ust_app *app)
{
	int ret;

	assert(ua_event_notifier_rule);

	if (ua_event_notifier_rule->exclusion != NULL) {
		free(ua_event_notifier_rule->exclusion);
	}

	if (ua_event_notifier_rule->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_event_notifier_rule->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to release event notifier object: app = '%s' (ppid %d), ret = %d",
					app->name, (int) app->ppid, ret);
		}

		free(ua_event_notifier_rule->obj);
	}

	lttng_trigger_put(ua_event_notifier_rule->trigger);
	call_rcu(&ua_event_notifier_rule->rcu_head,
			free_ust_app_event_notifier_rule_rcu);
}

/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
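		/* A stream holds two FDs: the shm fd and the wakeup fd. */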
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}

/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_channel().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}

/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

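	/*
	 * In overwrite mode the consumer accounts lost packets; in discard
	 * mode it accounts discarded events. Query the counter that matches
	 * this channel's mode.
	 */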
	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
}

/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key,
					sock >= 0);
		}
		/*
		 * A negative socket can be used by the caller when
		 * cleaning-up a ua_chan in an error path. Skip the
		 * accounting in this case.
		 */
		if (sock >= 0) {
			save_per_pid_lost_discarded_counters(ua_chan);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}

int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	return ret;
}

int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		sock = -1;
	}
	ret = lttng_ust_ctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}

/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the length of metadata pushed or else a negative value.
 * A return value of -EPIPE means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happen if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
				new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}

/*
 * For a given application and session, push metadata to consumer.
 * The consumer socket used to push the metadata is looked up by bitness
 * in the given consumer output.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of registry. It also ensures existence
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * A return value of -EPIPE means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}

/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Do not hold the registry lock while communicating with the consumerd, because
 * doing so causes inter-process deadlocks between consumerd and sessiond with
 * the metadata request notification.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;
	uint64_t metadata_key;
	bool registry_was_already_closed;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

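	/*
	 * Sample the metadata key and closed flag under the registry lock,
	 * then drop the lock before talking to the consumer (see the deadlock
	 * note above).
	 */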
	pthread_mutex_lock(&registry->lock);
	metadata_key = registry->metadata_key;
	registry_was_already_closed = registry->metadata_closed;
	if (metadata_key != 0) {
		/*
		 * Mark the metadata as closed. Even on error this means that
		 * the consumer is not responding or not found, so either way
		 * a second close should NOT be emitted for this registry.
		 */
		registry->metadata_closed = 1;
	}
	pthread_mutex_unlock(&registry->lock);

	if (metadata_key == 0 || registry_was_already_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto end;
	}

	ret = consumer_close_metadata(socket, metadata_key);
	if (ret < 0) {
		goto end;
	}

end:
	rcu_read_unlock();
	return ret;
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}

/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

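	/*
	 * Flag the session as deleted while holding its lock so concurrent
	 * users that take the lock afterwards can notice and bail out.
	 */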
	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flagged the metadata registry
		 * as closed, so don't send a close command if it is already closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}

/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;
	struct lttng_ht_iter iter;
	struct ust_app_event_notifier_rule *event_notifier_rule;
	bool event_notifier_write_fd_is_open;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Remove the event notifier rules associated with this app. */
	rcu_read_lock();
	cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
			&iter.iter, event_notifier_rule, node.node) {
		ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
		assert(!ret);

		delete_ust_app_event_notifier_rule(
				app->sock, event_notifier_rule, app);
	}

	rcu_read_unlock();

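	/*
	 * Hash table destruction must happen outside of RCU read-side
	 * critical sections and outside of the call_rcu worker; defer it to
	 * the ht-cleanup thread.
	 */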
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);
	ht_cleanup_push(app->token_to_event_notifier_rule_ht);

	/*
	 * This could be NULL if the event notifier setup failed (e.g the app
	 * was killed or the tracer does not support this feature).
	 */
	if (app->event_notifier_group.object) {
		enum lttng_error_code ret_code;
		enum event_notifier_error_accounting_status status;

		const int event_notifier_read_fd = lttng_pipe_get_readfd(
				app->event_notifier_group.event_pipe);

		ret_code = notification_thread_command_remove_tracer_event_source(
				the_notification_thread_handle,
				event_notifier_read_fd);
		if (ret_code != LTTNG_OK) {
			ERR("Failed to remove application tracer event source from notification thread");
		}

		status = event_notifier_error_accounting_unregister_app(app);
		if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
			ERR("Error unregistering app from event notifier error accounting");
		}

		lttng_ust_ctl_release_object(sock, app->event_notifier_group.object);
		free(app->event_notifier_group.object);
	}

	event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
			app->event_notifier_group.event_pipe);
	lttng_pipe_destroy(app->event_notifier_group.event_pipe);
	/*
	 * Release the file descriptors reserved for the event notifier pipe.
	 * The app could be destroyed before the write end of the pipe could be
	 * passed to the application (and closed). In that case, both file
	 * descriptors must be released.
	 */
	lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}

/*
 * URCU intermediate call to delete an UST app.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}

/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 *
 * The session list lock must be held by the caller.
 */
static void destroy_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(app);
	assert(ua_sess);

	iter.iter.node = &ua_sess->node.node;
	ret = lttng_ht_del(app->sessions, &iter);
	if (ret) {
		/* Already scheduled for teardown. */
		goto end;
	}

	/* Once deleted, free the data structure. */
	delete_ust_app_session(app->sock, ua_sess, app);

end:
	return;
}

/*
 * Alloc new UST app session.
 */
static
struct ust_app_session *alloc_ust_app_session(void)
{
	struct ust_app_session *ua_sess;

	/* Init most of the default values by allocating and zeroing */
	ua_sess = zmalloc(sizeof(struct ust_app_session));
	if (ua_sess == NULL) {
		PERROR("malloc");
		goto error_free;
	}

	ua_sess->handle = -1;
	ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
	pthread_mutex_init(&ua_sess->lock, NULL);

	return ua_sess;

error_free:
	return NULL;
}

/*
 * Alloc new UST app channel.
 */
static
struct ust_app_channel *alloc_ust_app_channel(const char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_abi_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Init most of the default values by allocating and zeroing */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = 1;
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel to lttng_ust_ctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = attr->output;
		ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
	}
	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return NULL;
}

/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
{
	struct ust_app_stream *stream = NULL;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("zmalloc ust app stream");
		goto error;
	}
	/* Zero could be a valid value for a handle, so initialize it to -1. */
	stream->handle = -1;

error:
	return stream;
}

/*
 * Alloc new UST app event.
 */
static
struct ust_app_event *alloc_ust_app_event(char *name,
		struct lttng_ust_abi_event *attr)
{
	struct ust_app_event *ua_event;

	/* Init most of the default values by allocating and zeroing */
	ua_event = zmalloc(sizeof(struct ust_app_event));
	if (ua_event == NULL) {
		PERROR("Failed to allocate ust_app_event structure");
		goto error;
	}

	ua_event->enabled = 1;
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	if (attr) {
		memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
	}

	DBG3("UST app event %s allocated", ua_event->name);

	return ua_event;

error:
	return NULL;
}

/*
 * Allocate a new UST app event notifier rule.
 */
static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
		struct lttng_trigger *trigger)
{
	enum lttng_event_rule_generate_exclusions_status
			generate_exclusion_status;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;
	struct lttng_condition *condition = NULL;
	const struct lttng_event_rule *event_rule = NULL;

	ua_event_notifier_rule = zmalloc(sizeof(struct ust_app_event_notifier_rule));
	if (ua_event_notifier_rule == NULL) {
		PERROR("Failed to allocate ust_app_event_notifier_rule structure");
		goto error;
	}

	ua_event_notifier_rule->enabled = 1;
	ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
	lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
			ua_event_notifier_rule->token);

	condition = lttng_trigger_get_condition(trigger);
	assert(condition);
	assert(lttng_condition_get_type(condition) ==
			LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);

	assert(LTTNG_CONDITION_STATUS_OK ==
			lttng_condition_event_rule_matches_get_rule(
					condition, &event_rule));
	assert(event_rule);

	ua_event_notifier_rule->error_counter_index =
			lttng_condition_event_rule_matches_get_error_counter_index(condition);
	/* Acquire the event notifier's reference to the trigger. */
	lttng_trigger_get(trigger);

	ua_event_notifier_rule->trigger = trigger;
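	/*
	 * The filter bytecode is owned by the event rule; the trigger
	 * reference acquired above keeps it alive for the lifetime of this
	 * event notifier rule.
	 */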
	ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
	generate_exclusion_status = lttng_event_rule_generate_exclusions(
			event_rule, &ua_event_notifier_rule->exclusion);
	switch (generate_exclusion_status) {
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
		break;
	default:
		/* Error occurred. */
		ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
		goto error_put_trigger;
	}

	DBG3("UST app event notifier rule allocated: token = %" PRIu64,
			ua_event_notifier_rule->token);

	return ua_event_notifier_rule;

error_put_trigger:
	lttng_trigger_put(trigger);
error:
	free(ua_event_notifier_rule);
	return NULL;
}

/*
 * Alloc new UST app context.
 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
		if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *ctx_name = NULL;

			provider_name = strdup(uctx->u.app_ctx.provider_name);
			ctx_name = strdup(uctx->u.app_ctx.ctx_name);
			if (!provider_name || !ctx_name) {
				free(provider_name);
				free(ctx_name);
				goto error;
			}

			ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
			ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
		}
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
	return ua_ctx;
error:
	free(ua_ctx);
	return NULL;
}

/*
 * Create a liblttng-ust filter bytecode from given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode(
		const struct lttng_bytecode *orig_f)
{
	struct lttng_ust_abi_filter_bytecode *filter = NULL;

	/* Copy filter bytecode. */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
		goto error;
	}

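	/*
	 * Both bytecode structures share the same binary layout (their sizes
	 * are asserted equal below), so the header and payload can be copied
	 * with a single memcpy().
	 */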
	assert(sizeof(struct lttng_bytecode) ==
			sizeof(struct lttng_ust_abi_filter_bytecode));
	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
error:
	return filter;
}

/*
 * Create a liblttng-ust capture bytecode from given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_abi_capture_bytecode *
create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
	struct lttng_ust_abi_capture_bytecode *capture = NULL;

	/* Copy capture bytecode. */
	capture = zmalloc(sizeof(*capture) + orig_f->len);
	if (!capture) {
		PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
		goto error;
	}

	assert(sizeof(struct lttng_bytecode) ==
			sizeof(struct lttng_ust_abi_capture_bytecode));
	memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
error:
	return capture;
}

/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, sock_n);

error:
	return NULL;
}

/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by notify sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, notify_sock_n);

error:
	return NULL;
}

/*
 * Lookup for an ust app event based on the event name, filter bytecode,
 * loglevel and exclusions.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		const char *name, const struct lttng_bytecode *filter,
		int loglevel_value,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel_type = loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}

/*
 * Look-up an event notifier rule based on its token id.
 *
 * Must be called with the RCU read lock held.
 * Return an ust_app_event_notifier_rule object or NULL on error.
 */
static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
		struct lttng_ht *ht, uint64_t token)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct ust_app_event_notifier_rule *event_notifier_rule = NULL;

	assert(ht);

	lttng_ht_lookup(ht, &token, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		DBG2("UST app event notifier rule token not found: token = %" PRIu64,
				token);
		goto end;
	}

	event_notifier_rule = caa_container_of(
			node, struct ust_app_event_notifier_rule, node);
end:
	return event_notifier_rule;
}

/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app create channel context failed for app (pid: %d) "
					"with ret %d", app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app add context failed. Application is dead.");
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}

/*
 * Set the filter on the tracer.
 */
static int set_ust_object_filter(struct ust_app *app,
		const struct lttng_bytecode *bytecode,
		struct lttng_ust_abi_object_data *ust_object)
{
	int ret;
	struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL;

	health_code_update();

	ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode,
			ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app set object filter failed: object = %p of app pid = %d, ret = %d",
					ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to set UST app object filter. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter successfully set: object = %p", ust_object);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}

/*
 * Set a capture bytecode for the passed object.
 * The sequence number enforces the ordering at runtime and on reception of
 * the captured payloads.
 */
static int set_ust_capture(struct ust_app *app,
		const struct lttng_bytecode *bytecode,
		unsigned int capture_seqnum,
		struct lttng_ust_abi_object_data *ust_object)
{
	int ret;
	struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL;

	health_code_update();

	ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}

	/*
	 * Set the sequence number to ensure the capture of fields is ordered.
	 */
	ust_bytecode->seqnum = capture_seqnum;

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode,
			ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app set object capture failed: object = %p of app pid = %d, ret = %d",
					ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to set UST app object capture. Application is dead.");
		}

		goto error;
	}

	DBG2("UST capture successfully set: object = %p", ust_object);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}

static
struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL;
	size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
			LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;

	ust_exclusion = zmalloc(exclusion_alloc_size);
	if (!ust_exclusion) {
		PERROR("malloc");
		goto end;
	}

	assert(sizeof(struct lttng_event_exclusion) ==
			sizeof(struct lttng_ust_abi_event_exclusion));
	memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
end:
	return ust_exclusion;
}

/*
 * Set event exclusions on the tracer.
 */
static int set_ust_object_exclusions(struct ust_app *app,
		const struct lttng_event_exclusion *exclusions,
		struct lttng_ust_abi_object_data *ust_object)
{
	int ret;
	struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL;

	assert(exclusions && exclusions->count > 0);

	health_code_update();

	ust_exclusions = create_ust_exclusion_from_exclusion(
			exclusions);
	if (!ust_exclusions) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_set_exclusion(app->sock, ust_exclusions, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to set UST app exclusions for object %p of app (pid: %d) "
					"with ret %d", ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to set UST app object exclusions. Application is dead.");
		}
		goto error;
	}

	DBG2("UST exclusions set successfully for object %p", ust_object);

error:
	health_code_update();
	free(ust_exclusions);
	return ret;
}

/*
 * Disable the specified event on the UST tracer for the UST session.
 */
static int disable_ust_object(struct ust_app *app,
		struct lttng_ust_abi_object_data *object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_disable(app->sock, object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to disable UST app object %p app (pid: %d) with ret %d",
					object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to disable UST app object. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p disabled successfully for app (pid: %d)",
			object, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Disable the specified channel on the UST tracer for the UST session.
 */
static int disable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_disable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable channel failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app channel %s disabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified channel on the UST tracer for the UST session.
 */
static int enable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_enable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable channel failed. Application is dead.");
		}
		goto error;
	}

	ua_chan->enabled = 1;

	DBG2("UST app channel %s enabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified event on the UST tracer for the UST session.
 */
static int enable_ust_object(
		struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_enable(app->sock, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
					ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to enable UST app object. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p enabled successfully for app (pid: %d)",
			ust_object, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Send channel and stream buffer to application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN; /* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}

/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1946 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1947 ua_event->attr.name, app->pid, ret);
1948 } else {
1949 /*
1950 * This is normal behavior, an application can die during the
1951 * creation process. Don't report an error so the execution can
1952 * continue normally.
1953 */
1954 ret = 0;
1955 DBG3("UST app create event failed. Application is dead.");
1956 }
1957 goto error;
1958 }
1959
1960 ua_event->handle = ua_event->obj->handle;
1961
1962 DBG2("UST app event %s created successfully for pid:%d object: %p",
1963 ua_event->attr.name, app->pid, ua_event->obj);
1964
1965 health_code_update();
1966
1967 /* Set filter if one is present. */
1968 if (ua_event->filter) {
1969 ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
1970 if (ret < 0) {
1971 goto error;
1972 }
1973 }
1974
1975 /* Set exclusions for the event */
1976 if (ua_event->exclusion) {
1977 ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
1978 if (ret < 0) {
1979 goto error;
1980 }
1981 }
1982
1983 /* Events are created disabled on the tracer; enable the event now if requested. */
1984 if (ua_event->enabled) {
1985 /*
1986 * We now need to explicitly enable the event, since it
1987 * is now disabled at creation.
1988 */
1989 ret = enable_ust_object(app, ua_event->obj);
1990 if (ret < 0) {
1991 /*
1992 * If we hit an EPERM, something is wrong with our enable call. If
1993 * we get an EEXIST, there is a problem on the tracer side since we
1994 * just created it.
1995 */
1996 switch (ret) {
1997 case -LTTNG_UST_ERR_PERM:
1998 /* Code flow problem. */
1999 abort();
2000 case -LTTNG_UST_ERR_EXIST:
2001 /* It's OK for our use case. */
2002 ret = 0;
2003 break;
2004 default:
2005 break;
2006 }
2007 goto error;
2008 }
2009 }
2010
2011 error:
2012 health_code_update();
2013 return ret;
2014 }
2015
2016 static int init_ust_event_notifier_from_event_rule(
2017 const struct lttng_event_rule *rule,
2018 struct lttng_ust_abi_event_notifier *event_notifier)
2019 {
2020 enum lttng_event_rule_status status;
2021 enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2022 int loglevel = -1, ret = 0;
2023 const char *pattern;
2024
2025 /* For now, only LTTNG_EVENT_RULE_TYPE_TRACEPOINT is supported. */
2026 assert(lttng_event_rule_get_type(rule) ==
2027 LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
2028
2029 memset(event_notifier, 0, sizeof(*event_notifier));
2030
2031 if (lttng_event_rule_targets_agent_domain(rule)) {
2032 /*
2033 * Special event for agents
2034 * The actual meat of the event is in the filter that will be
2035 * attached later on.
2036 * Set the default values for the agent event.
2037 */
2038 pattern = event_get_default_agent_ust_name(
2039 lttng_event_rule_get_domain_type(rule));
2040 loglevel = 0;
2041 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2042 } else {
2043 const struct lttng_log_level_rule *log_level_rule;
2044
2045 status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
2046 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
2047 /* At this point, this is a fatal error. */
2048 abort();
2049 }
2050
2051 status = lttng_event_rule_tracepoint_get_log_level_rule(
2052 rule, &log_level_rule);
2053 if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
2054 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2055 } else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
2056 enum lttng_log_level_rule_status llr_status;
2057
2058 switch (lttng_log_level_rule_get_type(log_level_rule)) {
2059 case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
2060 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
2061 llr_status = lttng_log_level_rule_exactly_get_level(
2062 log_level_rule, &loglevel);
2063 break;
2064 case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
2065 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
2066 llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
2067 log_level_rule, &loglevel);
2068 break;
2069 default:
2070 abort();
2071 }
2072
2073 assert(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
2074 } else {
2075 /* At this point this is a fatal error. */
2076 abort();
2077 }
2078 }
2079
2080 event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
2081 ret = lttng_strncpy(event_notifier->event.name, pattern,
2082 LTTNG_UST_ABI_SYM_NAME_LEN - 1);
2083 if (ret) {
2084 ERR("Failed to copy event rule pattern to notifier: pattern = '%s'",
2085 pattern);
2086 goto end;
2087 }
2088
2089 event_notifier->event.loglevel_type = ust_loglevel_type;
2090 event_notifier->event.loglevel = loglevel;
2091 end:
2092 return ret;
2093 }
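/*
 * Illustration of the mapping above (hypothetical rule, for documentation
 * only): a tracepoint rule carrying an "exactly" log level rule produces
 * loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE, an "at least as severe as"
 * rule produces LTTNG_UST_ABI_LOGLEVEL_RANGE, and the absence of a log level
 * rule leaves LTTNG_UST_ABI_LOGLEVEL_ALL with loglevel = -1, matching the
 * initializers above.
 */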
2094
2095 /*
2096 * Create the specified event notifier against the user space tracer of a
2097 * given application.
2098 */
2099 static int create_ust_event_notifier(struct ust_app *app,
2100 struct ust_app_event_notifier_rule *ua_event_notifier_rule)
2101 {
2102 int ret = 0;
2103 enum lttng_condition_status condition_status;
2104 const struct lttng_condition *condition = NULL;
2105 struct lttng_ust_abi_event_notifier event_notifier;
2106 const struct lttng_event_rule *event_rule = NULL;
2107 unsigned int capture_bytecode_count = 0, i;
2108 enum lttng_condition_status cond_status;
2109
2110 health_code_update();
2111 assert(app->event_notifier_group.object);
2112
2113 condition = lttng_trigger_get_const_condition(
2114 ua_event_notifier_rule->trigger);
2115 assert(condition);
2116 assert(lttng_condition_get_type(condition) ==
2117 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
2118
2119 condition_status = lttng_condition_event_rule_matches_get_rule(
2120 condition, &event_rule);
2121 assert(condition_status == LTTNG_CONDITION_STATUS_OK);
2122
2123 assert(event_rule);
2124 assert(lttng_event_rule_get_type(event_rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
2125
2126 init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
2127 event_notifier.event.token = ua_event_notifier_rule->token;
2128 event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;
2129
2130 /* Create UST event notifier against the tracer. */
2131 pthread_mutex_lock(&app->sock_lock);
2132 ret = lttng_ust_ctl_create_event_notifier(app->sock, &event_notifier,
2133 app->event_notifier_group.object,
2134 &ua_event_notifier_rule->obj);
2135 pthread_mutex_unlock(&app->sock_lock);
2136 if (ret < 0) {
2137 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2138 ERR("Error ustctl create event notifier: name = '%s', app = '%s' (ppid: %d), ret = %d",
2139 event_notifier.event.name, app->name,
2140 app->ppid, ret);
2141 } else {
2142 /*
2143 * This is normal behavior, an application can die
2144 * during the creation process. Don't report an error so
2145 * the execution can continue normally.
2146 */
2147 ret = 0;
2148 DBG3("UST app create event notifier failed (application is dead): app = '%s' (ppid = %d)",
2149 app->name, app->ppid);
2150 }
2151
2152 goto error;
2153 }
2154
2155 ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
2156
2157 DBG2("UST app event notifier %s created successfully: app = '%s' (ppid: %d), object: %p",
2158 event_notifier.event.name, app->name, app->ppid,
2159 ua_event_notifier_rule->obj);
2160
2161 health_code_update();
2162
2163 /* Set filter if one is present. */
2164 if (ua_event_notifier_rule->filter) {
2165 ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
2166 ua_event_notifier_rule->obj);
2167 if (ret < 0) {
2168 goto error;
2169 }
2170 }
2171
2172 /* Set exclusions for the event. */
2173 if (ua_event_notifier_rule->exclusion) {
2174 ret = set_ust_object_exclusions(app,
2175 ua_event_notifier_rule->exclusion,
2176 ua_event_notifier_rule->obj);
2177 if (ret < 0) {
2178 goto error;
2179 }
2180 }
2181
2182 /* Set the capture bytecodes. */
2183 cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count(
2184 condition, &capture_bytecode_count);
2185 assert(cond_status == LTTNG_CONDITION_STATUS_OK);
2186
2187 for (i = 0; i < capture_bytecode_count; i++) {
2188 const struct lttng_bytecode *capture_bytecode =
2189 lttng_condition_event_rule_matches_get_capture_bytecode_at_index(
2190 condition, i);
2191
2192 ret = set_ust_capture(app, capture_bytecode, i,
2193 ua_event_notifier_rule->obj);
2194 if (ret < 0) {
2195 goto error;
2196 }
2197 }
2198
2199 /*
2200 * We now need to explicitly enable the event, since it
2201 * is disabled at creation.
2202 */
2203 ret = enable_ust_object(app, ua_event_notifier_rule->obj);
2204 if (ret < 0) {
2205 /*
2206 * If we hit an EPERM, something is wrong with our enable call.
2207 * If we get an EEXIST, there is a problem on the tracer side
2208 * since we just created it.
2209 */
2210 switch (ret) {
2211 case -LTTNG_UST_ERR_PERM:
2212 /* Code flow problem. */
2213 abort();
2214 case -LTTNG_UST_ERR_EXIST:
2215 /* It's OK for our use case. */
2216 ret = 0;
2217 break;
2218 default:
2219 break;
2220 }
2221
2222 goto error;
2223 }
2224
2225 ua_event_notifier_rule->enabled = true;
2226
2227 error:
2228 health_code_update();
2229 return ret;
2230 }
2231
2232 /*
2233 * Copy data between a UST app event and an LTT event.
2234 */
2235 static void shadow_copy_event(struct ust_app_event *ua_event,
2236 struct ltt_ust_event *uevent)
2237 {
2238 size_t exclusion_alloc_size;
2239
2240 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
2241 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
2242
2243 ua_event->enabled = uevent->enabled;
2244
2245 /* Copy event attributes */
2246 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
2247
2248 /* Copy filter bytecode */
2249 if (uevent->filter) {
2250 ua_event->filter = lttng_bytecode_copy(uevent->filter);
2251 /* Filter might be NULL here in case of ENOMEM. */
2252 }
2253
2254 /* Copy exclusion data */
2255 if (uevent->exclusion) {
2256 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
2257 LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
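		/*
		 * The excluded names are stored inline, right after the
		 * lttng_event_exclusion header, hence the header size plus
		 * count * LTTNG_UST_ABI_SYM_NAME_LEN bytes computed above.
		 */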
2258 ua_event->exclusion = zmalloc(exclusion_alloc_size);
2259 if (ua_event->exclusion == NULL) {
2260 PERROR("malloc");
2261 } else {
2262 memcpy(ua_event->exclusion, uevent->exclusion,
2263 exclusion_alloc_size);
2264 }
2265 }
2266 }
2267
2268 /*
2269 * Copy data between a UST app channel and an LTT channel.
2270 */
2271 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
2272 struct ltt_ust_channel *uchan)
2273 {
2274 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
2275
2276 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2277 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
2278
2279 ua_chan->tracefile_size = uchan->tracefile_size;
2280 ua_chan->tracefile_count = uchan->tracefile_count;
2281
2282 /* Copy event attributes since the layout is different. */
2283 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2284 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2285 ua_chan->attr.overwrite = uchan->attr.overwrite;
2286 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2287 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
2288 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
2289 ua_chan->attr.output = uchan->attr.output;
2290 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2291
2292 /*
2293 * Note that the attribute channel type is not set since the channel on the
2294 * tracing registry side does not have this information.
2295 */
2296
2297 ua_chan->enabled = uchan->enabled;
2298 ua_chan->tracing_channel_id = uchan->id;
2299
2300 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
2301 }
2302
2303 /*
2304 * Copy data between a UST app session and a regular LTT session.
2305 */
2306 static void shadow_copy_session(struct ust_app_session *ua_sess,
2307 struct ltt_ust_session *usess, struct ust_app *app)
2308 {
2309 struct tm *timeinfo;
2310 char datetime[16];
2311 int ret;
2312 char tmp_shm_path[PATH_MAX];
2313
2314 timeinfo = localtime(&app->registration_time);
2315 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
2316
2317 DBG2("Shadow copy of session handle %d", ua_sess->handle);
2318
2319 ua_sess->tracing_id = usess->id;
2320 ua_sess->id = get_next_session_id();
2321 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
2322 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
2323 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
2324 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
2325 ua_sess->buffer_type = usess->buffer_type;
2326 ua_sess->bits_per_long = app->bits_per_long;
2327
2328 /* Only one consumer object per session is possible. */
2329 consumer_output_get(usess->consumer);
2330 ua_sess->consumer = usess->consumer;
2331
2332 ua_sess->output_traces = usess->output_traces;
2333 ua_sess->live_timer_interval = usess->live_timer_interval;
2334 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
2335 &usess->metadata_attr);
2336
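	/*
	 * Illustrative layout (names made up): a per-PID session for an app
	 * "myapp" with pid 1234 registered on 2021-01-01 12:00:00 yields a
	 * path ending in "myapp-1234-20210101-120000" under
	 * DEFAULT_UST_TRACE_PID_PATH, while per-UID paths are derived from the
	 * uid and bits_per_long only.
	 */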
2337 switch (ua_sess->buffer_type) {
2338 case LTTNG_BUFFER_PER_PID:
2339 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
2340 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
2341 datetime);
2342 break;
2343 case LTTNG_BUFFER_PER_UID:
2344 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
2345 DEFAULT_UST_TRACE_UID_PATH,
2346 lttng_credentials_get_uid(&ua_sess->real_credentials),
2347 app->bits_per_long);
2348 break;
2349 default:
2350 assert(0);
2351 goto error;
2352 }
2353 if (ret < 0) {
2354 PERROR("snprintf UST shadow copy session");
2355 assert(0);
2356 goto error;
2357 }
2358
2359 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
2360 sizeof(ua_sess->root_shm_path));
2361 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
2362 strncpy(ua_sess->shm_path, usess->shm_path,
2363 sizeof(ua_sess->shm_path));
2364 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2365 if (ua_sess->shm_path[0]) {
2366 switch (ua_sess->buffer_type) {
2367 case LTTNG_BUFFER_PER_PID:
2368 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
2369 "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
2370 app->name, app->pid, datetime);
2371 break;
2372 case LTTNG_BUFFER_PER_UID:
2373 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
2374 "/" DEFAULT_UST_TRACE_UID_PATH,
2375 app->uid, app->bits_per_long);
2376 break;
2377 default:
2378 assert(0);
2379 goto error;
2380 }
2381 if (ret < 0) {
2382 PERROR("snprintf UST shadow copy session");
2383 assert(0);
2384 goto error;
2385 }
2386 strncat(ua_sess->shm_path, tmp_shm_path,
2387 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
2388 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2389 }
2390 return;
2391
2392 error:
2393 consumer_output_put(ua_sess->consumer);
2394 }
2395
2396 /*
2397 * Lookup session wrapper.
2398 */
2399 static
2400 void __lookup_session_by_app(const struct ltt_ust_session *usess,
2401 struct ust_app *app, struct lttng_ht_iter *iter)
2402 {
2403 /* Get right UST app session from app */
2404 lttng_ht_lookup(app->sessions, &usess->id, iter);
2405 }
2406
2407 /*
2408 * Return ust app session from the app session hashtable using the UST session
2409 * id.
2410 */
2411 static struct ust_app_session *lookup_session_by_app(
2412 const struct ltt_ust_session *usess, struct ust_app *app)
2413 {
2414 struct lttng_ht_iter iter;
2415 struct lttng_ht_node_u64 *node;
2416
2417 __lookup_session_by_app(usess, app, &iter);
2418 node = lttng_ht_iter_get_node_u64(&iter);
2419 if (node == NULL) {
2420 goto error;
2421 }
2422
2423 return caa_container_of(node, struct ust_app_session, node);
2424
2425 error:
2426 return NULL;
2427 }
2428
2429 /*
2430 * Setup buffer registry per PID for the given session and application. If none
2431 * is found, a new one is created, added to the global registry and
2432 * initialized. If regp is valid, it's set with the newly created object.
2433 *
2434 * Return 0 on success or else a negative value.
2435 */
2436 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2437 struct ust_app *app, struct buffer_reg_pid **regp)
2438 {
2439 int ret = 0;
2440 struct buffer_reg_pid *reg_pid;
2441
2442 assert(ua_sess);
2443 assert(app);
2444
2445 rcu_read_lock();
2446
2447 reg_pid = buffer_reg_pid_find(ua_sess->id);
2448 if (!reg_pid) {
2449 /*
2450 * This is the channel creation path, meaning that if there is no
2451 * registry available, we have to create one for this session.
2452 */
2453 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
2454 ua_sess->root_shm_path, ua_sess->shm_path);
2455 if (ret < 0) {
2456 goto error;
2457 }
2458 } else {
2459 goto end;
2460 }
2461
2462 /* Initialize registry. */
2463 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2464 app->bits_per_long, app->uint8_t_alignment,
2465 app->uint16_t_alignment, app->uint32_t_alignment,
2466 app->uint64_t_alignment, app->long_alignment,
2467 app->byte_order, app->version.major, app->version.minor,
2468 reg_pid->root_shm_path, reg_pid->shm_path,
2469 lttng_credentials_get_uid(&ua_sess->effective_credentials),
2470 lttng_credentials_get_gid(&ua_sess->effective_credentials),
2471 ua_sess->tracing_id,
2472 app->uid);
2473 if (ret < 0) {
2474 /*
2475 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2476 * destroy the buffer registry, because it is always expected
2477 * that if the buffer registry can be found, its ust registry is
2478 * non-NULL.
2479 */
2480 buffer_reg_pid_destroy(reg_pid);
2481 goto error;
2482 }
2483
2484 buffer_reg_pid_add(reg_pid);
2485
2486 DBG3("UST app buffer registry per PID created successfully");
2487
2488 end:
2489 if (regp) {
2490 *regp = reg_pid;
2491 }
2492 error:
2493 rcu_read_unlock();
2494 return ret;
2495 }
2496
2497 /*
2498 * Setup buffer registry per UID for the given session and application. If none
2499 * is found, a new one is created, added to the global registry and
2500 * initialized. If regp is valid, it's set with the newly created object.
2501 *
2502 * Return 0 on success or else a negative value.
2503 */
2504 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
2505 struct ust_app_session *ua_sess,
2506 struct ust_app *app, struct buffer_reg_uid **regp)
2507 {
2508 int ret = 0;
2509 struct buffer_reg_uid *reg_uid;
2510
2511 assert(usess);
2512 assert(app);
2513
2514 rcu_read_lock();
2515
2516 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2517 if (!reg_uid) {
2518 /*
2519 * This is the channel creation path, meaning that if there is no
2520 * registry available, we have to create one for this session.
2521 */
2522 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
2523 LTTNG_DOMAIN_UST, &reg_uid,
2524 ua_sess->root_shm_path, ua_sess->shm_path);
2525 if (ret < 0) {
2526 goto error;
2527 }
2528 } else {
2529 goto end;
2530 }
2531
2532 /* Initialize registry. */
2533 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
2534 app->bits_per_long, app->uint8_t_alignment,
2535 app->uint16_t_alignment, app->uint32_t_alignment,
2536 app->uint64_t_alignment, app->long_alignment,
2537 app->byte_order, app->version.major,
2538 app->version.minor, reg_uid->root_shm_path,
2539 reg_uid->shm_path, usess->uid, usess->gid,
2540 ua_sess->tracing_id, app->uid);
2541 if (ret < 0) {
2542 /*
2543 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2544 * destroy the buffer registry, because it is always expected
2545 * that if the buffer registry can be found, its ust registry is
2546 * non-NULL.
2547 */
2548 buffer_reg_uid_destroy(reg_uid, NULL);
2549 goto error;
2550 }
2551 /* Add node to teardown list of the session. */
2552 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2553
2554 buffer_reg_uid_add(reg_uid);
2555
2556 DBG3("UST app buffer registry per UID created successfully");
2557 end:
2558 if (regp) {
2559 *regp = reg_uid;
2560 }
2561 error:
2562 rcu_read_unlock();
2563 return ret;
2564 }
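/*
 * Note on the two registry flavours above: a per-PID registry is keyed by the
 * ust app session id and lives as long as that application session, while a
 * per-UID registry is keyed by (session id, bits_per_long, uid), is shared by
 * every application matching that triplet, and is chained on the session's
 * buffer_reg_uid_list for teardown.
 */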
2565
2566 /*
2567 * Create a session on the tracer side for the given app.
2568 *
2569 * On success, ua_sess_ptr is populated with the session pointer or else left
2570 * untouched. If the session was created, is_created is set to 1. On error,
2571 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2572 * be NULL.
2573 *
2574 * Returns 0 on success or else a negative code which is either -ENOMEM or
2575 * -ENOTCONN which is the default code if the lttng_ust_ctl_create_session fails.
2576 * -ENOTCONN, which is the default code if lttng_ust_ctl_create_session fails.
2577 static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
2578 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2579 int *is_created)
2580 {
2581 int ret, created = 0;
2582 struct ust_app_session *ua_sess;
2583
2584 assert(usess);
2585 assert(app);
2586 assert(ua_sess_ptr);
2587
2588 health_code_update();
2589
2590 ua_sess = lookup_session_by_app(usess, app);
2591 if (ua_sess == NULL) {
2592 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
2593 app->pid, usess->id);
2594 ua_sess = alloc_ust_app_session();
2595 if (ua_sess == NULL) {
2596 /* Only malloc can fail here, so something is really wrong. */
2597 ret = -ENOMEM;
2598 goto error;
2599 }
2600 shadow_copy_session(ua_sess, usess, app);
2601 created = 1;
2602 }
2603
2604 switch (usess->buffer_type) {
2605 case LTTNG_BUFFER_PER_PID:
2606 /* Init local registry. */
2607 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
2608 if (ret < 0) {
2609 delete_ust_app_session(-1, ua_sess, app);
2610 goto error;
2611 }
2612 break;
2613 case LTTNG_BUFFER_PER_UID:
2614 /* Look for a global registry. If none exists, create one. */
2615 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
2616 if (ret < 0) {
2617 delete_ust_app_session(-1, ua_sess, app);
2618 goto error;
2619 }
2620 break;
2621 default:
2622 assert(0);
2623 ret = -EINVAL;
2624 goto error;
2625 }
2626
2627 health_code_update();
2628
2629 if (ua_sess->handle == -1) {
2630 pthread_mutex_lock(&app->sock_lock);
2631 ret = lttng_ust_ctl_create_session(app->sock);
2632 pthread_mutex_unlock(&app->sock_lock);
2633 if (ret < 0) {
2634 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2635 ERR("Creating session for app pid %d with ret %d",
2636 app->pid, ret);
2637 } else {
2638 DBG("UST app creating session failed. Application is dead");
2639 /*
2640 * This is normal behavior, an application can die during the
2641 * creation process. Don't report an error so the execution can
2642 * continue normally. This will get flagged ENOTCONN and the
2643 * caller will handle it.
2644 */
2645 ret = 0;
2646 }
2647 delete_ust_app_session(-1, ua_sess, app);
2648 if (ret != -ENOMEM) {
2649 /*
2650 * The tracer is probably gone or hit an internal error, so let's
2651 * behave as if it will soon unregister or become unusable.
2652 */
2653 ret = -ENOTCONN;
2654 }
2655 goto error;
2656 }
2657
2658 ua_sess->handle = ret;
2659
2660 /* Add ust app session to app's HT */
2661 lttng_ht_node_init_u64(&ua_sess->node,
2662 ua_sess->tracing_id);
2663 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
2664 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2665 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2666 &ua_sess->ust_objd_node);
2667
2668 DBG2("UST app session created successfully with handle %d", ret);
2669 }
2670
2671 *ua_sess_ptr = ua_sess;
2672 if (is_created) {
2673 *is_created = created;
2674 }
2675
2676 /* Everything went well. */
2677 ret = 0;
2678
2679 error:
2680 health_code_update();
2681 return ret;
2682 }
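/*
 * Minimal usage sketch for the helper above (variable names illustrative,
 * error handling elided):
 *
 *	struct ust_app_session *ua_sess;
 *	int created;
 *
 *	ret = find_or_create_ust_app_session(usess, app, &ua_sess, &created);
 *	if (ret < 0) {
 *		goto error;
 *	}
 *
 * On failure, ret is -ENOMEM or -ENOTCONN, as documented above.
 */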
2683
2684 /*
2685 * Match function for a hash table lookup of ust_app_ctx.
2686 *
2687 * It matches an ust app context based on the context type and, in the case
2688 * of perf counters, their name.
2689 */
2690 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2691 {
2692 struct ust_app_ctx *ctx;
2693 const struct lttng_ust_context_attr *key;
2694
2695 assert(node);
2696 assert(_key);
2697
2698 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2699 key = _key;
2700
2701 /* Context type */
2702 if (ctx->ctx.ctx != key->ctx) {
2703 goto no_match;
2704 }
2705
2706 switch(key->ctx) {
2707 case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
2708 if (strncmp(key->u.perf_counter.name,
2709 ctx->ctx.u.perf_counter.name,
2710 sizeof(key->u.perf_counter.name))) {
2711 goto no_match;
2712 }
2713 break;
2714 case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
2715 if (strcmp(key->u.app_ctx.provider_name,
2716 ctx->ctx.u.app_ctx.provider_name) ||
2717 strcmp(key->u.app_ctx.ctx_name,
2718 ctx->ctx.u.app_ctx.ctx_name)) {
2719 goto no_match;
2720 }
2721 break;
2722 default:
2723 break;
2724 }
2725
2726 /* Match. */
2727 return 1;
2728
2729 no_match:
2730 return 0;
2731 }
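/*
 * This match function is meant to be paired with a hash computed on the
 * context type alone (see the cds_lfht_lookup() call in
 * find_ust_app_context() below): contexts of the same type land in the same
 * bucket and are disambiguated here by perf counter or app context name.
 */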
2732
2733 /*
2734 * Look up an ust app context from an lttng_ust_context.
2735 *
2736 * Must be called while holding RCU read side lock.
2737 * Return an ust_app_ctx object or NULL on error.
2738 * Return an ust_app_ctx object or NULL if not found.
2739 static
2740 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2741 struct lttng_ust_context_attr *uctx)
2742 {
2743 struct lttng_ht_iter iter;
2744 struct lttng_ht_node_ulong *node;
2745 struct ust_app_ctx *app_ctx = NULL;
2746
2747 assert(uctx);
2748 assert(ht);
2749
2750 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2751 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2752 ht_match_ust_app_ctx, uctx, &iter.iter);
2753 node = lttng_ht_iter_get_node_ulong(&iter);
2754 if (!node) {
2755 goto end;
2756 }
2757
2758 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2759
2760 end:
2761 return app_ctx;
2762 }
2763
2764 /*
2765 * Create a context for the channel on the tracer.
2766 *
2767 * Called with UST app session lock held and a RCU read side lock.
2768 */
2769 static
2770 int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
2771 struct lttng_ust_context_attr *uctx,
2772 struct ust_app *app)
2773 {
2774 int ret = 0;
2775 struct ust_app_ctx *ua_ctx;
2776
2777 DBG2("UST app adding context to channel %s", ua_chan->name);
2778
2779 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2780 if (ua_ctx) {
2781 ret = -EEXIST;
2782 goto error;
2783 }
2784
2785 ua_ctx = alloc_ust_app_ctx(uctx);
2786 if (ua_ctx == NULL) {
2787 /* malloc failed */
2788 ret = -ENOMEM;
2789 goto error;
2790 }
2791
2792 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2793 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2794 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2795
2796 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2797 if (ret < 0) {
2798 goto error;
2799 }
2800
2801 error:
2802 return ret;
2803 }
2804
2805 /*
2806 * Enable on the tracer side a ust app event for the session and channel.
2807 *
2808 * Called with UST app session lock held.
2809 */
2810 static
2811 int enable_ust_app_event(struct ust_app_session *ua_sess,
2812 struct ust_app_event *ua_event, struct ust_app *app)
2813 {
2814 int ret;
2815
2816 ret = enable_ust_object(app, ua_event->obj);
2817 if (ret < 0) {
2818 goto error;
2819 }
2820
2821 ua_event->enabled = 1;
2822
2823 error:
2824 return ret;
2825 }
2826
2827 /*
2828 * Disable on the tracer side a ust app event for the session and channel.
2829 */
2830 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2831 struct ust_app_event *ua_event, struct ust_app *app)
2832 {
2833 int ret;
2834
2835 ret = disable_ust_object(app, ua_event->obj);
2836 if (ret < 0) {
2837 goto error;
2838 }
2839
2840 ua_event->enabled = 0;
2841
2842 error:
2843 return ret;
2844 }
2845
2846 /*
2847 * Lookup ust app channel for session and disable it on the tracer side.
2848 */
2849 static
2850 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2851 struct ust_app_channel *ua_chan, struct ust_app *app)
2852 {
2853 int ret;
2854
2855 ret = disable_ust_channel(app, ua_sess, ua_chan);
2856 if (ret < 0) {
2857 goto error;
2858 }
2859
2860 ua_chan->enabled = 0;
2861
2862 error:
2863 return ret;
2864 }
2865
2866 /*
2867 * Lookup ust app channel for session and enable it on the tracer side. This
2868 * MUST be called with a RCU read side lock acquired.
2869 */
2870 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2871 struct ltt_ust_channel *uchan, struct ust_app *app)
2872 {
2873 int ret = 0;
2874 struct lttng_ht_iter iter;
2875 struct lttng_ht_node_str *ua_chan_node;
2876 struct ust_app_channel *ua_chan;
2877
2878 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2879 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2880 if (ua_chan_node == NULL) {
2881 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2882 uchan->name, ua_sess->tracing_id);
2883 goto error;
2884 }
2885
2886 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2887
2888 ret = enable_ust_channel(app, ua_sess, ua_chan);
2889 if (ret < 0) {
2890 goto error;
2891 }
2892
2893 error:
2894 return ret;
2895 }
2896
2897 /*
2898 * Ask the consumer to create a channel and get it if successful.
2899 *
2900 * Called with UST app session lock held.
2901 *
2902 * Return 0 on success or else a negative value.
2903 */
2904 static int do_consumer_create_channel(struct ltt_ust_session *usess,
2905 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2906 int bitness, struct ust_registry_session *registry,
2907 uint64_t trace_archive_id)
2908 {
2909 int ret;
2910 unsigned int nb_fd = 0;
2911 struct consumer_socket *socket;
2912
2913 assert(usess);
2914 assert(ua_sess);
2915 assert(ua_chan);
2916 assert(registry);
2917
2918 rcu_read_lock();
2919 health_code_update();
2920
2921 /* Get the right consumer socket for the application. */
2922 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2923 if (!socket) {
2924 ret = -EINVAL;
2925 goto error;
2926 }
2927
2928 health_code_update();
2929
2930 /* Need one fd for the channel. */
2931 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2932 if (ret < 0) {
2933 ERR("Exhausted number of available FD upon create channel");
2934 goto error;
2935 }
2936
2937 /*
2938 * Ask the consumer to create the channel. The consumer will return the
2939 * number of streams we have to expect.
2940 */
2941 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2942 registry, usess->current_trace_chunk);
2943 if (ret < 0) {
2944 goto error_ask;
2945 }
2946
2947 /*
2948 * Compute the number of fds needed before receiving them. It must be
2949 * DEFAULT_UST_STREAM_FD_NUM per stream (2 by default).
2950 */
2951 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
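	/*
	 * For example, a channel for which the consumer announced 4 expected
	 * streams reserves 4 * 2 = 8 file descriptors below.
	 */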
2952
2953 /* Reserve the number of file descriptors we need. */
2954 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2955 if (ret < 0) {
2956 ERR("Exhausted number of available FD upon create channel");
2957 goto error_fd_get_stream;
2958 }
2959
2960 health_code_update();
2961
2962 /*
2963 * Now get the channel from the consumer. This call will populate the stream
2964 * list of that channel and set the ust objects.
2965 */
2966 if (usess->consumer->enabled) {
2967 ret = ust_consumer_get_channel(socket, ua_chan);
2968 if (ret < 0) {
2969 goto error_destroy;
2970 }
2971 }
2972
2973 rcu_read_unlock();
2974 return 0;
2975
2976 error_destroy:
2977 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2978 error_fd_get_stream:
2979 /*
2980 * Initiate a destroy channel on the consumer since we had an error
2981 * handling it on our side. The return value is of no importance since we
2982 * already have a ret value set by the previous error that we need to
2983 * return.
2984 */
2985 (void) ust_consumer_destroy_channel(socket, ua_chan);
2986 error_ask:
2987 lttng_fd_put(LTTNG_FD_APPS, 1);
2988 error:
2989 health_code_update();
2990 rcu_read_unlock();
2991 return ret;
2992 }
2993
2994 /*
2995 * Duplicate the ust data object of the ust app stream and save it in the
2996 * buffer registry stream.
2997 *
2998 * Return 0 on success or else a negative value.
2999 */
3000 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
3001 struct ust_app_stream *stream)
3002 {
3003 int ret;
3004
3005 assert(reg_stream);
3006 assert(stream);
3007
3008 /* Reserve the number of file descriptors we need. */
3009 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3010 if (ret < 0) {
3011 ERR("Exhausted number of available FD upon duplicate stream");
3012 goto error;
3013 }
3014
3015 /* Duplicate object for stream once the original is in the registry. */
3016 ret = lttng_ust_ctl_duplicate_ust_object_data(&stream->obj,
3017 reg_stream->obj.ust);
3018 if (ret < 0) {
3019 ERR("Duplicate stream obj from %p to %p failed with ret %d",
3020 reg_stream->obj.ust, stream->obj, ret);
3021 lttng_fd_put(LTTNG_FD_APPS, 2);
3022 goto error;
3023 }
3024 stream->handle = stream->obj->handle;
3025
3026 error:
3027 return ret;
3028 }
3029
3030 /*
3031 * Duplicate the ust data object of the ust app channel and save it in the
3032 * buffer registry channel.
3033 *
3034 * Return 0 on success or else a negative value.
3035 */
3036 static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
3037 struct ust_app_channel *ua_chan)
3038 {
3039 int ret;
3040
3041 assert(buf_reg_chan);
3042 assert(ua_chan);
3043
3044 /* Need one fd for the channel. */
3045 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3046 if (ret < 0) {
3047 ERR("Exhausted number of available FD upon duplicate channel");
3048 goto error_fd_get;
3049 }
3050
3051 /* Duplicate object for stream once the original is in the registry. */
3052 ret = lttng_ust_ctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
3053 if (ret < 0) {
3054 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
3055 buf_reg_chan->obj.ust, ua_chan->obj, ret);
3056 goto error;
3057 }
3058 ua_chan->handle = ua_chan->obj->handle;
3059
3060 return 0;
3061
3062 error:
3063 lttng_fd_put(LTTNG_FD_APPS, 1);
3064 error_fd_get:
3065 return ret;
3066 }
3067
3068 /*
3069 * For a given channel buffer registry, setup all streams of the given ust
3070 * application channel.
3071 *
3072 * Return 0 on success or else a negative value.
3073 */
3074 static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
3075 struct ust_app_channel *ua_chan,
3076 struct ust_app *app)
3077 {
3078 int ret = 0;
3079 struct ust_app_stream *stream, *stmp;
3080
3081 assert(buf_reg_chan);
3082 assert(ua_chan);
3083
3084 DBG2("UST app setup buffer registry stream");
3085
3086 /* Send all streams to application. */
3087 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
3088 struct buffer_reg_stream *reg_stream;
3089
3090 ret = buffer_reg_stream_create(&reg_stream);
3091 if (ret < 0) {
3092 goto error;
3093 }
3094
3095 /*
3096 * Keep original pointer and nullify it in the stream so the delete
3097 * stream call does not release the object.
3098 */
3099 reg_stream->obj.ust = stream->obj;
3100 stream->obj = NULL;
3101 buffer_reg_stream_add(reg_stream, buf_reg_chan);
3102
3103 /* We don't need the streams anymore. */
3104 cds_list_del(&stream->list);
3105 delete_ust_app_stream(-1, stream, app);
3106 }
3107
3108 error:
3109 return ret;
3110 }
3111
3112 /*
3113 * Create a buffer registry channel for the given session registry and
3114 * application channel object. If regp pointer is valid, it's set with the
3115 * created object. Important, the created object is NOT added to the session
3116 * registry hash table.
3117 *
3118 * Return 0 on success else a negative value.
3119 */
3120 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3121 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
3122 {
3123 int ret;
3124 struct buffer_reg_channel *buf_reg_chan = NULL;
3125
3126 assert(reg_sess);
3127 assert(ua_chan);
3128
3129 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
3130
3131 /* Create buffer registry channel. */
3132 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
3133 if (ret < 0) {
3134 goto error_create;
3135 }
3136 assert(buf_reg_chan);
3137 buf_reg_chan->consumer_key = ua_chan->key;
3138 buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
3139 buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
3140
3141 /* Create and add a channel registry to session. */
3142 ret = ust_registry_channel_add(reg_sess->reg.ust,
3143 ua_chan->tracing_channel_id);
3144 if (ret < 0) {
3145 goto error;
3146 }
3147 buffer_reg_channel_add(reg_sess, buf_reg_chan);
3148
3149 if (regp) {
3150 *regp = buf_reg_chan;
3151 }
3152
3153 return 0;
3154
3155 error:
3156 /* Safe because the registry channel object was not added to any HT. */
3157 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3158 error_create:
3159 return ret;
3160 }
3161
3162 /*
3163 * Setup buffer registry channel for the given session registry and application
3164 * channel object. If regp pointer is valid, it's set with the created object.
3165 *
3166 * Return 0 on success else a negative value.
3167 */
3168 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3169 struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan,
3170 struct ust_app *app)
3171 {
3172 int ret;
3173
3174 assert(reg_sess);
3175 assert(buf_reg_chan);
3176 assert(ua_chan);
3177 assert(ua_chan->obj);
3178
3179 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
3180
3181 /* Setup all streams for the registry. */
3182 ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
3183 if (ret < 0) {
3184 goto error;
3185 }
3186
3187 buf_reg_chan->obj.ust = ua_chan->obj;
3188 ua_chan->obj = NULL;
3189
3190 return 0;
3191
3192 error:
3193 buffer_reg_channel_remove(reg_sess, buf_reg_chan);
3194 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3195 return ret;
3196 }
3197
3198 /*
3199 * Send buffer registry channel to the application.
3200 *
3201 * Return 0 on success else a negative value.
3202 */
3203 static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
3204 struct ust_app *app, struct ust_app_session *ua_sess,
3205 struct ust_app_channel *ua_chan)
3206 {
3207 int ret;
3208 struct buffer_reg_stream *reg_stream;
3209
3210 assert(buf_reg_chan);
3211 assert(app);
3212 assert(ua_sess);
3213 assert(ua_chan);
3214
3215 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
3216
3217 ret = duplicate_channel_object(buf_reg_chan, ua_chan);
3218 if (ret < 0) {
3219 goto error;
3220 }
3221
3222 /* Send channel to the application. */
3223 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
3224 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3225 ret = -ENOTCONN; /* Caused by app exiting. */
3226 goto error;
3227 } else if (ret < 0) {
3228 goto error;
3229 }
3230
3231 health_code_update();
3232
3233 /* Send all streams to application. */
3234 pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
3235 cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
3236 struct ust_app_stream stream;
3237
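		/*
		 * Each registry stream is duplicated into this temporary,
		 * stack-allocated object, sent to the application and then
		 * released; ownership of the original fds stays with the
		 * buffer registry.
		 */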
3238 ret = duplicate_stream_object(reg_stream, &stream);
3239 if (ret < 0) {
3240 goto error_stream_unlock;
3241 }
3242
3243 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
3244 if (ret < 0) {
3245 (void) release_ust_app_stream(-1, &stream, app);
3246 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3247 ret = -ENOTCONN; /* Caused by app exiting. */
3248 }
3249 goto error_stream_unlock;
3250 }
3251
3252 /*
3253 * The return value is not important here. This function will output an
3254 * error if needed.
3255 */
3256 (void) release_ust_app_stream(-1, &stream, app);
3257 }
3258 ua_chan->is_sent = 1;
3259
3260 error_stream_unlock:
3261 pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
3262 error:
3263 return ret;
3264 }
3265
3266 /*
3267 * Create the per UID channel buffers if needed and send them to the application.
3268 *
3269 * This MUST be called with a RCU read side lock acquired.
3270 * The session list lock and the session's lock must be acquired.
3271 *
3272 * Return 0 on success else a negative value.
3273 */
3274 static int create_channel_per_uid(struct ust_app *app,
3275 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3276 struct ust_app_channel *ua_chan)
3277 {
3278 int ret;
3279 struct buffer_reg_uid *reg_uid;
3280 struct buffer_reg_channel *buf_reg_chan;
3281 struct ltt_session *session = NULL;
3282 enum lttng_error_code notification_ret;
3283 struct ust_registry_channel *ust_reg_chan;
3284
3285 assert(app);
3286 assert(usess);
3287 assert(ua_sess);
3288 assert(ua_chan);
3289
3290 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
3291
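	/*
	 * Per-UID flow in a nutshell: the first application to request this
	 * channel creates the consumer-side buffers and the registry channel;
	 * later applications sharing the same uid and bitness skip straight
	 * to send_channel below and only receive the existing buffers.
	 */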
3292 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
3293 /*
3294 * The session creation handles the creation of this global registry
3295 * object. If none can be found, there is a code flow problem or a
3296 * teardown race.
3297 */
3298 assert(reg_uid);
3299
3300 buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
3301 reg_uid);
3302 if (buf_reg_chan) {
3303 goto send_channel;
3304 }
3305
3306 /* Create the buffer registry channel object. */
3307 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
3308 if (ret < 0) {
3309 ERR("Error creating the UST channel \"%s\" registry instance",
3310 ua_chan->name);
3311 goto error;
3312 }
3313
3314 session = session_find_by_id(ua_sess->tracing_id);
3315 assert(session);
3316 assert(pthread_mutex_trylock(&session->lock));
3317 assert(session_trylock_list());
3318
3319 /*
3320 * Create the buffers on the consumer side. This call populates the
3321 * ust app channel object with all streams and data object.
3322 */
3323 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
3324 app->bits_per_long, reg_uid->registry->reg.ust,
3325 session->most_recent_chunk_id.value);
3326 if (ret < 0) {
3327 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3328 ua_chan->name);
3329
3330 /*
3331 * Let's remove the previously created buffer registry channel so
3332 * it's not visible anymore in the session registry.
3333 */
3334 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
3335 ua_chan->tracing_channel_id, false);
3336 buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
3337 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3338 goto error;
3339 }
3340
3341 /*
3342 * Setup the streams and add them to the session registry.
3343 */
3344 ret = setup_buffer_reg_channel(reg_uid->registry,
3345 ua_chan, buf_reg_chan, app);
3346 if (ret < 0) {
3347 ERR("Error setting up UST channel \"%s\"", ua_chan->name);
3348 goto error;
3349 }
3350
3351 /* Notify the notification subsystem of the channel's creation. */
3352 pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
3353 ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust,
3354 ua_chan->tracing_channel_id);
3355 assert(ust_reg_chan);
3356 ust_reg_chan->consumer_key = ua_chan->key;
3357 ust_reg_chan = NULL;
3358 pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
3359
3360 notification_ret = notification_thread_command_add_channel(
3361 the_notification_thread_handle, session->name,
3362 lttng_credentials_get_uid(
3363 &ua_sess->effective_credentials),
3364 lttng_credentials_get_gid(
3365 &ua_sess->effective_credentials),
3366 ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
3367 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3368 if (notification_ret != LTTNG_OK) {
3369 ret = - (int) notification_ret;
3370 ERR("Failed to add channel to notification thread");
3371 goto error;
3372 }
3373
3374 send_channel:
3375 /* Send buffers to the application. */
3376 ret = send_channel_uid_to_ust(buf_reg_chan, app, ua_sess, ua_chan);
3377 if (ret < 0) {
3378 if (ret != -ENOTCONN) {
3379 ERR("Error sending channel to application");
3380 }
3381 goto error;
3382 }
3383
3384 error:
3385 if (session) {
3386 session_put(session);
3387 }
3388 return ret;
3389 }
3390
3391 /*
3392 * Create the per PID channel buffers and send them to the application.
3393 *
3394 * Called with UST app session lock held.
3395 * The session list lock and the session's lock must be acquired.
3396 *
3397 * Return 0 on success else a negative value.
3398 */
3399 static int create_channel_per_pid(struct ust_app *app,
3400 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3401 struct ust_app_channel *ua_chan)
3402 {
3403 int ret;
3404 struct ust_registry_session *registry;
3405 enum lttng_error_code cmd_ret;
3406 struct ltt_session *session = NULL;
3407 uint64_t chan_reg_key;
3408 struct ust_registry_channel *ust_reg_chan;
3409
3410 assert(app);
3411 assert(usess);
3412 assert(ua_sess);
3413 assert(ua_chan);
3414
3415 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
3416
3417 rcu_read_lock();
3418
3419 registry = get_session_registry(ua_sess);
3420 /* The UST app session lock is held, registry shall not be null. */
3421 assert(registry);
3422
3423 /* Create and add a new channel registry to session. */
3424 ret = ust_registry_channel_add(registry, ua_chan->key);
3425 if (ret < 0) {
3426 ERR("Error creating the UST channel \"%s\" registry instance",
3427 ua_chan->name);
3428 goto error;
3429 }
3430
3431 session = session_find_by_id(ua_sess->tracing_id);
3432 assert(session);
3433
3434 assert(pthread_mutex_trylock(&session->lock));
3435 assert(session_trylock_list());
3436
3437 /* Create and get channel on the consumer side. */
3438 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
3439 app->bits_per_long, registry,
3440 session->most_recent_chunk_id.value);
3441 if (ret < 0) {
3442 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3443 ua_chan->name);
3444 goto error_remove_from_registry;
3445 }
3446
3447 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
3448 if (ret < 0) {
3449 if (ret != -ENOTCONN) {
3450 ERR("Error sending channel to application");
3451 }
3452 goto error_remove_from_registry;
3453 }
3454
3455 chan_reg_key = ua_chan->key;
3456 pthread_mutex_lock(&registry->lock);
3457 ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
3458 assert(ust_reg_chan);
3459 ust_reg_chan->consumer_key = ua_chan->key;
3460 pthread_mutex_unlock(&registry->lock);
3461
3462 cmd_ret = notification_thread_command_add_channel(
3463 the_notification_thread_handle, session->name,
3464 lttng_credentials_get_uid(
3465 &ua_sess->effective_credentials),
3466 lttng_credentials_get_gid(
3467 &ua_sess->effective_credentials),
3468 ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
3469 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3470 if (cmd_ret != LTTNG_OK) {
3471 ret = - (int) cmd_ret;
3472 ERR("Failed to add channel to notification thread");
3473 goto error_remove_from_registry;
3474 }
3475
3476 error_remove_from_registry:
3477 if (ret) {
3478 ust_registry_channel_del_free(registry, ua_chan->key, false);
3479 }
3480 error:
3481 rcu_read_unlock();
3482 if (session) {
3483 session_put(session);
3484 }
3485 return ret;
3486 }
3487
3488 /*
3489 * From an already allocated ust app channel, create the channel buffers if
3490 * needed and send them to the application. This MUST be called with a RCU read
3491 * side lock acquired.
3492 *
3493 * Called with UST app session lock held.
3494 *
3495 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3496 * the application exited concurrently.
3497 */
3498 static int ust_app_channel_send(struct ust_app *app,
3499 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3500 struct ust_app_channel *ua_chan)
3501 {
3502 int ret;
3503
3504 assert(app);
3505 assert(usess);
3506 assert(usess->active);
3507 assert(ua_sess);
3508 assert(ua_chan);
3509
3510 /* Handle buffer type before sending the channel to the application. */
3511 switch (usess->buffer_type) {
3512 case LTTNG_BUFFER_PER_UID:
3513 {
3514 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3515 if (ret < 0) {
3516 goto error;
3517 }
3518 break;
3519 }
3520 case LTTNG_BUFFER_PER_PID:
3521 {
3522 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3523 if (ret < 0) {
3524 goto error;
3525 }
3526 break;
3527 }
3528 default:
3529 assert(0);
3530 ret = -EINVAL;
3531 goto error;
3532 }
3533
3534 /* Initialize ust objd object using the received handle and add it. */
3535 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3536 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
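	/*
	 * Indexing the channel by its application-side object descriptor
	 * allows later messages from this application that reference the
	 * descriptor to be mapped back to the channel.
	 */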
3537
3538 /* If channel is not enabled, disable it on the tracer */
3539 if (!ua_chan->enabled) {
3540 ret = disable_ust_channel(app, ua_sess, ua_chan);
3541 if (ret < 0) {
3542 goto error;
3543 }
3544 }
3545
3546 error:
3547 return ret;
3548 }
3549
3550 /*
3551 * Create UST app channel and return it through ua_chanp if not NULL.
3552 *
3553 * Called with UST app session lock and RCU read-side lock held.
3554 *
3555 * Return 0 on success or else a negative value.
3556 */
3557 static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
3558 struct ltt_ust_channel *uchan,
3559 enum lttng_ust_abi_chan_type type, struct ltt_ust_session *usess,
3560 struct ust_app_channel **ua_chanp)
3561 {
3562 int ret = 0;
3563 struct lttng_ht_iter iter;
3564 struct lttng_ht_node_str *ua_chan_node;
3565 struct ust_app_channel *ua_chan;
3566
3567 /* Lookup channel in the ust app session */
3568 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3569 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
3570 if (ua_chan_node != NULL) {
3571 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3572 goto end;
3573 }
3574
3575 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
3576 if (ua_chan == NULL) {
3577 /* Only malloc can fail here */
3578 ret = -ENOMEM;
3579 goto error;
3580 }
3581 shadow_copy_channel(ua_chan, uchan);
3582
3583 /* Set channel type. */
3584 ua_chan->attr.type = type;
3585
3586 /* Add the allocated channel to the session's channel hash table. */
3587 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
3588 end:
3589 if (ua_chanp) {
3590 *ua_chanp = ua_chan;
3591 }
3592
3593 /* Everything went well. */
3594 return 0;
3595
3596 error:
3597 return ret;
3598 }
3599
3600 /*
3601 * Create UST app event and create it on the tracer side.
3602 *
3603 * Must be called with the RCU read side lock held.
3604 * Called with ust app session mutex held.
3605 */
3606 static
3607 int create_ust_app_event(struct ust_app_session *ua_sess,
3608 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
3609 struct ust_app *app)
3610 {
3611 int ret = 0;
3612 struct ust_app_event *ua_event;
3613
3614 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3615 if (ua_event == NULL) {
3616 /* Only failure mode of alloc_ust_app_event(). */
3617 ret = -ENOMEM;
3618 goto end;
3619 }
3620 shadow_copy_event(ua_event, uevent);
3621
3622 /* Create it on the tracer side */
3623 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
3624 if (ret < 0) {
3625 /*
3626 * The event was not found on our side, so it should not exist
3627 * on the tracer. If the application reports that it already
3628 * exists, there is a bug in the sessiond or lttng-ust
3629 * (or corruption, etc.).
3630 */
3631 if (ret == -LTTNG_UST_ERR_EXIST) {
3632 ERR("Tracer for application reported that an event being created already existed: "
3633 "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
3634 uevent->attr.name,
3635 app->pid, app->ppid, app->uid,
3636 app->gid);
3637 }
3638 goto error;
3639 }
3640
3641 add_unique_ust_app_event(ua_chan, ua_event);
3642
3643 DBG2("UST app create event completed: app = '%s' (ppid: %d)",
3644 app->name, app->ppid);
3645
3646 end:
3647 return ret;
3648
3649 error:
3650 /* Valid. Calling here is already in a read side lock */
3651 delete_ust_app_event(-1, ua_event, app);
3652 return ret;
3653 }
3654
3655 /*
3656 * Create UST app event notifier rule and create it on the tracer side.
3657 *
3658 * Must be called with the RCU read side lock held.
3659 * Called with ust app session mutex held.
3660 */
3661 static
3662 int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
3663 struct ust_app *app)
3664 {
3665 int ret = 0;
3666 struct ust_app_event_notifier_rule *ua_event_notifier_rule;
3667
3668 ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
3669 if (ua_event_notifier_rule == NULL) {
3670 ret = -ENOMEM;
3671 goto end;
3672 }
3673
3674 /* Create it on the tracer side. */
3675 ret = create_ust_event_notifier(app, ua_event_notifier_rule);
3676 if (ret < 0) {
3677 /*
3678 * The event notifier was not found on our side, so it should
3679 * not exist on the tracer. If the application reports that it
3680 * already exists, there is a bug in the sessiond or lttng-ust
3681 * (or corruption, etc.).
3682 */
3683 if (ret == -LTTNG_UST_ERR_EXIST) {
3684 ERR("Tracer for application reported that an event notifier being created already exists: "
3685 "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
3686 lttng_trigger_get_tracer_token(trigger),
3687 app->pid, app->ppid, app->uid,
3688 app->gid);
3689 }
3690 goto error;
3691 }
3692
3693 lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
3694 &ua_event_notifier_rule->node);
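	/*
	 * The rule is indexed by its trigger token: when the tracer emits a
	 * notification carrying this token, it can be mapped back to the
	 * originating trigger.
	 */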
3695
3696 DBG2("UST app create token event rule completed: app = '%s' (ppid: %d), token = %" PRIu64,
3697 app->name, app->ppid, lttng_trigger_get_tracer_token(trigger));
3698
3699 goto end;
3700
3701 error:
3702 /* The RCU read side lock is already being held by the caller. */
3703 delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
3704 end:
3705 return ret;
3706 }
3707
3708 /*
3709 * Create UST metadata and open it on the tracer side.
3710 *
3711 * Called with UST app session lock held and RCU read side lock.
3712 */
3713 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
3714 struct ust_app *app, struct consumer_output *consumer)
3715 {
3716 int ret = 0;
3717 struct ust_app_channel *metadata;
3718 struct consumer_socket *socket;
3719 struct ust_registry_session *registry;
3720 struct ltt_session *session = NULL;
3721
3722 assert(ua_sess);
3723 assert(app);
3724 assert(consumer);
3725
3726 registry = get_session_registry(ua_sess);
3727 /* The UST app session lock is held; the registry shall not be null. */
3728 assert(registry);
3729
3730 pthread_mutex_lock(&registry->lock);
3731
3732 /* Metadata already exists for this registry or it was closed previously */
3733 if (registry->metadata_key || registry->metadata_closed) {
3734 ret = 0;
3735 goto error;
3736 }
3737
3738 /* Allocate UST metadata */
3739 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
3740 if (!metadata) {
3741 /* malloc() failed */
3742 ret = -ENOMEM;
3743 goto error;
3744 }
3745
3746 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
3747
3748 /* Need one fd for the channel. */
3749 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3750 if (ret < 0) {
3751 ERR("Exhausted number of available FD upon create metadata");
3752 goto error;
3753 }
3754
3755 /* Get the right consumer socket for the application. */
3756 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
3757 if (!socket) {
3758 ret = -EINVAL;
3759 goto error_consumer;
3760 }
3761
3762 /*
3763 * Keep the metadata key so we can identify it on the consumer side. Assign
3764 * it to the registry *before* we ask the consumer so we avoid the race where
3765 * the consumer requests the metadata before the ask_channel call on our
3766 * side has returned.
3767 */
3768 registry->metadata_key = metadata->key;
3769
3770 session = session_find_by_id(ua_sess->tracing_id);
3771 assert(session);
3772
3773 assert(pthread_mutex_trylock(&session->lock));
3774 assert(session_trylock_list());
3775
3776 /*
3777 * Ask the consumer to create the metadata channel. The metadata object
3778 * will be created by the consumer and kept there. However, the stream is
3779 * never added or monitored until we do a first push of metadata to the
3780 * consumer.
3781 */
3782 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
3783 registry, session->current_trace_chunk);
3784 if (ret < 0) {
3785 /* Nullify the metadata key so we don't try to close it later on. */
3786 registry->metadata_key = 0;
3787 goto error_consumer;
3788 }
3789
3790 /*
3791 * The setup command will make the metadata stream be sent to the relayd,
3792 * if applicable, and to the thread managing the metadata. This is important
3793 * because after this point, if an error occurs, the only way the stream
3794 * can be deleted is to be monitored in the consumer.
3795 */
3796 ret = consumer_setup_metadata(socket, metadata->key);
3797 if (ret < 0) {
3798 /* Nullify the metadata key so we don't try to close it later on. */
3799 registry->metadata_key = 0;
3800 goto error_consumer;
3801 }
3802
3803 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
3804 metadata->key, app->pid);
3805
3806 error_consumer:
3807 lttng_fd_put(LTTNG_FD_APPS, 1);
3808 delete_ust_app_channel(-1, metadata, app);
3809 error:
3810 pthread_mutex_unlock(&registry->lock);
3811 if (session) {
3812 session_put(session);
3813 }
3814 return ret;
3815 }
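/*
 * Sketch of the lock ordering implied by the asserts above; a caller
 * is expected to hold, in order, the following before this function
 * takes registry->lock (helper names assumed from the trylock asserts,
 * actual call sites may differ):
 *
 *     session_lock_list();
 *     session_lock(session);
 *     pthread_mutex_lock(&ua_sess->lock);
 *     rcu_read_lock();
 *     ret = create_ust_app_metadata(ua_sess, app, consumer);
 */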
3816
3817 /*
3818 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3819 * acquired before calling this function.
3820 */
3821 struct ust_app *ust_app_find_by_pid(pid_t pid)
3822 {
3823 struct ust_app *app = NULL;
3824 struct lttng_ht_node_ulong *node;
3825 struct lttng_ht_iter iter;
3826
3827 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3828 node = lttng_ht_iter_get_node_ulong(&iter);
3829 if (node == NULL) {
3830 DBG2("UST app not found with pid %d", pid);
3831 goto error;
3832 }
3833
3834 DBG2("Found UST app by pid %d", pid);
3835
3836 app = caa_container_of(node, struct ust_app, pid_n);
3837
3838 error:
3839 return app;
3840 }
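/*
 * Example use (a sketch): the caller must hold the RCU read side lock
 * for the returned pointer to remain valid; 'target_pid' is a
 * hypothetical variable.
 *
 *     rcu_read_lock();
 *     app = ust_app_find_by_pid(target_pid);
 *     if (app) {
 *         DBG("Found app '%s' with sock %d", app->name, app->sock);
 *     }
 *     rcu_read_unlock();
 */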
3841
3842 /*
3843 * Allocate and init a UST app object using the registration information and
3844 * the command socket. This is called when the command socket connects to the
3845 * session daemon.
3846 *
3847 * The object is returned on success or else NULL.
3848 */
3849 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
3850 {
3851 int ret;
3852 struct ust_app *lta = NULL;
3853 struct lttng_pipe *event_notifier_event_source_pipe = NULL;
3854
3855 assert(msg);
3856 assert(sock >= 0);
3857
3858 DBG3("UST app creating application for socket %d", sock);
3859
3860 if ((msg->bits_per_long == 64 &&
3861 (uatomic_read(&the_ust_consumerd64_fd) ==
3862 -EINVAL)) ||
3863 (msg->bits_per_long == 32 &&
3864 (uatomic_read(&the_ust_consumerd32_fd) ==
3865 -EINVAL))) {
3866 ERR("Registration failed: application \"%s\" (pid: %d) uses "
3867 "%d-bit longs, but no consumerd for this size is available.",
3868 msg->name, msg->pid, msg->bits_per_long);
3869 goto error;
3870 }
3871
3872 /*
3873 * Reserve the two file descriptors of the event source pipe. The write
3874 * end will be closed once it is passed to the application, at which
3875 * point a single 'put' will be performed.
3876 */
3877 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3878 if (ret) {
3879 ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s' (ppid: %d)",
3880 msg->name, (int) msg->ppid);
3881 goto error;
3882 }
3883
3884 event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
3885 if (!event_notifier_event_source_pipe) {
3886 PERROR("Failed to open application event source pipe: '%s' (ppid = %d)",
3887 msg->name, msg->ppid);
3888 goto error;
3889 }
3890
3891 lta = zmalloc(sizeof(struct ust_app));
3892 if (lta == NULL) {
3893 PERROR("malloc");
3894 goto error_free_pipe;
3895 }
3896
3897 lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;
3898
3899 lta->ppid = msg->ppid;
3900 lta->uid = msg->uid;
3901 lta->gid = msg->gid;
3902
3903 lta->bits_per_long = msg->bits_per_long;
3904 lta->uint8_t_alignment = msg->uint8_t_alignment;
3905 lta->uint16_t_alignment = msg->uint16_t_alignment;
3906 lta->uint32_t_alignment = msg->uint32_t_alignment;
3907 lta->uint64_t_alignment = msg->uint64_t_alignment;
3908 lta->long_alignment = msg->long_alignment;
3909 lta->byte_order = msg->byte_order;
3910
3911 lta->v_major = msg->major;
3912 lta->v_minor = msg->minor;
3913 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3914 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3915 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3916 lta->notify_sock = -1;
3917 lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3918
3919 /* Copy the name and make sure it's NUL-terminated. */
3920 strncpy(lta->name, msg->name, sizeof(lta->name));
3921 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3922
3923 /*
3924 * Before this can be called, when receiving the registration information,
3925 * the application compatibility is checked. So, at this point, the
3926 * application can work with this session daemon.
3927 */
3928 lta->compatible = 1;
3929
3930 lta->pid = msg->pid;
3931 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
3932 lta->sock = sock;
3933 pthread_mutex_init(&lta->sock_lock, NULL);
3934 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
3935
3936 CDS_INIT_LIST_HEAD(&lta->teardown_head);
3937 return lta;
3938
3939 error_free_pipe:
3940 lttng_pipe_destroy(event_notifier_event_source_pipe);
3941 lttng_fd_put(LTTNG_FD_APPS, 2);
3942 error:
3943 return NULL;
3944 }
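/*
 * Illustrative registration sequence (a sketch; the real call sites
 * live in the registration threads and are not shown in this file;
 * 'notify_sock' is an assumed variable):
 *
 *     app = ust_app_create(&msg, sock);
 *     if (!app) {
 *         ... reject the registration ...
 *     }
 *     app->notify_sock = notify_sock; // must be >= 0 before ust_app_add()
 *     ust_app_add(app);
 *     ret = ust_app_version(app);
 */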
3945
3946 /*
3947 * For a given application object, add it to every hash table.
3948 */
3949 void ust_app_add(struct ust_app *app)
3950 {
3951 assert(app);
3952 assert(app->notify_sock >= 0);
3953
3954 app->registration_time = time(NULL);
3955
3956 rcu_read_lock();
3957
3958 /*
3959 * On a re-registration, we want to kick out the previous registration of
3960 * that pid.
3961 */
3962 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
3963
3964 /*
3965 * The socket _should_ be unique until _we_ call close. So, an add_unique
3966 * is used for ust_app_ht_by_sock, which makes the assert fail if the
3967 * entry is already in the table.
3968 */
3969 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
3970
3971 /* Add application to the notify socket hash table. */
3972 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3973 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
3974
3975 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
3976 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3977 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3978 app->v_minor);
3979
3980 rcu_read_unlock();
3981 }
3982
3983 /*
3984 * Set the application version into the object.
3985 *
3986 * Return 0 on success, else a negative value: either an errno code or an
3987 * LTTng-UST error code.
3988 */
3989 int ust_app_version(struct ust_app *app)
3990 {
3991 int ret;
3992
3993 assert(app);
3994
3995 pthread_mutex_lock(&app->sock_lock);
3996 ret = lttng_ust_ctl_tracer_version(app->sock, &app->version);
3997 pthread_mutex_unlock(&app->sock_lock);
3998 if (ret < 0) {
3999 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4000 ERR("UST app %d version failed with ret %d", app->sock, ret);
4001 } else {
4002 DBG3("UST app %d version failed. Application is dead", app->sock);
4003 }
4004 }
4005
4006 return ret;
4007 }
4008
4009 bool ust_app_supports_notifiers(const struct ust_app *app)
4010 {
4011 return app->v_major >= 9;
4012 }
4013
4014 bool ust_app_supports_counters(const struct ust_app *app)
4015 {
4016 return app->v_major >= 9;
4017 }
4018
4019 /*
4020 * Setup the base event notifier group.
4021 *
4022 * Return 0 on success, else a negative value: either an errno code or an
4023 * LTTng-UST error code.
4024 */
4025 int ust_app_setup_event_notifier_group(struct ust_app *app)
4026 {
4027 int ret;
4028 int event_pipe_write_fd;
4029 struct lttng_ust_abi_object_data *event_notifier_group = NULL;
4030 enum lttng_error_code lttng_ret;
4031 enum event_notifier_error_accounting_status event_notifier_error_accounting_status;
4032
4033 assert(app);
4034
4035 if (!ust_app_supports_notifiers(app)) {
4036 ret = -ENOSYS;
4037 goto error;
4038 }
4039
4040 /* Get the write side of the pipe. */
4041 event_pipe_write_fd = lttng_pipe_get_writefd(
4042 app->event_notifier_group.event_pipe);
4043
4044 pthread_mutex_lock(&app->sock_lock);
4045 ret = lttng_ust_ctl_create_event_notifier_group(app->sock,
4046 event_pipe_write_fd, &event_notifier_group);
4047 pthread_mutex_unlock(&app->sock_lock);
4048 if (ret < 0) {
4049 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4050 ERR("Failed to create application event notifier group: ret = %d, app socket fd = %d, event_pipe_write_fd = %d",
4051 ret, app->sock, event_pipe_write_fd);
4052 } else {
4053 DBG("Failed to create application event notifier group (application is dead): app socket fd = %d",
4054 app->sock);
4055 }
4056
4057 goto error;
4058 }
4059
4060 ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
4061 if (ret) {
4062 ERR("Failed to close write end of the application's event source pipe: app = '%s' (ppid = %d)",
4063 app->name, app->ppid);
4064 goto error;
4065 }
4066
4067 /*
4068 * Release the file descriptor that was reserved for the write-end of
4069 * the pipe.
4070 */
4071 lttng_fd_put(LTTNG_FD_APPS, 1);
4072
4073 lttng_ret = notification_thread_command_add_tracer_event_source(
4074 the_notification_thread_handle,
4075 lttng_pipe_get_readfd(
4076 app->event_notifier_group.event_pipe),
4077 LTTNG_DOMAIN_UST);
4078 if (lttng_ret != LTTNG_OK) {
4079 ERR("Failed to add tracer event source to notification thread");
4080 ret = -1;
4081 goto error;
4082 }
4083
4084 /* Assign handle only when the complete setup is valid. */
4085 app->event_notifier_group.object = event_notifier_group;
4086
4087 event_notifier_error_accounting_status =
4088 event_notifier_error_accounting_register_app(app);
4089 switch (event_notifier_error_accounting_status) {
4090 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK:
4091 break;
4092 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED:
4093 DBG3("Failed to setup event notifier error accounting (application does not support notifier error accounting): app socket fd = %d, app name = '%s', app ppid = %d",
4094 app->sock, app->name, (int) app->ppid);
4095 ret = 0;
4096 goto error_accounting;
4097 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD:
4098 DBG3("Failed to setup event notifier error accounting (application is dead): app socket fd = %d, app name = '%s', app ppid = %d",
4099 app->sock, app->name, (int) app->ppid);
4100 ret = 0;
4101 goto error_accounting;
4102 default:
4103 ERR("Failed to setup event notifier error accounting for app");
4104 ret = -1;
4105 goto error_accounting;
4106 }
4107
4108 return ret;
4109
4110 error_accounting:
4111 lttng_ret = notification_thread_command_remove_tracer_event_source(
4112 the_notification_thread_handle,
4113 lttng_pipe_get_readfd(
4114 app->event_notifier_group.event_pipe));
4115 if (lttng_ret != LTTNG_OK) {
4116 ERR("Failed to remove application tracer event source from notification thread");
4117 }
4118
4119 error:
4120 lttng_ust_ctl_release_object(app->sock, app->event_notifier_group.object);
4121 free(app->event_notifier_group.object);
4122 app->event_notifier_group.object = NULL;
4123 return ret;
4124 }
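/*
 * File descriptor accounting for the event source pipe, as a sketch of
 * what ust_app_create() and the code above implement together:
 *
 *     lttng_fd_get(LTTNG_FD_APPS, 2);  // both pipe ends, at creation
 *     ... pass the write end to the application ...
 *     lttng_pipe_write_close(pipe);    // write end now owned by the app
 *     lttng_fd_put(LTTNG_FD_APPS, 1);  // release the write end's slot
 *
 * The read end's slot is released when the application object is torn
 * down (not shown in this excerpt).
 */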
4125
4126 /*
4127 * Unregister app by removing it from the global traceable app list and freeing
4128 * the data struct.
4129 *
4130 * The socket is already closed at this point, so there is no need to close it.
4131 */
4132 void ust_app_unregister(int sock)
4133 {
4134 struct ust_app *lta;
4135 struct lttng_ht_node_ulong *node;
4136 struct lttng_ht_iter ust_app_sock_iter;
4137 struct lttng_ht_iter iter;
4138 struct ust_app_session *ua_sess;
4139 int ret;
4140
4141 rcu_read_lock();
4142
4143 /* Get the node reference for a call_rcu */
4144 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
4145 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
4146 assert(node);
4147
4148 lta = caa_container_of(node, struct ust_app, sock_n);
4149 DBG("PID %d unregistering with sock %d", lta->pid, sock);
4150
4151 /*
4152 * For per-PID buffers, perform "push metadata" and flush all
4153 * application streams before removing app from hash tables,
4154 * ensuring proper behavior of data_pending check.
4155 * Remove sessions so they are not visible during deletion.
4156 */
4157 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
4158 node.node) {
4159 struct ust_registry_session *registry;
4160
4161 ret = lttng_ht_del(lta->sessions, &iter);
4162 if (ret) {
4163 /* The session was already removed and is thus scheduled for teardown. */
4164 continue;
4165 }
4166
4167 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
4168 (void) ust_app_flush_app_session(lta, ua_sess);
4169 }
4170
4171 /*
4172 * Add session to list for teardown. This is safe since at this point we
4173 * are the only one using this list.
4174 */
4175 pthread_mutex_lock(&ua_sess->lock);
4176
4177 if (ua_sess->deleted) {
4178 pthread_mutex_unlock(&ua_sess->lock);
4179 continue;
4180 }
4181
4182 /*
4183 * Normally, this is done in the delete session process which is
4184 * executed in the call rcu below. However, upon unregistration we can't
4185 * afford to wait for the grace period before pushing data or else the
4186 * data pending feature can race between the unregistration and stop
4187 * command where the data pending command is sent *before* the grace
4188 * period ended.
4189 *
4190 * The close metadata below nullifies the metadata pointer in the
4191 * session so the delete session will NOT push/close a second time.
4192 */
4193 registry = get_session_registry(ua_sess);
4194 if (registry) {
4195 /* Push metadata for application before freeing the application. */
4196 (void) push_metadata(registry, ua_sess->consumer);
4197
4198 /*
4199 * Don't ask to close metadata for global per UID buffers. Close
4200 * metadata only on destroy trace session in this case. Also, the
4201 * previous push metadata could have flagged the metadata registry to
4202 * close so don't send a close command if closed.
4203 */
4204 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
4205 /* And ask to close it for this session registry. */
4206 (void) close_metadata(registry, ua_sess->consumer);
4207 }
4208 }
4209 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
4210
4211 pthread_mutex_unlock(&ua_sess->lock);
4212 }
4213
4214 /* Remove application from socket hash table */
4215 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
4216 assert(!ret);
4217
4218 /*
4219 * Remove application from notify hash table. The thread handling the
4220 * notify socket could have deleted the node, so ignore any error:
4221 * either way it's valid. The close of that socket is handled by the
4222 * apps_notify_thread.
4223 */
4224 iter.iter.node = &lta->notify_sock_n.node;
4225 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4226
4227 /*
4228 * Ignore return value since the node might have been removed before by an
4229 * add replace during app registration because the PID can be reassigned by
4230 * the OS.
4231 */
4232 iter.iter.node = &lta->pid_n.node;
4233 ret = lttng_ht_del(ust_app_ht, &iter);
4234 if (ret) {
4235 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
4236 lta->pid);
4237 }
4238
4239 /* Free memory */
4240 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
4241
4242 rcu_read_unlock();
4243 return;
4244 }
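/*
 * Timeline the early metadata push above guards against (a sketch):
 *
 *     t0: application exits; ust_app_unregister() pushes metadata now
 *     t1: a 'stop' command runs its data_pending check; data is coherent
 *     t2: RCU grace period ends; delete_ust_app_rcu() frees the app
 *
 * Deferring the push to the RCU callback (t2) would let the check at
 * t1 race with the teardown, as described in the comment above.
 */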
4245
4246 /*
4247 * Fill the events array with the names of all events of all registered apps.
4248 */
4249 int ust_app_list_events(struct lttng_event **events)
4250 {
4251 int ret, handle;
4252 size_t nbmem, count = 0;
4253 struct lttng_ht_iter iter;
4254 struct ust_app *app;
4255 struct lttng_event *tmp_event;
4256
4257 nbmem = UST_APP_EVENT_LIST_SIZE;
4258 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
4259 if (tmp_event == NULL) {
4260 PERROR("zmalloc ust app events");
4261 ret = -ENOMEM;
4262 goto error;
4263 }
4264
4265 rcu_read_lock();
4266
4267 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4268 struct lttng_ust_abi_tracepoint_iter uiter;
4269
4270 health_code_update();
4271
4272 if (!app->compatible) {
4273 /*
4274 * TODO: In time, we should notify the caller of this error by
4275 * telling them that this is a version error.
4276 */
4277 continue;
4278 }
4279 pthread_mutex_lock(&app->sock_lock);
4280 handle = lttng_ust_ctl_tracepoint_list(app->sock);
4281 if (handle < 0) {
4282 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4283 ERR("UST app list events getting handle failed for app pid %d",
4284 app->pid);
4285 }
4286 pthread_mutex_unlock(&app->sock_lock);
4287 continue;
4288 }
4289
4290 while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle,
4291 &uiter)) != -LTTNG_UST_ERR_NOENT) {
4292 /* Handle ustctl error. */
4293 if (ret < 0) {
4294 int release_ret;
4295
4296 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4297 ERR("UST app tp list get failed for app %d with ret %d",
4298 app->sock, ret);
4299 } else {
4300 DBG3("UST app tp list get failed. Application is dead");
4301 /*
4302 * This is normal behavior, an application can die during the
4303 * creation process. Don't report an error so the execution can
4304 * continue normally.
4305 */
4306 break;
4307 }
4308 free(tmp_event);
4309 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4310 if (release_ret < 0 &&
4311 release_ret != -LTTNG_UST_ERR_EXITING &&
4312 release_ret != -EPIPE) {
4313 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4314 }
4315 pthread_mutex_unlock(&app->sock_lock);
4316 goto rcu_error;
4317 }
4318
4319 health_code_update();
4320 if (count >= nbmem) {
4321 /* In case the realloc fails, we free the memory */
4322 struct lttng_event *new_tmp_event;
4323 size_t new_nbmem;
4324
4325 new_nbmem = nbmem << 1;
4326 DBG2("Reallocating event list from %zu to %zu entries",
4327 nbmem, new_nbmem);
4328 new_tmp_event = realloc(tmp_event,
4329 new_nbmem * sizeof(struct lttng_event));
4330 if (new_tmp_event == NULL) {
4331 int release_ret;
4332
4333 PERROR("realloc ust app events");
4334 free(tmp_event);
4335 ret = -ENOMEM;
4336 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4337 if (release_ret < 0 &&
4338 release_ret != -LTTNG_UST_ERR_EXITING &&
4339 release_ret != -EPIPE) {
4340 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4341 }
4342 pthread_mutex_unlock(&app->sock_lock);
4343 goto rcu_error;
4344 }
4345 /* Zero the new memory */
4346 memset(new_tmp_event + nbmem, 0,
4347 (new_nbmem - nbmem) * sizeof(struct lttng_event));
4348 nbmem = new_nbmem;
4349 tmp_event = new_tmp_event;
4350 }
4351 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
4352 tmp_event[count].loglevel = uiter.loglevel;
4353 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
4354 tmp_event[count].pid = app->pid;
4355 tmp_event[count].enabled = -1;
4356 count++;
4357 }
4358 ret = lttng_ust_ctl_release_handle(app->sock, handle);
4359 pthread_mutex_unlock(&app->sock_lock);
4360 if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4361 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4362 }
4363 }
4364
4365 ret = count;
4366 *events = tmp_event;
4367
4368 DBG2("UST app list events done (%zu events)", count);
4369
4370 rcu_error:
4371 rcu_read_unlock();
4372 error:
4373 health_code_update();
4374 return ret;
4375 }
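/*
 * Example use (a sketch): on success the return value is the number of
 * events and *events points to a heap-allocated array that the caller
 * owns and must free.
 *
 *     struct lttng_event *events;
 *     int nb_events = ust_app_list_events(&events);
 *
 *     if (nb_events >= 0) {
 *         ...
 *         free(events);
 *     }
 */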
4376
4377 /*
4378 * Fill the fields array with all event fields of all registered apps.
4379 */
4380 int ust_app_list_event_fields(struct lttng_event_field **fields)
4381 {
4382 int ret, handle;
4383 size_t nbmem, count = 0;
4384 struct lttng_ht_iter iter;
4385 struct ust_app *app;
4386 struct lttng_event_field *tmp_event;
4387
4388 nbmem = UST_APP_EVENT_LIST_SIZE;
4389 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
4390 if (tmp_event == NULL) {
4391 PERROR("zmalloc ust app event fields");
4392 ret = -ENOMEM;
4393 goto error;
4394 }
4395
4396 rcu_read_lock();
4397
4398 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4399 struct lttng_ust_abi_field_iter uiter;
4400
4401 health_code_update();
4402
4403 if (!app->compatible) {
4404 /*
4405 * TODO: In time, we should notify the caller of this error by
4406 * telling them that this is a version error.
4407 */
4408 continue;
4409 }
4410 pthread_mutex_lock(&app->sock_lock);
4411 handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
4412 if (handle < 0) {
4413 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4414 ERR("UST app list field getting handle failed for app pid %d",
4415 app->pid);
4416 }
4417 pthread_mutex_unlock(&app->sock_lock);
4418 continue;
4419 }
4420
4421 while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle,
4422 &uiter)) != -LTTNG_UST_ERR_NOENT) {
4423 /* Handle ustctl error. */
4424 if (ret < 0) {
4425 int release_ret;
4426
4427 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4428 ERR("UST app tp list field failed for app %d with ret %d",
4429 app->sock, ret);
4430 } else {
4431 DBG3("UST app tp list field failed. Application is dead");
4432 /*
4433 * This is normal behavior, an application can die during the
4434 * creation process. Don't report an error so the execution can
4435 * continue normally; move on to the next app.
4436 */
4437 break;
4438 }
4439 free(tmp_event);
4440 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4441 pthread_mutex_unlock(&app->sock_lock);
4442 if (release_ret < 0 &&
4443 release_ret != -LTTNG_UST_ERR_EXITING &&
4444 release_ret != -EPIPE) {
4445 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4446 }
4447 goto rcu_error;
4448 }
4449
4450 health_code_update();
4451 if (count >= nbmem) {
4452 /* In case the realloc fails, we free the memory */
4453 struct lttng_event_field *new_tmp_event;
4454 size_t new_nbmem;
4455
4456 new_nbmem = nbmem << 1;
4457 DBG2("Reallocating event field list from %zu to %zu entries",
4458 nbmem, new_nbmem);
4459 new_tmp_event = realloc(tmp_event,
4460 new_nbmem * sizeof(struct lttng_event_field));
4461 if (new_tmp_event == NULL) {
4462 int release_ret;
4463
4464 PERROR("realloc ust app event fields");
4465 free(tmp_event);
4466 ret = -ENOMEM;
4467 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4468 pthread_mutex_unlock(&app->sock_lock);
4469 if (release_ret < 0 &&
4470 release_ret != -LTTNG_UST_ERR_EXITING &&
4471 release_ret != -EPIPE) {
4472 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4473 }
4474 goto rcu_error;
4475 }
4476 /* Zero the new memory */
4477 memset(new_tmp_event + nbmem, 0,
4478 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
4479 nbmem = new_nbmem;
4480 tmp_event = new_tmp_event;
4481 }
4482
4483 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
4484 /* Mapping between these enums matches 1 to 1. */
4485 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
4486 tmp_event[count].nowrite = uiter.nowrite;
4487
4488 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
4489 tmp_event[count].event.loglevel = uiter.loglevel;
4490 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
4491 tmp_event[count].event.pid = app->pid;
4492 tmp_event[count].event.enabled = -1;
4493 count++;
4494 }
4495 ret = lttng_ust_ctl_release_handle(app->sock, handle);
4496 pthread_mutex_unlock(&app->sock_lock);
4497 if (ret < 0 &&
4498 ret != -LTTNG_UST_ERR_EXITING &&
4499 ret != -EPIPE) {
4500 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4501 }
4502 }
4503
4504 ret = count;
4505 *fields = tmp_event;
4506
4507 DBG2("UST app list event fields done (%zu events)", count);
4508
4509 rcu_error:
4510 rcu_read_unlock();
4511 error:
4512 health_code_update();
4513 return ret;
4514 }
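/*
 * Note on the growth strategy shared by both listing functions above:
 * the array starts at UST_APP_EVENT_LIST_SIZE entries and doubles
 * (nbmem << 1) whenever count reaches capacity, giving amortized
 * constant-time appends. The newly added tail is zeroed explicitly
 * since realloc() does not initialize it.
 */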
4515
4516 /*
4517 * Free and clean all traceable apps of the global list.
4518 *
4519 * Should _NOT_ be called with RCU read-side lock held.
4520 */
4521 void ust_app_clean_list(void)
4522 {
4523 int ret;
4524 struct ust_app *app;
4525 struct lttng_ht_iter iter;
4526
4527 DBG2("UST app cleaning registered apps hash table");
4528
4529 rcu_read_lock();
4530
4531 /* Cleanup notify socket hash table */
4532 if (ust_app_ht_by_notify_sock) {
4533 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
4534 notify_sock_n.node) {
4535 /*
4536 * Assert that all notifiers are gone as all triggers
4537 * are unregistered prior to this clean-up.
4538 */
4539 assert(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
4540
4541 ust_app_notify_sock_unregister(app->notify_sock);
4542 }
4543 }
4544
4545 if (ust_app_ht) {
4546 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4547 ret = lttng_ht_del(ust_app_ht, &iter);
4548 assert(!ret);
4549 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4550 }
4551 }
4552
4553 /* Cleanup socket hash table */
4554 if (ust_app_ht_by_sock) {
4555 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
4556 sock_n.node) {
4557 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
4558 assert(!ret);
4559 }
4560 }
4561
4562 rcu_read_unlock();
4563
4564 /* Destroy is done only when the ht is empty */
4565 if (ust_app_ht) {
4566 ht_cleanup_push(ust_app_ht);
4567 }
4568 if (ust_app_ht_by_sock) {
4569 ht_cleanup_push(ust_app_ht_by_sock);
4570 }
4571 if (ust_app_ht_by_notify_sock) {
4572 ht_cleanup_push(ust_app_ht_by_notify_sock);
4573 }
4574 }
4575
4576 /*
4577 * Init UST app hash table.
4578 */
4579 int ust_app_ht_alloc(void)
4580 {
4581 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4582 if (!ust_app_ht) {
4583 return -1;
4584 }
4585 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4586 if (!ust_app_ht_by_sock) {
4587 return -1;
4588 }
4589 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4590 if (!ust_app_ht_by_notify_sock) {
4591 return -1;
4592 }
4593 return 0;
4594 }
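/*
 * Illustrative pairing (a sketch): a failed allocation here leaves
 * some tables NULL, which ust_app_clean_list() tolerates by checking
 * each table before cleaning it up.
 *
 *     if (ust_app_ht_alloc() < 0) {
 *         ERR("Failed to allocate UST app hash tables");
 *         ...
 *     }
 *     ...
 *     ust_app_clean_list(); // safe even if some tables are NULL
 */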
4595
4596 /*
4597 * For a specific UST session, disable the channel for all registered apps.
4598 */
4599 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
4600 struct ltt_ust_channel *uchan)
4601 {
4602 int ret = 0;
4603 struct lttng_ht_iter iter;
4604 struct lttng_ht_node_str *ua_chan_node;
4605 struct ust_app *app;
4606 struct ust_app_session *ua_sess;
4607 struct ust_app_channel *ua_chan;
4608
4609 assert(usess->active);
4610 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
4611 uchan->name, usess->id);
4612
4613 rcu_read_lock();
4614
4615 /* For every registered application */
4616 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4617 struct lttng_ht_iter uiter;
4618 if (!app->compatible) {
4619 /*
4620 * TODO: In time, we should notify the caller of this error by
4621 * telling them that this is a version error.
4622 */
4623 continue;
4624 }
4625 ua_sess = lookup_session_by_app(usess, app);
4626 if (ua_sess == NULL) {
4627 continue;
4628 }
4629
4630 /* Get channel */
4631 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4632 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4633 /* If the session is found for the app, the channel must be there */
4634 assert(ua_chan_node);
4635
4636 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4637 /* The channel must not be already disabled */
4638 assert(ua_chan->enabled == 1);
4639
4640 /* Disable channel onto application */
4641 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
4642 if (ret < 0) {
4643 /* XXX: We might want to report this error at some point... */
4644 continue;
4645 }
4646 }
4647
4648 rcu_read_unlock();
4649 return ret;
4650 }
4651
4652 /*
4653 * For a specific UST session, enable the channel for all registered apps.
4654 */
4655 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
4656 struct ltt_ust_channel *uchan)
4657 {
4658 int ret = 0;
4659 struct lttng_ht_iter iter;
4660 struct ust_app *app;
4661 struct ust_app_session *ua_sess;
4662
4663 assert(usess->active);
4664 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
4665 uchan->name, usess->id);
4666
4667 rcu_read_lock();
4668
4669 /* For every registered application */
4670 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4671 if (!app->compatible) {
4672 /*
4673 * TODO: In time, we should notify the caller of this error by
4674 * telling them that this is a version error.
4675 */
4676 continue;
4677 }
4678 ua_sess = lookup_session_by_app(usess, app);
4679 if (ua_sess == NULL) {
4680 continue;
4681 }
4682
4683 /* Enable channel onto application */
4684 ret = enable_ust_app_channel(ua_sess, uchan, app);
4685 if (ret < 0) {
4686 /* XXX: We might want to report this error at some point... */
4687 continue;
4688 }
4689 }
4690
4691 rcu_read_unlock();
4692 return ret;
4693 }
4694
4695 /*
4696 * Disable an event in a channel and for a specific session.
4697 */
4698 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4699 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4700 {
4701 int ret = 0;
4702 struct lttng_ht_iter iter, uiter;
4703 struct lttng_ht_node_str *ua_chan_node;
4704 struct ust_app *app;
4705 struct ust_app_session *ua_sess;
4706 struct ust_app_channel *ua_chan;
4707 struct ust_app_event *ua_event;
4708
4709 assert(usess->active);
4710 DBG("UST app disabling event %s for all apps in channel "
4711 "%s for session id %" PRIu64,
4712 uevent->attr.name, uchan->name, usess->id);
4713
4714 rcu_read_lock();
4715
4716 /* For all registered applications */
4717 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4718 if (!app->compatible) {
4719 /*
4720 * TODO: In time, we should notify the caller of this error by
4721 * telling them that this is a version error.
4722 */
4723 continue;
4724 }
4725 ua_sess = lookup_session_by_app(usess, app);
4726 if (ua_sess == NULL) {
4727 /* Next app */
4728 continue;
4729 }
4730
4731 /* Lookup channel in the ust app session */
4732 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4733 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4734 if (ua_chan_node == NULL) {
4735 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
4736 " Skipping", uchan->name, usess->id, app->pid);
4737 continue;
4738 }
4739 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4740
4741 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4742 uevent->filter, uevent->attr.loglevel,
4743 uevent->exclusion);
4744 if (ua_event == NULL) {
4745 DBG2("Event %s not found in channel %s for app pid %d."
4746 " Skipping", uevent->attr.name, uchan->name, app->pid);
4747 continue;
4748 }
4749
4750 ret = disable_ust_app_event(ua_sess, ua_event, app);
4751 if (ret < 0) {
4752 /* XXX: Report error someday... */
4753 continue;
4754 }
4755 }
4756
4757 rcu_read_unlock();
4758 return ret;
4759 }
4760
4761 /* The ua_sess lock must be held by the caller. */
4762 static
4763 int ust_app_channel_create(struct ltt_ust_session *usess,
4764 struct ust_app_session *ua_sess,
4765 struct ltt_ust_channel *uchan, struct ust_app *app,
4766 struct ust_app_channel **_ua_chan)
4767 {
4768 int ret = 0;
4769 struct ust_app_channel *ua_chan = NULL;
4770
4771 assert(ua_sess);
4772 ASSERT_LOCKED(ua_sess->lock);
4773
4774 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
4775 sizeof(uchan->name))) {
4776 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
4777 &uchan->attr);
4778 ret = 0;
4779 } else {
4780 struct ltt_ust_context *uctx = NULL;
4781
4782 /*
4783 * Create channel onto application and synchronize its
4784 * configuration.
4785 */
4786 ret = ust_app_channel_allocate(ua_sess, uchan,
4787 LTTNG_UST_ABI_CHAN_PER_CPU, usess,
4788 &ua_chan);
4789 if (ret < 0) {
4790 goto error;
4791 }
4792
4793 ret = ust_app_channel_send(app, usess,
4794 ua_sess, ua_chan);
4795 if (ret) {
4796 goto error;
4797 }
4798
4799 /* Add contexts. */
4800 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
4801 ret = create_ust_app_channel_context(ua_chan,
4802 &uctx->ctx, app);
4803 if (ret) {
4804 goto error;
4805 }
4806 }
4807 }
4808
4809 error:
4810 if (ret < 0) {
4811 switch (ret) {
4812 case -ENOTCONN:
4813 /*
4814 * The application's socket is not valid. Either a bad socket
4815 * or a timeout on it. We can't inform the caller that for a
4816 * specific app, the session failed, so let's continue here.
4817 */
4818 ret = 0; /* Not an error. */
4819 break;
4820 case -ENOMEM:
4821 default:
4822 break;
4823 }
4824 }
4825
4826 if (ret == 0 && _ua_chan) {
4827 /*
4828 * Only return the application's channel on success. Note
4829 * that the channel can still be part of the application's
4830 * channel hashtable on error.
4831 */
4832 *_ua_chan = ua_chan;
4833 }
4834 return ret;
4835 }
4836
4837 /*
4838 * Enable event for a specific session and channel on the tracer.
4839 */
4840 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
4841 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4842 {
4843 int ret = 0;
4844 struct lttng_ht_iter iter, uiter;
4845 struct lttng_ht_node_str *ua_chan_node;
4846 struct ust_app *app;
4847 struct ust_app_session *ua_sess;
4848 struct ust_app_channel *ua_chan;
4849 struct ust_app_event *ua_event;
4850
4851 assert(usess->active);
4852 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
4853 uevent->attr.name, usess->id);
4854
4855 /*
4856 * NOTE: At this point, this function is called only if the session and
4857 * channel passed are already created for all apps, and enabled on the
4858 * tracer as well.
4859 */
4860
4861 rcu_read_lock();
4862
4863 /* For all registered applications */
4864 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4865 if (!app->compatible) {
4866 /*
4867 * TODO: In time, we should notify the caller of this error by
4868 * telling them that this is a version error.
4869 */
4870 continue;
4871 }
4872 ua_sess = lookup_session_by_app(usess, app);
4873 if (!ua_sess) {
4874 /* The application has a problem or is probably dead. */
4875 continue;
4876 }
4877
4878 pthread_mutex_lock(&ua_sess->lock);
4879
4880 if (ua_sess->deleted) {
4881 pthread_mutex_unlock(&ua_sess->lock);
4882 continue;
4883 }
4884
4885 /* Lookup channel in the ust app session */
4886 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4887 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4888 /*
4889 * It is possible that the channel cannot be found if
4890 * the channel/event creation occurs concurrently with
4891 * an application exit.
4892 */
4893 if (!ua_chan_node) {
4894 pthread_mutex_unlock(&ua_sess->lock);
4895 continue;
4896 }
4897
4898 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4899
4900 /* Get event node */
4901 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4902 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
4903 if (ua_event == NULL) {
4904 DBG3("UST app enable event %s not found for app PID %d."
4905 " Skipping app", uevent->attr.name, app->pid);
4906 goto next_app;
4907 }
4908
4909 ret = enable_ust_app_event(ua_sess, ua_event, app);
4910 if (ret < 0) {
4911 pthread_mutex_unlock(&ua_sess->lock);
4912 goto error;
4913 }
4914 next_app:
4915 pthread_mutex_unlock(&ua_sess->lock);
4916 }
4917
4918 error:
4919 rcu_read_unlock();
4920 return ret;
4921 }
4922
4923 /*
4924 * For a specific existing UST session and UST channel, creates the event for
4925 * all registered apps.
4926 */
4927 int ust_app_create_event_glb(struct ltt_ust_session *usess,
4928 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4929 {
4930 int ret = 0;
4931 struct lttng_ht_iter iter, uiter;
4932 struct lttng_ht_node_str *ua_chan_node;
4933 struct ust_app *app;
4934 struct ust_app_session *ua_sess;
4935 struct ust_app_channel *ua_chan;
4936
4937 assert(usess->active);
4938 DBG("UST app creating event %s for all apps for session id %" PRIu64,
4939 uevent->attr.name, usess->id);
4940
4941 rcu_read_lock();
4942
4943 /* For all registered applications */
4944 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4945 if (!app->compatible) {
4946 /*
4947 * TODO: In time, we should notify the caller of this error by
4948 * telling them that this is a version error.
4949 */
4950 continue;
4951 }
4952 ua_sess = lookup_session_by_app(usess, app);
4953 if (!ua_sess) {
4954 /* The application has a problem or is probably dead. */
4955 continue;
4956 }
4957
4958 pthread_mutex_lock(&ua_sess->lock);
4959
4960 if (ua_sess->deleted) {
4961 pthread_mutex_unlock(&ua_sess->lock);
4962 continue;
4963 }
4964
4965 /* Lookup channel in the ust app session */
4966 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4967 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4968 /* If the channel is not found, there is a code flow error */
4969 assert(ua_chan_node);
4970
4971 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4972
4973 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4974 pthread_mutex_unlock(&ua_sess->lock);
4975 if (ret < 0) {
4976 if (ret != -LTTNG_UST_ERR_EXIST) {
4977 /* Possible value at this point: -ENOMEM. If so, we stop! */
4978 break;
4979 }
4980 DBG2("UST app event %s already exists on app PID %d",
4981 uevent->attr.name, app->pid);
4982 continue;
4983 }
4984 }
4985
4986 rcu_read_unlock();
4987 return ret;
4988 }
4989
4990 /*
4991 * Start tracing for a specific UST session and app.
4992 *
4993 * Called with UST app session lock held.
4994 *
4995 */
4996 static
4997 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
4998 {
4999 int ret = 0;
5000 struct ust_app_session *ua_sess;
5001
5002 DBG("Starting tracing for ust app pid %d", app->pid);
5003
5004 rcu_read_lock();
5005
5006 if (!app->compatible) {
5007 goto end;
5008 }
5009
5010 ua_sess = lookup_session_by_app(usess, app);
5011 if (ua_sess == NULL) {
5012 /* The session is in the teardown process. Ignore and continue. */
5013 goto end;
5014 }
5015
5016 pthread_mutex_lock(&ua_sess->lock);
5017
5018 if (ua_sess->deleted) {
5019 pthread_mutex_unlock(&ua_sess->lock);
5020 goto end;
5021 }
5022
5023 if (ua_sess->enabled) {
5024 pthread_mutex_unlock(&ua_sess->lock);
5025 goto end;
5026 }
5027
5028 /* Upon restart, skip the setup; it was already done. */
5029 if (ua_sess->started) {
5030 goto skip_setup;
5031 }
5032
5033 health_code_update();
5034
5035 skip_setup:
5036 /* This starts the UST tracing */
5037 pthread_mutex_lock(&app->sock_lock);
5038 ret = lttng_ust_ctl_start_session(app->sock, ua_sess->handle);
5039 pthread_mutex_unlock(&app->sock_lock);
5040 if (ret < 0) {
5041 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5042 ERR("Error starting tracing for app pid: %d (ret: %d)",
5043 app->pid, ret);
5044 } else {
5045 DBG("UST app start session failed. Application is dead.");
5046 /*
5047 * This is normal behavior, an application can die during the
5048 * creation process. Don't report an error so the execution can
5049 * continue normally.
5050 */
5051 pthread_mutex_unlock(&ua_sess->lock);
5052 goto end;
5053 }
5054 goto error_unlock;
5055 }
5056
5057 /* Indicate that the session has been started once */
5058 ua_sess->started = 1;
5059 ua_sess->enabled = 1;
5060
5061 pthread_mutex_unlock(&ua_sess->lock);
5062
5063 health_code_update();
5064
5065 /* Quiescent wait after starting trace */
5066 pthread_mutex_lock(&app->sock_lock);
5067 ret = lttng_ust_ctl_wait_quiescent(app->sock);
5068 pthread_mutex_unlock(&app->sock_lock);
5069 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5070 ERR("UST app wait quiescent failed for app pid %d ret %d",
5071 app->pid, ret);
5072 }
5073
5074 end:
5075 rcu_read_unlock();
5076 health_code_update();
5077 return 0;
5078
5079 error_unlock:
5080 pthread_mutex_unlock(&ua_sess->lock);
5081 rcu_read_unlock();
5082 health_code_update();
5083 return -1;
5084 }
5085
5086 /*
5087 * Stop tracing for a specific UST session and app.
5088 */
5089 static
5090 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
5091 {
5092 int ret = 0;
5093 struct ust_app_session *ua_sess;
5094 struct ust_registry_session *registry;
5095
5096 DBG("Stopping tracing for ust app pid %d", app->pid);
5097
5098 rcu_read_lock();
5099
5100 if (!app->compatible) {
5101 goto end_no_session;
5102 }
5103
5104 ua_sess = lookup_session_by_app(usess, app);
5105 if (ua_sess == NULL) {
5106 goto end_no_session;
5107 }
5108
5109 pthread_mutex_lock(&ua_sess->lock);
5110
5111 if (ua_sess->deleted) {
5112 pthread_mutex_unlock(&ua_sess->lock);
5113 goto end_no_session;
5114 }
5115
5116 /*
5117 * If started = 0, it means that stop trace has been called for a session
5118 * that was never started. It's possible since we can have a fail start
5119 * from either the application manager thread or the command thread. Simply
5120 * indicate that this is a stop error.
5121 */
5122 if (!ua_sess->started) {
5123 goto error_rcu_unlock;
5124 }
5125
5126 health_code_update();
5127
5128 /* This inhibits UST tracing */
5129 pthread_mutex_lock(&app->sock_lock);
5130 ret = lttng_ust_ctl_stop_session(app->sock, ua_sess->handle);
5131 pthread_mutex_unlock(&app->sock_lock);
5132 if (ret < 0) {
5133 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5134 ERR("Error stopping tracing for app pid: %d (ret: %d)",
5135 app->pid, ret);
5136 } else {
5137 DBG("UST app stop session failed. Application is dead.");
5138 /*
5139 * This is normal behavior, an application can die during the
5140 * creation process. Don't report an error so the execution can
5141 * continue normally.
5142 */
5143 goto end_unlock;
5144 }
5145 goto error_rcu_unlock;
5146 }
5147
5148 health_code_update();
5149 ua_sess->enabled = 0;
5150
5151 /* Quiescent wait after stopping trace */
5152 pthread_mutex_lock(&app->sock_lock);
5153 ret = lttng_ust_ctl_wait_quiescent(app->sock);
5154 pthread_mutex_unlock(&app->sock_lock);
5155 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5156 ERR("UST app wait quiescent failed for app pid %d ret %d",
5157 app->pid, ret);
5158 }
5159
5160 health_code_update();
5161
5162 registry = get_session_registry(ua_sess);
5163
5164 /* The UST app session is held; the registry shall not be NULL. */
5165 assert(registry);
5166
5167 /* Push metadata for application before freeing the application. */
5168 (void) push_metadata(registry, ua_sess->consumer);
5169
5170 end_unlock:
5171 pthread_mutex_unlock(&ua_sess->lock);
5172 end_no_session:
5173 rcu_read_unlock();
5174 health_code_update();
5175 return 0;
5176
5177 error_rcu_unlock:
5178 pthread_mutex_unlock(&ua_sess->lock);
5179 rcu_read_unlock();
5180 health_code_update();
5181 return -1;
5182 }
5183
5184 static
5185 int ust_app_flush_app_session(struct ust_app *app,
5186 struct ust_app_session *ua_sess)
5187 {
5188 int ret, retval = 0;
5189 struct lttng_ht_iter iter;
5190 struct ust_app_channel *ua_chan;
5191 struct consumer_socket *socket;
5192
5193 DBG("Flushing app session buffers for ust app pid %d", app->pid);
5194
5195 rcu_read_lock();
5196
5197 if (!app->compatible) {
5198 goto end_not_compatible;
5199 }
5200
5201 pthread_mutex_lock(&ua_sess->lock);
5202
5203 if (ua_sess->deleted) {
5204 goto end_deleted;
5205 }
5206
5207 health_code_update();
5208
5209 /* Flushing buffers */
5210 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5211 ua_sess->consumer);
5212
5213 /* Flush buffers and push metadata. */
5214 switch (ua_sess->buffer_type) {
5215 case LTTNG_BUFFER_PER_PID:
5216 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
5217 node.node) {
5218 health_code_update();
5219 ret = consumer_flush_channel(socket, ua_chan->key);
5220 if (ret) {
5221 ERR("Error flushing consumer channel");
5222 retval = -1;
5223 continue;
5224 }
5225 }
5226 break;
5227 case LTTNG_BUFFER_PER_UID:
5228 default:
5229 assert(0);
5230 break;
5231 }
5232
5233 health_code_update();
5234
5235 end_deleted:
5236 pthread_mutex_unlock(&ua_sess->lock);
5237
5238 end_not_compatible:
5239 rcu_read_unlock();
5240 health_code_update();
5241 return retval;
5242 }
5243
5244 /*
5245 * Flush buffers for all applications for a specific UST session.
5246 * Called with UST session lock held.
5247 */
5248 static
5249 int ust_app_flush_session(struct ltt_ust_session *usess)
5250
5251 {
5252 int ret = 0;
5253
5254 DBG("Flushing session buffers for all ust apps");
5255
5256 rcu_read_lock();
5257
5258 /* Flush buffers and push metadata. */
5259 switch (usess->buffer_type) {
5260 case LTTNG_BUFFER_PER_UID:
5261 {
5262 struct buffer_reg_uid *reg;
5263 struct lttng_ht_iter iter;
5264
5265 /* Flush all per UID buffers associated with that session. */
5266 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5267 struct ust_registry_session *ust_session_reg;
5268 struct buffer_reg_channel *buf_reg_chan;
5269 struct consumer_socket *socket;
5270
5271 /* Get consumer socket to use to push the metadata. */
5272 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5273 usess->consumer);
5274 if (!socket) {
5275 /* Ignore request if no consumer is found for the session. */
5276 continue;
5277 }
5278
5279 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5280 buf_reg_chan, node.node) {
5281 /*
5282 * The following call will print error values so the return
5283 * code is of little importance because whatever happens, we
5284 * have to try them all.
5285 */
5286 (void) consumer_flush_channel(socket, buf_reg_chan->consumer_key);
5287 }
5288
5289 ust_session_reg = reg->registry->reg.ust;
5290 /* Push metadata. */
5291 (void) push_metadata(ust_session_reg, usess->consumer);
5292 }
5293 break;
5294 }
5295 case LTTNG_BUFFER_PER_PID:
5296 {
5297 struct ust_app_session *ua_sess;
5298 struct lttng_ht_iter iter;
5299 struct ust_app *app;
5300
5301 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5302 ua_sess = lookup_session_by_app(usess, app);
5303 if (ua_sess == NULL) {
5304 continue;
5305 }
5306 (void) ust_app_flush_app_session(app, ua_sess);
5307 }
5308 break;
5309 }
5310 default:
5311 ret = -1;
5312 assert(0);
5313 break;
5314 }
5315
5316 rcu_read_unlock();
5317 health_code_update();
5318 return ret;
5319 }
5320
5321 static
5322 int ust_app_clear_quiescent_app_session(struct ust_app *app,
5323 struct ust_app_session *ua_sess)
5324 {
5325 int ret = 0;
5326 struct lttng_ht_iter iter;
5327 struct ust_app_channel *ua_chan;
5328 struct consumer_socket *socket;
5329
5330 DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
5331
5332 rcu_read_lock();
5333
5334 if (!app->compatible) {
5335 goto end_not_compatible;
5336 }
5337
5338 pthread_mutex_lock(&ua_sess->lock);
5339
5340 if (ua_sess->deleted) {
5341 goto end_unlock;
5342 }
5343
5344 health_code_update();
5345
5346 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5347 ua_sess->consumer);
5348 if (!socket) {
5349 ERR("Failed to find consumer (%" PRIu32 ") socket",
5350 app->bits_per_long);
5351 ret = -1;
5352 goto end_unlock;
5353 }
5354
5355 /* Clear quiescent state. */
5356 switch (ua_sess->buffer_type) {
5357 case LTTNG_BUFFER_PER_PID:
5358 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
5359 ua_chan, node.node) {
5360 health_code_update();
5361 ret = consumer_clear_quiescent_channel(socket,
5362 ua_chan->key);
5363 if (ret) {
5364 ERR("Error clearing quiescent state for consumer channel");
5365 ret = -1;
5366 continue;
5367 }
5368 }
5369 break;
5370 case LTTNG_BUFFER_PER_UID:
5371 default:
5372 assert(0);
5373 ret = -1;
5374 break;
5375 }
5376
5377 health_code_update();
5378
5379 end_unlock:
5380 pthread_mutex_unlock(&ua_sess->lock);
5381
5382 end_not_compatible:
5383 rcu_read_unlock();
5384 health_code_update();
5385 return ret;
5386 }
5387
5388 /*
5389 * Clear quiescent state in each stream for all applications for a
5390 * specific UST session.
5391 * Called with UST session lock held.
5392 */
5393 static
5394 int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
5395
5396 {
5397 int ret = 0;
5398
5399 DBG("Clearing stream quiescent state for all ust apps");
5400
5401 rcu_read_lock();
5402
5403 switch (usess->buffer_type) {
5404 case LTTNG_BUFFER_PER_UID:
5405 {
5406 struct lttng_ht_iter iter;
5407 struct buffer_reg_uid *reg;
5408
5409 /*
5410 * Clear the quiescent state of all per UID buffers associated
5411 * with that session.
5412 */
5413 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5414 struct consumer_socket *socket;
5415 struct buffer_reg_channel *buf_reg_chan;
5416
5417 /* Get associated consumer socket. */
5418 socket = consumer_find_socket_by_bitness(
5419 reg->bits_per_long, usess->consumer);
5420 if (!socket) {
5421 /*
5422 * Ignore request if no consumer is found for
5423 * the session.
5424 */
5425 continue;
5426 }
5427
5428 cds_lfht_for_each_entry(reg->registry->channels->ht,
5429 &iter.iter, buf_reg_chan, node.node) {
5430 /*
5431 * The following call will print error values so
5432 * the return code is of little importance
5433 * because whatever happens, we have to try them
5434 * all.
5435 */
5436 (void) consumer_clear_quiescent_channel(socket,
5437 buf_reg_chan->consumer_key);
5438 }
5439 }
5440 break;
5441 }
5442 case LTTNG_BUFFER_PER_PID:
5443 {
5444 struct ust_app_session *ua_sess;
5445 struct lttng_ht_iter iter;
5446 struct ust_app *app;
5447
5448 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
5449 pid_n.node) {
5450 ua_sess = lookup_session_by_app(usess, app);
5451 if (ua_sess == NULL) {
5452 continue;
5453 }
5454 (void) ust_app_clear_quiescent_app_session(app,
5455 ua_sess);
5456 }
5457 break;
5458 }
5459 default:
5460 ret = -1;
5461 assert(0);
5462 break;
5463 }
5464
5465 rcu_read_unlock();
5466 health_code_update();
5467 return ret;
5468 }
5469
5470 /*
5471 * Destroy a specific UST session in apps.
5472 */
5473 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
5474 {
5475 int ret;
5476 struct ust_app_session *ua_sess;
5477 struct lttng_ht_iter iter;
5478 struct lttng_ht_node_u64 *node;
5479
5480 DBG("Destroy tracing for ust app pid %d", app->pid);
5481
5482 rcu_read_lock();
5483
5484 if (!app->compatible) {
5485 goto end;
5486 }
5487
5488 __lookup_session_by_app(usess, app, &iter);
5489 node = lttng_ht_iter_get_node_u64(&iter);
5490 if (node == NULL) {
5491 /* The session is being deleted or is already deleted. */
5492 goto end;
5493 }
5494 ua_sess = caa_container_of(node, struct ust_app_session, node);
5495
5496 health_code_update();
5497 destroy_app_session(app, ua_sess);
5498
5499 health_code_update();
5500
5501 /* Quiescent wait after stopping trace */
5502 pthread_mutex_lock(&app->sock_lock);
5503 ret = lttng_ust_ctl_wait_quiescent(app->sock);
5504 pthread_mutex_unlock(&app->sock_lock);
5505 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5506 ERR("UST app wait quiescent failed for app pid %d ret %d",
5507 app->pid, ret);
5508 }
5509 end:
5510 rcu_read_unlock();
5511 health_code_update();
5512 return 0;
5513 }
5514
5515 /*
5516 * Start tracing for the UST session.
5517 */
5518 int ust_app_start_trace_all(struct ltt_ust_session *usess)
5519 {
5520 struct lttng_ht_iter iter;
5521 struct ust_app *app;
5522
5523 DBG("Starting all UST traces");
5524
5525 /*
5526 * Even though the start trace might fail, flag this session active so
5527 * other applications coming in are started by default.
5528 */
5529 usess->active = 1;
5530
5531 rcu_read_lock();
5532
5533 /*
5534 * In a start-stop-start use-case, we need to clear the quiescent state
5535 * of each channel set by the prior stop command, thus ensuring that a
5536 * following stop or destroy is sure to grab a timestamp_end near those
5537 * operations, even if the packet is empty.
5538 */
5539 (void) ust_app_clear_quiescent_session(usess);
5540
5541 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5542 ust_app_global_update(usess, app);
5543 }
5544
5545 rcu_read_unlock();
5546
5547 return 0;
5548 }
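/*
 * Sequence the quiescent-state clear above addresses (a sketch):
 *
 *     start   -> streams are active
 *     stop    -> channels marked quiescent, timestamp_end captured
 *     start   -> must clear the quiescent state so that the *next*
 *                stop or destroy captures a fresh timestamp_end,
 *                even for empty packets
 */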
5549
5550 /*
5551 * Stop tracing for the UST session.
5552 * Called with UST session lock held.
5553 */
5554 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
5555 {
5556 int ret = 0;
5557 struct lttng_ht_iter iter;
5558 struct ust_app *app;
5559
5560 DBG("Stopping all UST traces");
5561
5562 /*
5563 * Even though the stop trace might fail, flag this session inactive so
5564 * other applications coming in are not started by default.
5565 */
5566 usess->active = 0;
5567
5568 rcu_read_lock();
5569
5570 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5571 ret = ust_app_stop_trace(usess, app);
5572 if (ret < 0) {
5573 /* Continue to the next app even on error */
5574 continue;
5575 }
5576 }
5577
5578 (void) ust_app_flush_session(usess);
5579
5580 rcu_read_unlock();
5581
5582 return 0;
5583 }
5584
5585 /*
5586 * Destroy app UST session.
5587 */
5588 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
5589 {
5590 int ret = 0;
5591 struct lttng_ht_iter iter;
5592 struct ust_app *app;
5593
5594 DBG("Destroy all UST traces");
5595
5596 rcu_read_lock();
5597
5598 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5599 ret = destroy_trace(usess, app);
5600 if (ret < 0) {
5601 /* Continue to next apps even on error */
5602 continue;
5603 }
5604 }
5605
5606 rcu_read_unlock();
5607
5608 return 0;
5609 }
5610
5611 /* The ua_sess lock must be held by the caller. */
5612 static
5613 int find_or_create_ust_app_channel(
5614 struct ltt_ust_session *usess,
5615 struct ust_app_session *ua_sess,
5616 struct ust_app *app,
5617 struct ltt_ust_channel *uchan,
5618 struct ust_app_channel **ua_chan)
5619 {
5620 int ret = 0;
5621 struct lttng_ht_iter iter;
5622 struct lttng_ht_node_str *ua_chan_node;
5623
5624 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
5625 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
5626 if (ua_chan_node) {
5627 *ua_chan = caa_container_of(ua_chan_node,
5628 struct ust_app_channel, node);
5629 goto end;
5630 }
5631
5632 ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
5633 if (ret) {
5634 goto end;
5635 }
5636 end:
5637 return ret;
5638 }
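/*
 * Illustrative use (a sketch): a synchronization path would typically
 * call this once per channel of the session; the surrounding loop is
 * hypothetical and the actual caller is not part of this excerpt.
 *
 *     ret = find_or_create_ust_app_channel(usess, ua_sess, app,
 *             uchan, &ua_chan);
 *     if (ret) {
 *         ... skip this application ...
 *     }
 */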
5639
5640 static
5641 int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
5642 struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
5643 struct ust_app *app)
5644 {
5645 int ret = 0;
5646 struct ust_app_event *ua_event = NULL;
5647
5648 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
5649 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
5650 if (!ua_event) {
5651 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5652 if (ret < 0) {
5653 goto end;
5654 }
5655 } else {
5656 if (ua_event->enabled != uevent->enabled) {
5657 ret = uevent->enabled ?
5658 enable_ust_app_event(ua_sess, ua_event, app) :
5659 disable_ust_app_event(ua_sess, ua_event, app);
5660 }
5661 }
5662
5663 end:
5664 return ret;
5665 }
5666
5667 /* Called with RCU read-side lock held. */
5668 static
5669 void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
5670 {
5671 int ret = 0;
5672 enum lttng_error_code ret_code;
5673 enum lttng_trigger_status t_status;
5674 struct lttng_ht_iter app_trigger_iter;
5675 struct lttng_triggers *triggers = NULL;
5676 struct ust_app_event_notifier_rule *event_notifier_rule;
5677 unsigned int count, i;
5678
5679 if (!ust_app_supports_notifiers(app)) {
5680 goto end;
5681 }
5682
5683 /*
5684 * Currently, registering or unregistering a trigger with an
5685 * event rule condition causes a full synchronization of the event
5686 * notifiers.
5687 *
5688 * The first step attempts to add an event notifier for all registered
5689 * triggers that apply to the user space tracers. Then, the
5690 * application's event notifiers rules are all checked against the list
5691 * of registered triggers. Any event notifier that doesn't have a
5692 * matching trigger can be assumed to have been disabled.
5693 *
5694 * All of this is inefficient, but is put in place to get the feature
5695 * rolling as it is simpler at this moment. It will be optimized Soon™
5696 * to allow the state of enabled
5697 * event notifiers to be synchronized in a piece-wise way.
5698 */
5699
5700 /* Get all triggers using uid 0 (root) */
5701 ret_code = notification_thread_command_list_triggers(
5702 the_notification_thread_handle, 0, &triggers);
5703 if (ret_code != LTTNG_OK) {
5704 goto end;
5705 }
5706
5707 assert(triggers);
5708
5709 t_status = lttng_triggers_get_count(triggers, &count);
5710 if (t_status != LTTNG_TRIGGER_STATUS_OK) {
5711 goto end;
5712 }
5713
5714 for (i = 0; i < count; i++) {
5715 struct lttng_condition *condition;
5716 struct lttng_event_rule *event_rule;
5717 struct lttng_trigger *trigger;
5718 const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
5719 enum lttng_condition_status condition_status;
5720 uint64_t token;
5721
5722 trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
5723 assert(trigger);
5724
5725 token = lttng_trigger_get_tracer_token(trigger);
5726 condition = lttng_trigger_get_condition(trigger);
5727
5728 if (lttng_condition_get_type(condition) !=
5729 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) {
5730 /* Does not apply */
5731 continue;
5732 }
5733
5734 condition_status =
5735 lttng_condition_event_rule_matches_borrow_rule_mutable(
5736 condition, &event_rule);
5737 assert(condition_status == LTTNG_CONDITION_STATUS_OK);
5738
5739 if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
5740 /* Skip kernel related triggers. */
5741 continue;
5742 }
5743
5744 /*
5745 * Find or create the associated token event rule. The caller
5746 * holds the RCU read lock, so this is safe to call without
5747 * explicitly acquiring it here.
5748 */
5749 looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
5750 app->token_to_event_notifier_rule_ht, token);
5751 if (!looked_up_event_notifier_rule) {
5752 ret = create_ust_app_event_notifier_rule(trigger, app);
5753 if (ret < 0) {
5754 goto end;
5755 }
5756 }
5757 }
5758
5759 rcu_read_lock();
5760 /* Remove all unknown event sources from the app. */
5761 cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
5762 &app_trigger_iter.iter, event_notifier_rule,
5763 node.node) {
5764 const uint64_t app_token = event_notifier_rule->token;
5765 bool found = false;
5766
5767 /*
5768 * Check if the app event trigger still exists on the
5769 * notification side.
5770 */
5771 for (i = 0; i < count; i++) {
5772 uint64_t notification_thread_token;
5773 const struct lttng_trigger *trigger =
5774 lttng_triggers_get_at_index(
5775 triggers, i);
5776
5777 assert(trigger);
5778
5779 notification_thread_token =
5780 lttng_trigger_get_tracer_token(trigger);
5781
5782 if (notification_thread_token == app_token) {
5783 found = true;
5784 break;
5785 }
5786 }
5787
5788 if (found) {
5789 /* Still valid. */
5790 continue;
5791 }
5792
5793 /*
5794 * This trigger was unregistered, disable it on the tracer's
5795 * side.
5796 */
5797 ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
5798 &app_trigger_iter);
5799 assert(ret == 0);
5800
5801 /* Callee logs errors. */
5802 (void) disable_ust_object(app, event_notifier_rule->obj);
5803
5804 delete_ust_app_event_notifier_rule(
5805 app->sock, event_notifier_rule, app);
5806 }
5807
5808 rcu_read_unlock();
5809
5810 end:
5811 lttng_triggers_destroy(triggers);
5812 return;
5813 }
5814
5815 /*
5816 * RCU read lock must be held by the caller.
5817 */
5818 static
5819 void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
5820 struct ust_app_session *ua_sess,
5821 struct ust_app *app)
5822 {
5823 int ret = 0;
5824 struct cds_lfht_iter uchan_iter;
5825 struct ltt_ust_channel *uchan;
5826
5827 assert(usess);
5828 assert(ua_sess);
5829 assert(app);
5830
5831 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
5832 uchan, node.node) {
5833 struct ust_app_channel *ua_chan;
5834 struct cds_lfht_iter uevent_iter;
5835 struct ltt_ust_event *uevent;
5836
5837 /*
5838 * Search for a matching ust_app_channel. If none is found,
5839 * create it. Creating the channel will cause the ua_chan
5840 * structure to be allocated, the channel buffers to be
5841 * allocated (if necessary) and sent to the application, and
5842 * all enabled contexts will be added to the channel.
5843 */
5844 ret = find_or_create_ust_app_channel(usess, ua_sess,
5845 app, uchan, &ua_chan);
5846 if (ret) {
5847 /* Tracer is probably gone or ENOMEM. */
5848 goto end;
5849 }
5850
5851 if (!ua_chan) {
5852 /* ua_chan will be NULL for the metadata channel */
5853 continue;
5854 }
5855
5856 cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
5857 node.node) {
5858 ret = ust_app_channel_synchronize_event(ua_chan,
5859 uevent, ua_sess, app);
5860 if (ret) {
5861 goto end;
5862 }
5863 }
5864
5865 if (ua_chan->enabled != uchan->enabled) {
5866 ret = uchan->enabled ?
5867 enable_ust_app_channel(ua_sess, uchan, app) :
5868 disable_ust_app_channel(ua_sess, ua_chan, app);
5869 if (ret) {
5870 goto end;
5871 }
5872 }
5873 }
5874 end:
5875 return;
5876 }
5877
5878 /*
5879 * The caller must ensure that the application is compatible and is tracked
5880 * by the process attribute trackers.
5881 */
5882 static
5883 void ust_app_synchronize(struct ltt_ust_session *usess,
5884 struct ust_app *app)
5885 {
5886 int ret = 0;
5887 struct ust_app_session *ua_sess = NULL;
5888
5889 /*
5890 * The application's configuration should only be synchronized for
5891 * active sessions.
5892 */
5893 assert(usess->active);
5894
5895 ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
5896 if (ret < 0) {
5897 /* Tracer is probably gone or ENOMEM. */
5898 goto error;
5899 }
5900 assert(ua_sess);
5901
5902 pthread_mutex_lock(&ua_sess->lock);
5903 if (ua_sess->deleted) {
5904 pthread_mutex_unlock(&ua_sess->lock);
5905 goto end;
5906 }
5907
5908 rcu_read_lock();
5909
5910 ust_app_synchronize_all_channels(usess, ua_sess, app);
5911
5912 /*
5913 * Create the metadata for the application. This returns gracefully if
5914 * metadata was already created for the session.
5915 *
5916 * The metadata channel must be created after the data channels as the
5917 * consumer daemon assumes this ordering. When interacting with a relay
5918 * daemon, the consumer will use this assumption to send the
5919 * "STREAMS_SENT" message to the relay daemon.
5920 */
5921 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
5922 if (ret < 0) {
5923 goto error_unlock;
5924 }
5925
5926 rcu_read_unlock();
5927
5928 end:
5929 pthread_mutex_unlock(&ua_sess->lock);
5930 /* Everything went well at this point. */
5931 return;
5932
5933 error_unlock:
5934 rcu_read_unlock();
5935 pthread_mutex_unlock(&ua_sess->lock);
5936 error:
5937 if (ua_sess) {
5938 destroy_app_session(app, ua_sess);
5939 }
5940 return;
5941 }
5942
5943 static
5944 void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
5945 {
5946 struct ust_app_session *ua_sess;
5947
5948 ua_sess = lookup_session_by_app(usess, app);
5949 if (ua_sess == NULL) {
5950 return;
5951 }
5952 destroy_app_session(app, ua_sess);
5953 }
5954
5955 /*
5956 * Add channels/events from UST global domain to registered apps at sock.
5957 *
5958 * Called with session lock held.
5959 * Called with RCU read-side lock held.
5960 */
5961 void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
5962 {
5963 assert(usess);
5964 assert(usess->active);
5965
5966 DBG2("UST app global update for app sock %d for session id %" PRIu64,
5967 app->sock, usess->id);
5968
5969 if (!app->compatible) {
5970 return;
5971 }
5972 if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
5973 usess, app->pid) &&
5974 trace_ust_id_tracker_lookup(
5975 LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
5976 usess, app->uid) &&
5977 trace_ust_id_tracker_lookup(
5978 LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
5979 usess, app->gid)) {
5980 /*
5981 * Synchronize the application's internal tracing configuration
5982 * and start tracing.
5983 */
5984 ust_app_synchronize(usess, app);
5985 ust_app_start_trace(usess, app);
5986 } else {
5987 ust_app_global_destroy(usess, app);
5988 }
5989 }
5990
5991 /*
5992 * Add all event notifiers to an application.
5993 *
5994 * Called with session lock held.
5995 * Called with RCU read-side lock held.
5996 */
5997 void ust_app_global_update_event_notifier_rules(struct ust_app *app)
5998 {
5999 DBG2("UST application global event notifier rules update: app = '%s' (ppid: %d)",
6000 app->name, app->ppid);
6001
6002 if (!app->compatible || !ust_app_supports_notifiers(app)) {
6003 return;
6004 }
6005
6006 if (app->event_notifier_group.object == NULL) {
6007 WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s' (ppid: %d)",
6008 app->name, app->ppid);
6009 return;
6010 }
6011
6012 ust_app_synchronize_event_notifier_rules(app);
6013 }
6014
6015 /*
6016 * Called with session lock held.
6017 */
6018 void ust_app_global_update_all(struct ltt_ust_session *usess)
6019 {
6020 struct lttng_ht_iter iter;
6021 struct ust_app *app;
6022
6023 rcu_read_lock();
6024 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6025 ust_app_global_update(usess, app);
6026 }
6027 rcu_read_unlock();
6028 }
6029
6030 void ust_app_global_update_all_event_notifier_rules(void)
6031 {
6032 struct lttng_ht_iter iter;
6033 struct ust_app *app;
6034
6035 rcu_read_lock();
6036 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6037 ust_app_global_update_event_notifier_rules(app);
6038 }
6039
6040 rcu_read_unlock();
6041 }
6042
6043 /*
6044 * Add context to a specific channel for global UST domain.
6045 */
6046 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
6047 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
6048 {
6049 int ret = 0;
6050 struct lttng_ht_node_str *ua_chan_node;
6051 struct lttng_ht_iter iter, uiter;
6052 struct ust_app_channel *ua_chan = NULL;
6053 struct ust_app_session *ua_sess;
6054 struct ust_app *app;
6055
6056 assert(usess->active);
6057
6058 rcu_read_lock();
6059 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6060 if (!app->compatible) {
6061 /*
6062 * TODO: In time, we should notify the caller of this error by
6063 * indicating that this is a version error.
6064 */
6065 continue;
6066 }
6067 ua_sess = lookup_session_by_app(usess, app);
6068 if (ua_sess == NULL) {
6069 continue;
6070 }
6071
6072 pthread_mutex_lock(&ua_sess->lock);
6073
6074 if (ua_sess->deleted) {
6075 pthread_mutex_unlock(&ua_sess->lock);
6076 continue;
6077 }
6078
6079 /* Lookup channel in the ust app session */
6080 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
6081 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6082 if (ua_chan_node == NULL) {
6083 goto next_app;
6084 }
6085 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
6086 node);
6087 ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
6088 if (ret < 0) {
6089 goto next_app;
6090 }
6091 next_app:
6092 pthread_mutex_unlock(&ua_sess->lock);
6093 }
6094
6095 rcu_read_unlock();
6096 return ret;
6097 }
6098
6099 /*
6100 * Receive registration and populate the given msg structure.
6101 *
6102 * On success return 0 else a negative value returned by the ustctl call.
6103 */
6104 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
6105 {
6106 int ret;
6107 uint32_t pid, ppid, uid, gid;
6108
6109 assert(msg);
6110
6111 ret = lttng_ust_ctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
6112 &pid, &ppid, &uid, &gid,
6113 &msg->bits_per_long,
6114 &msg->uint8_t_alignment,
6115 &msg->uint16_t_alignment,
6116 &msg->uint32_t_alignment,
6117 &msg->uint64_t_alignment,
6118 &msg->long_alignment,
6119 &msg->byte_order,
6120 msg->name);
6121 if (ret < 0) {
6122 switch (-ret) {
6123 case EPIPE:
6124 case ECONNRESET:
6125 case LTTNG_UST_ERR_EXITING:
6126 DBG3("UST app recv reg message failed. Application died");
6127 break;
6128 case LTTNG_UST_ERR_UNSUP_MAJOR:
6129 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
6130 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
6131 LTTNG_UST_ABI_MINOR_VERSION);
6132 break;
6133 default:
6134 ERR("UST app recv reg message failed with ret %d", ret);
6135 break;
6136 }
6137 goto error;
6138 }
6139 msg->pid = (pid_t) pid;
6140 msg->ppid = (pid_t) ppid;
6141 msg->uid = (uid_t) uid;
6142 msg->gid = (gid_t) gid;
6143
6144 error:
6145 return ret;
6146 }
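/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * ust_app_recv_registration(). The connected socket is assumed to come from
 * a hypothetical accept step in the registration thread, and error handling
 * is reduced to the essentials. Guarded out so it is never compiled.
 */
#if 0
static void example_handle_registration(int sock)
{
	struct ust_register_msg msg;

	/* Populate msg from the freshly connected application socket. */
	if (ust_app_recv_registration(sock, &msg) < 0) {
		/* Negative ustctl error; the socket is unusable. */
		close(sock);
		return;
	}

	DBG("App '%s' registered: pid = %d, uid = %d, ABI %u.%u",
			msg.name, (int) msg.pid, (int) msg.uid,
			msg.major, msg.minor);
}
#endif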
6147
6148 /*
6149 * Return a ust app session object using the application object and the
6150 * session object descriptor as a key. If not found, NULL is returned.
6151 * An RCU read side lock MUST be acquired when calling this function.
6152 */
6153 static struct ust_app_session *find_session_by_objd(struct ust_app *app,
6154 int objd)
6155 {
6156 struct lttng_ht_node_ulong *node;
6157 struct lttng_ht_iter iter;
6158 struct ust_app_session *ua_sess = NULL;
6159
6160 assert(app);
6161
6162 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
6163 node = lttng_ht_iter_get_node_ulong(&iter);
6164 if (node == NULL) {
6165 DBG2("UST app session find by objd %d not found", objd);
6166 goto error;
6167 }
6168
6169 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
6170
6171 error:
6172 return ua_sess;
6173 }
6174
6175 /*
6176 * Return a ust app channel object using the application object and the channel
6177 * object descriptor as a key. If not found, NULL is returned. An RCU read side
6178 * lock MUST be acquired before calling this function.
6179 */
6180 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
6181 int objd)
6182 {
6183 struct lttng_ht_node_ulong *node;
6184 struct lttng_ht_iter iter;
6185 struct ust_app_channel *ua_chan = NULL;
6186
6187 assert(app);
6188
6189 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
6190 node = lttng_ht_iter_get_node_ulong(&iter);
6191 if (node == NULL) {
6192 DBG2("UST app channel find by objd %d not found", objd);
6193 goto error;
6194 }
6195
6196 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
6197
6198 error:
6199 return ua_chan;
6200 }
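/*
 * Illustrative sketch (not part of the original file): how the two lookup
 * helpers above are meant to be used. The RCU read-side lock must cover both
 * the lookup and every dereference of the returned object. The object
 * descriptor values are hypothetical. Guarded out so it is never compiled.
 */
#if 0
static void example_lookup_by_objd(struct ust_app *app)
{
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	rcu_read_lock();

	ua_sess = find_session_by_objd(app, 42);
	ua_chan = find_channel_by_objd(app, 43);
	if (ua_sess && ua_chan) {
		/* Safe to dereference only while the RCU lock is held. */
		DBG("Found session handle %d and channel '%s'",
				ua_sess->handle, ua_chan->name);
	}

	rcu_read_unlock();
}
#endif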
6201
6202 /*
6203 * Reply to a register channel notification from an application on the notify
6204 * socket. The channel metadata is also created.
6205 *
6206 * The session UST registry lock is acquired in this function.
6207 *
6208 * On success 0 is returned else a negative value.
6209 */
6210 static int reply_ust_register_channel(int sock, int cobjd,
6211 size_t nr_fields, struct lttng_ust_ctl_field *fields)
6212 {
6213 int ret, ret_code = 0;
6214 uint32_t chan_id;
6215 uint64_t chan_reg_key;
6216 enum lttng_ust_ctl_channel_header type;
6217 struct ust_app *app;
6218 struct ust_app_channel *ua_chan;
6219 struct ust_app_session *ua_sess;
6220 struct ust_registry_session *registry;
6221 struct ust_registry_channel *ust_reg_chan;
6222
6223 rcu_read_lock();
6224
6225 /* Lookup application. If not found, there is a code flow error. */
6226 app = find_app_by_notify_sock(sock);
6227 if (!app) {
6228 DBG("Application socket %d is being torn down. Abort event notify",
6229 sock);
6230 ret = 0;
6231 goto error_rcu_unlock;
6232 }
6233
6234 /* Lookup channel by UST object descriptor. */
6235 ua_chan = find_channel_by_objd(app, cobjd);
6236 if (!ua_chan) {
6237 DBG("Application channel is being torn down. Abort event notify");
6238 ret = 0;
6239 goto error_rcu_unlock;
6240 }
6241
6242 assert(ua_chan->session);
6243 ua_sess = ua_chan->session;
6244
6245 /* Get right session registry depending on the session buffer type. */
6246 registry = get_session_registry(ua_sess);
6247 if (!registry) {
6248 DBG("Application session is being torn down. Abort event notify");
6249 ret = 0;
6250 goto error_rcu_unlock;
6251 }
6252
6253 /* Depending on the buffer type, a different channel key is used. */
6254 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6255 chan_reg_key = ua_chan->tracing_channel_id;
6256 } else {
6257 chan_reg_key = ua_chan->key;
6258 }
6259
6260 pthread_mutex_lock(&registry->lock);
6261
6262 ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
6263 assert(ust_reg_chan);
6264
6265 if (!ust_reg_chan->register_done) {
6266 /*
6267 * TODO: eventually use the registry event count for
6268 * this channel to better guess header type for per-pid
6269 * buffers.
6270 */
6271 type = LTTNG_UST_CTL_CHANNEL_HEADER_LARGE;
6272 ust_reg_chan->nr_ctx_fields = nr_fields;
6273 ust_reg_chan->ctx_fields = fields;
6274 fields = NULL;
6275 ust_reg_chan->header_type = type;
6276 } else {
6277 /* Get current already assigned values. */
6278 type = ust_reg_chan->header_type;
6279 }
6280 /* Channel id is set during the object creation. */
6281 chan_id = ust_reg_chan->chan_id;
6282
6283 /* Append to metadata */
6284 if (!ust_reg_chan->metadata_dumped) {
6285 ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);
6286 if (ret_code) {
6287 ERR("Error appending channel metadata (errno = %d)", ret_code);
6288 goto reply;
6289 }
6290 }
6291
6292 reply:
6293 DBG3("UST app replying to register channel key %" PRIu64
6294 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
6295 ret_code);
6296
6297 ret = lttng_ust_ctl_reply_register_channel(sock, chan_id, type, ret_code);
6298 if (ret < 0) {
6299 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6300 ERR("UST app reply channel failed with ret %d", ret);
6301 } else {
6302 DBG3("UST app reply channel failed. Application died");
6303 }
6304 goto error;
6305 }
6306
6307 /* This channel registry registration is completed. */
6308 ust_reg_chan->register_done = 1;
6309
6310 error:
6311 pthread_mutex_unlock(&registry->lock);
6312 error_rcu_unlock:
6313 rcu_read_unlock();
6314 free(fields);
6315 return ret;
6316 }
6317
6318 /*
6319 * Add event to the UST channel registry. When the event is added to the
6320 * registry, the metadata is also created. Once done, this replies to the
6321 * application with the appropriate error code.
6322 *
6323 * The session UST registry lock is acquired in the function.
6324 *
6325 * On success 0 is returned else a negative value.
6326 */
6327 static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
6328 char *sig, size_t nr_fields, struct lttng_ust_ctl_field *fields,
6329 int loglevel_value, char *model_emf_uri)
6330 {
6331 int ret, ret_code;
6332 uint32_t event_id = 0;
6333 uint64_t chan_reg_key;
6334 struct ust_app *app;
6335 struct ust_app_channel *ua_chan;
6336 struct ust_app_session *ua_sess;
6337 struct ust_registry_session *registry;
6338
6339 rcu_read_lock();
6340
6341 /* Lookup application. If not found, there is a code flow error. */
6342 app = find_app_by_notify_sock(sock);
6343 if (!app) {
6344 DBG("Application socket %d is being torn down. Abort event notify",
6345 sock);
6346 ret = 0;
6347 goto error_rcu_unlock;
6348 }
6349
6350 /* Lookup channel by UST object descriptor. */
6351 ua_chan = find_channel_by_objd(app, cobjd);
6352 if (!ua_chan) {
6353 DBG("Application channel is being torn down. Abort event notify");
6354 ret = 0;
6355 goto error_rcu_unlock;
6356 }
6357
6358 assert(ua_chan->session);
6359 ua_sess = ua_chan->session;
6360
6361 registry = get_session_registry(ua_sess);
6362 if (!registry) {
6363 DBG("Application session is being torn down. Abort event notify");
6364 ret = 0;
6365 goto error_rcu_unlock;
6366 }
6367
6368 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6369 chan_reg_key = ua_chan->tracing_channel_id;
6370 } else {
6371 chan_reg_key = ua_chan->key;
6372 }
6373
6374 pthread_mutex_lock(&registry->lock);
6375
6376 /*
6377 * From this point on, this call acquires ownership of sig, fields and
6378 * model_emf_uri, meaning that any freeing is done inside it if needed.
6379 * These three variables MUST NOT be read or written after this.
6380 */
6381 ret_code = ust_registry_create_event(registry, chan_reg_key,
6382 sobjd, cobjd, name, sig, nr_fields, fields,
6383 loglevel_value, model_emf_uri, ua_sess->buffer_type,
6384 &event_id, app);
6385 sig = NULL;
6386 fields = NULL;
6387 model_emf_uri = NULL;
6388
6389 /*
6390 * The return value is returned to ustctl so in case of an error, the
6391 * application can be notified. In case of an error, it's important not to
6392 * return a negative error or else the application will get closed.
6393 */
6394 ret = lttng_ust_ctl_reply_register_event(sock, event_id, ret_code);
6395 if (ret < 0) {
6396 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6397 ERR("UST app reply event failed with ret %d", ret);
6398 } else {
6399 DBG3("UST app reply event failed. Application died");
6400 }
6401 /*
6402 * No need to wipe the created event since the application socket will
6403 * get closed on error, hence cleaning up everything by itself.
6404 */
6405 goto error;
6406 }
6407
6408 DBG3("UST registry event %s with id %" PRId32 " added successfully",
6409 name, event_id);
6410
6411 error:
6412 pthread_mutex_unlock(&registry->lock);
6413 error_rcu_unlock:
6414 rcu_read_unlock();
6415 free(sig);
6416 free(fields);
6417 free(model_emf_uri);
6418 return ret;
6419 }
6420
6421 /*
6422 * Add enum to the UST session registry. Once done, this replies to the
6423 * application with the appropriate error code.
6424 *
6425 * The session UST registry lock is acquired within this function.
6426 *
6427 * On success 0 is returned else a negative value.
6428 */
6429 static int add_enum_ust_registry(int sock, int sobjd, char *name,
6430 struct lttng_ust_ctl_enum_entry *entries, size_t nr_entries)
6431 {
6432 int ret = 0, ret_code;
6433 struct ust_app *app;
6434 struct ust_app_session *ua_sess;
6435 struct ust_registry_session *registry;
6436 uint64_t enum_id = -1ULL;
6437
6438 rcu_read_lock();
6439
6440 /* Lookup application. If not found, there is a code flow error. */
6441 app = find_app_by_notify_sock(sock);
6442 if (!app) {
6443 /* Do not return an error since this is not an error. */
6444 DBG("Application socket %d is being torn down. Aborting enum registration",
6445 sock);
6446 free(entries);
6447 goto error_rcu_unlock;
6448 }
6449
6450 /* Lookup session by UST object descriptor. */
6451 ua_sess = find_session_by_objd(app, sobjd);
6452 if (!ua_sess) {
6453 /* Do not return an error since this is not an error. */
6454 DBG("Application session is being torn down (session not found). Aborting enum registration.");
6455 free(entries);
6456 goto error_rcu_unlock;
6457 }
6458
6459 registry = get_session_registry(ua_sess);
6460 if (!registry) {
6461 DBG("Application session is being torn down (registry not found). Aborting enum registration.");
6462 free(entries);
6463 goto error_rcu_unlock;
6464 }
6465
6466 pthread_mutex_lock(&registry->lock);
6467
6468 /*
6469 * From this point on, the callee acquires the ownership of
6470 * entries. The variable entries MUST NOT be read or written
6471 * after this call.
6472 */
6473 ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
6474 entries, nr_entries, &enum_id);
6475 entries = NULL;
6476
6477 /*
6478 * The return value is returned to ustctl so in case of an error, the
6479 * application can be notified. In case of an error, it's important not to
6480 * return a negative error or else the application will get closed.
6481 */
6482 ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, ret_code);
6483 if (ret < 0) {
6484 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6485 ERR("UST app reply enum failed with ret %d", ret);
6486 } else {
6487 DBG3("UST app reply enum failed. Application died");
6488 }
6489 /*
6490 * No need to wipe the created enum since the application socket will
6491 * get closed on error, hence cleaning up everything by itself.
6492 */
6493 goto error;
6494 }
6495
6496 DBG3("UST registry enum %s added successfully or already found", name);
6497
6498 error:
6499 pthread_mutex_unlock(&registry->lock);
6500 error_rcu_unlock:
6501 rcu_read_unlock();
6502 return ret;
6503 }
6504
6505 /*
6506 * Handle application notification through the given notify socket.
6507 *
6508 * Return 0 on success or else a negative value.
6509 */
6510 int ust_app_recv_notify(int sock)
6511 {
6512 int ret;
6513 enum lttng_ust_ctl_notify_cmd cmd;
6514
6515 DBG3("UST app receiving notify from sock %d", sock);
6516
6517 ret = lttng_ust_ctl_recv_notify(sock, &cmd);
6518 if (ret < 0) {
6519 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6520 ERR("UST app recv notify failed with ret %d", ret);
6521 } else {
6522 DBG3("UST app recv notify failed. Application died");
6523 }
6524 goto error;
6525 }
6526
6527 switch (cmd) {
6528 case LTTNG_UST_CTL_NOTIFY_CMD_EVENT:
6529 {
6530 int sobjd, cobjd, loglevel_value;
6531 char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri;
6532 size_t nr_fields;
6533 struct lttng_ust_ctl_field *fields;
6534
6535 DBG2("UST app ustctl register event received");
6536
6537 ret = lttng_ust_ctl_recv_register_event(sock, &sobjd, &cobjd, name,
6538 &loglevel_value, &sig, &nr_fields, &fields,
6539 &model_emf_uri);
6540 if (ret < 0) {
6541 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6542 ERR("UST app recv event failed with ret %d", ret);
6543 } else {
6544 DBG3("UST app recv event failed. Application died");
6545 }
6546 goto error;
6547 }
6548
6549 /*
6550 * Add the event to the UST registry coming from the notify socket.
6551 * This call will free sig, fields and model_emf_uri if needed. This
6552 * code path loses ownership of these variables and transfers it to
6553 * the called function.
6554 */
6555 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
6556 fields, loglevel_value, model_emf_uri);
6557 if (ret < 0) {
6558 goto error;
6559 }
6560
6561 break;
6562 }
6563 case LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL:
6564 {
6565 int sobjd, cobjd;
6566 size_t nr_fields;
6567 struct lttng_ust_ctl_field *fields;
6568
6569 DBG2("UST app ustctl register channel received");
6570
6571 ret = lttng_ust_ctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
6572 &fields);
6573 if (ret < 0) {
6574 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6575 ERR("UST app recv channel failed with ret %d", ret);
6576 } else {
6577 DBG3("UST app recv channel failed. Application died");
6578 }
6579 goto error;
6580 }
6581
6582 /*
6583 * Ownership of the fields is transferred to this function call,
6584 * meaning that they will be freed if needed. After this, it is
6585 * invalid to access or free fields.
6586 */
6587 ret = reply_ust_register_channel(sock, cobjd, nr_fields,
6588 fields);
6589 if (ret < 0) {
6590 goto error;
6591 }
6592
6593 break;
6594 }
6595 case LTTNG_UST_CTL_NOTIFY_CMD_ENUM:
6596 {
6597 int sobjd;
6598 char name[LTTNG_UST_ABI_SYM_NAME_LEN];
6599 size_t nr_entries;
6600 struct lttng_ust_ctl_enum_entry *entries;
6601
6602 DBG2("UST app ustctl register enum received");
6603
6604 ret = lttng_ust_ctl_recv_register_enum(sock, &sobjd, name,
6605 &entries, &nr_entries);
6606 if (ret < 0) {
6607 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6608 ERR("UST app recv enum failed with ret %d", ret);
6609 } else {
6610 DBG3("UST app recv enum failed. Application died");
6611 }
6612 goto error;
6613 }
6614
6615 /* Callee assumes ownership of entries */
6616 ret = add_enum_ust_registry(sock, sobjd, name,
6617 entries, nr_entries);
6618 if (ret < 0) {
6619 goto error;
6620 }
6621
6622 break;
6623 }
6624 default:
6625 /* Should NEVER happen. */
6626 assert(0);
6627 }
6628
6629 error:
6630 return ret;
6631 }
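/*
 * Illustrative sketch (not part of the original file): how a notification
 * thread might drive ust_app_recv_notify() from its poll loop. The
 * wait_for_notify_ready() helper is a hypothetical simplification of the
 * real poll machinery. Guarded out so it is never compiled.
 */
#if 0
static void example_notify_loop(int sock)
{
	for (;;) {
		/* Block until the application sends a notify command. */
		if (wait_for_notify_ready(sock) < 0) {
			break;
		}

		if (ust_app_recv_notify(sock) < 0) {
			/* Socket error or application death: tear down. */
			ust_app_notify_sock_unregister(sock);
			break;
		}
	}
}
#endif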
6632
6633 /*
6634 * Once the notify socket hangs up, this is called. First, it tries to find the
6635 * corresponding application. On failure, the call_rcu to close the socket is
6636 * executed. If an application is found, it tries to delete it from the notify
6637 * socket hash table. Whatever the result, it proceeds to the call_rcu.
6638 *
6639 * Note that an object needs to be allocated here so on ENOMEM failure, the
6640 * call RCU is not done but the rest of the cleanup is.
6641 */
6642 void ust_app_notify_sock_unregister(int sock)
6643 {
6644 int err_enomem = 0;
6645 struct lttng_ht_iter iter;
6646 struct ust_app *app;
6647 struct ust_app_notify_sock_obj *obj;
6648
6649 assert(sock >= 0);
6650
6651 rcu_read_lock();
6652
6653 obj = zmalloc(sizeof(*obj));
6654 if (!obj) {
6655 /*
6656 * An ENOMEM is kind of uncool. If this strikes we continue the
6657 * procedure but the call_rcu will not be called. In this case, we
6658 * accept the fd leak rather than possibly creating an unsynchronized
6659 * state between threads.
6660 *
6661 * TODO: The notify object should be created once the notify socket is
6662 * registered and stored independently of the ust app object. The
6663 * tricky part is to synchronize the teardown of the application and
6664 * this notify object. Let's keep that in mind so we can avoid this
6665 * kind of shenanigans with ENOMEM in the teardown path.
6666 */
6667 err_enomem = 1;
6668 } else {
6669 obj->fd = sock;
6670 }
6671
6672 DBG("UST app notify socket unregister %d", sock);
6673
6674 /*
6675 * Lookup application by notify socket. If this fails, this means that the
6676 * hash table delete has already been done by the application
6677 * unregistration process so we can safely close the notify socket in a
6678 * call RCU.
6679 */
6680 app = find_app_by_notify_sock(sock);
6681 if (!app) {
6682 goto close_socket;
6683 }
6684
6685 iter.iter.node = &app->notify_sock_n.node;
6686
6687 /*
6688 * Whatever happens here, failure or success, in both cases we have to
6689 * close the socket after a grace period, so we proceed to the call RCU
6690 * here. If the deletion is successful, the application is no longer
6691 * visible to other threads, and if it fails it means that it was
6692 * already deleted from the hash table, so either way we just have to
6693 * close the socket.
6694 */
6695 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
6696
6697 close_socket:
6698 rcu_read_unlock();
6699
6700 /*
6701 * Close the socket after a grace period to avoid it being reused before
6702 * the application object is freed, which could create a race between
6703 * threads trying to add a unique entry to the global hash table.
6704 */
6705 if (!err_enomem) {
6706 call_rcu(&obj->head, close_notify_sock_rcu);
6707 }
6708 }
6709
6710 /*
6711 * Destroy a ust app data structure and free its memory.
6712 */
6713 void ust_app_destroy(struct ust_app *app)
6714 {
6715 if (!app) {
6716 return;
6717 }
6718
6719 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
6720 }
6721
6722 /*
6723 * Take a snapshot for a given UST session. The snapshot is sent to the given
6724 * output.
6725 *
6726 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
6727 */
6728 enum lttng_error_code ust_app_snapshot_record(
6729 const struct ltt_ust_session *usess,
6730 const struct consumer_output *output, int wait,
6731 uint64_t nb_packets_per_stream)
6732 {
6733 int ret = 0;
6734 enum lttng_error_code status = LTTNG_OK;
6735 struct lttng_ht_iter iter;
6736 struct ust_app *app;
6737 char *trace_path = NULL;
6738
6739 assert(usess);
6740 assert(output);
6741
6742 rcu_read_lock();
6743
6744 switch (usess->buffer_type) {
6745 case LTTNG_BUFFER_PER_UID:
6746 {
6747 struct buffer_reg_uid *reg;
6748
6749 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6750 struct buffer_reg_channel *buf_reg_chan;
6751 struct consumer_socket *socket;
6752 char pathname[PATH_MAX];
6753 size_t consumer_path_offset = 0;
6754
6755 if (!reg->registry->reg.ust->metadata_key) {
6756 /* Skip since no metadata is present */
6757 continue;
6758 }
6759
6760 /* Get consumer socket to use to push the metadata. */
6761 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6762 usess->consumer);
6763 if (!socket) {
6764 status = LTTNG_ERR_INVALID;
6765 goto error;
6766 }
6767
6768 memset(pathname, 0, sizeof(pathname));
6769 ret = snprintf(pathname, sizeof(pathname),
6770 DEFAULT_UST_TRACE_UID_PATH,
6771 reg->uid, reg->bits_per_long);
6772 if (ret < 0) {
6773 PERROR("snprintf snapshot path");
6774 status = LTTNG_ERR_INVALID;
6775 goto error;
6776 }
6777 /* Free the path allocated on the previous iteration. */
6778 free(trace_path);
6779 trace_path = setup_channel_trace_path(usess->consumer, pathname,
6780 &consumer_path_offset);
6781 if (!trace_path) {
6782 status = LTTNG_ERR_INVALID;
6783 goto error;
6784 }
6785 /* Add the UST default trace dir to path. */
6786 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6787 buf_reg_chan, node.node) {
6788 status = consumer_snapshot_channel(socket,
6789 buf_reg_chan->consumer_key,
6790 output, 0, usess->uid,
6791 usess->gid, &trace_path[consumer_path_offset], wait,
6792 nb_packets_per_stream);
6793 if (status != LTTNG_OK) {
6794 goto error;
6795 }
6796 }
6797 status = consumer_snapshot_channel(socket,
6798 reg->registry->reg.ust->metadata_key, output, 1,
6799 usess->uid, usess->gid, &trace_path[consumer_path_offset],
6800 wait, 0);
6801 if (status != LTTNG_OK) {
6802 goto error;
6803 }
6804 }
6805 break;
6806 }
6807 case LTTNG_BUFFER_PER_PID:
6808 {
6809 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6810 struct consumer_socket *socket;
6811 struct lttng_ht_iter chan_iter;
6812 struct ust_app_channel *ua_chan;
6813 struct ust_app_session *ua_sess;
6814 struct ust_registry_session *registry;
6815 char pathname[PATH_MAX];
6816 size_t consumer_path_offset = 0;
6817
6818 ua_sess = lookup_session_by_app(usess, app);
6819 if (!ua_sess) {
6820 /* Session not associated with this app. */
6821 continue;
6822 }
6823
6824 /* Get the right consumer socket for the application. */
6825 socket = consumer_find_socket_by_bitness(app->bits_per_long,
6826 output);
6827 if (!socket) {
6828 status = LTTNG_ERR_INVALID;
6829 goto error;
6830 }
6831
6832 /* Add the UST default trace dir to path. */
6833 memset(pathname, 0, sizeof(pathname));
6834 ret = snprintf(pathname, sizeof(pathname), "%s",
6835 ua_sess->path);
6836 if (ret < 0) {
6837 status = LTTNG_ERR_INVALID;
6838 PERROR("snprintf snapshot path");
6839 goto error;
6840 }
6841 /* Free the path allocated on the previous iteration. */
6842 free(trace_path);
6843 trace_path = setup_channel_trace_path(usess->consumer, pathname,
6844 &consumer_path_offset);
6845 if (!trace_path) {
6846 status = LTTNG_ERR_INVALID;
6847 goto error;
6848 }
6849 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6850 ua_chan, node.node) {
6851 status = consumer_snapshot_channel(socket,
6852 ua_chan->key, output, 0,
6853 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6854 lttng_credentials_get_gid(&ua_sess->effective_credentials),
6855 &trace_path[consumer_path_offset], wait,
6856 nb_packets_per_stream);
6857 switch (status) {
6858 case LTTNG_OK:
6859 break;
6860 case LTTNG_ERR_CHAN_NOT_FOUND:
6861 continue;
6862 default:
6863 goto error;
6864 }
6865 }
6866
6867 registry = get_session_registry(ua_sess);
6868 if (!registry) {
6869 DBG("Application session is being torn down. Skip application.");
6870 continue;
6871 }
6872 status = consumer_snapshot_channel(socket,
6873 registry->metadata_key, output, 1,
6874 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6875 lttng_credentials_get_gid(&ua_sess->effective_credentials),
6876 &trace_path[consumer_path_offset], wait, 0);
6877 switch (status) {
6878 case LTTNG_OK:
6879 break;
6880 case LTTNG_ERR_CHAN_NOT_FOUND:
6881 continue;
6882 default:
6883 goto error;
6884 }
6885 }
6886 break;
6887 }
6888 default:
6889 assert(0);
6890 break;
6891 }
6892
6893 error:
6894 free(trace_path);
6895 rcu_read_unlock();
6896 return status;
6897 }
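/*
 * Illustrative sketch (not part of the original file): invoking the snapshot
 * record operation. `session` is a hypothetical, locked ltt_session with a
 * UST domain; in practice the packet count is derived from the snapshot
 * output's maximum size. Guarded out so it is never compiled.
 */
#if 0
static enum lttng_error_code example_snapshot(struct ltt_session *session,
		struct consumer_output *output)
{
	/* Wait for completion; record one packet per stream. */
	return ust_app_snapshot_record(session->ust_session, output,
			1 /* wait */, 1 /* nb_packets_per_stream */);
}
#endif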
6898
6899 /*
6900 * Return the size taken by one more packet per stream.
6901 */
6902 uint64_t ust_app_get_size_one_more_packet_per_stream(
6903 const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
6904 {
6905 uint64_t tot_size = 0;
6906 struct ust_app *app;
6907 struct lttng_ht_iter iter;
6908
6909 assert(usess);
6910
6911 switch (usess->buffer_type) {
6912 case LTTNG_BUFFER_PER_UID:
6913 {
6914 struct buffer_reg_uid *reg;
6915
6916 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6917 struct buffer_reg_channel *buf_reg_chan;
6918
6919 rcu_read_lock();
6920 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6921 buf_reg_chan, node.node) {
6922 if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
6923 /*
6924 * Don't take the channel into account if we
6925 * have already grabbed all its packets.
6926 */
6927 continue;
6928 }
6929 tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
6930 }
6931 rcu_read_unlock();
6932 }
6933 break;
6934 }
6935 case LTTNG_BUFFER_PER_PID:
6936 {
6937 rcu_read_lock();
6938 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6939 struct ust_app_channel *ua_chan;
6940 struct ust_app_session *ua_sess;
6941 struct lttng_ht_iter chan_iter;
6942
6943 ua_sess = lookup_session_by_app(usess, app);
6944 if (!ua_sess) {
6945 /* Session not associated with this app. */
6946 continue;
6947 }
6948
6949 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6950 ua_chan, node.node) {
6951 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
6952 /*
6953 * Don't take the channel into account if we
6954 * have already grabbed all its packets.
6955 */
6956 continue;
6957 }
6958 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
6959 }
6960 }
6961 rcu_read_unlock();
6962 break;
6963 }
6964 default:
6965 assert(0);
6966 break;
6967 }
6968
6969 return tot_size;
6970 }
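/*
 * Illustrative sketch (not part of the original file): a worked example of
 * the computation above with hypothetical numbers. A channel with 4
 * sub-buffers of 256 kiB and 8 streams contributes 8 * 256 kiB = 2 MiB per
 * extra packet, but only while cur_nr_packets < num_subbuf. Guarded out so
 * it is never compiled.
 */
#if 0
static uint64_t example_one_more_packet_size(void)
{
	const uint64_t subbuf_size = 256 * 1024;	/* 256 kiB */
	const uint64_t num_subbuf = 4;
	const uint64_t stream_count = 8;
	const uint64_t cur_nr_packets = 2;
	uint64_t tot_size = 0;

	if (cur_nr_packets < num_subbuf) {
		tot_size += subbuf_size * stream_count;	/* 2 MiB */
	}

	return tot_size;
}
#endif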
6971
6972 int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
6973 struct cds_list_head *buffer_reg_uid_list,
6974 struct consumer_output *consumer, uint64_t uchan_id,
6975 int overwrite, uint64_t *discarded, uint64_t *lost)
6976 {
6977 int ret;
6978 uint64_t consumer_chan_key;
6979
6980 *discarded = 0;
6981 *lost = 0;
6982
6983 ret = buffer_reg_uid_consumer_channel_key(
6984 buffer_reg_uid_list, uchan_id, &consumer_chan_key);
6985 if (ret < 0) {
6986 /* Not found */
6987 ret = 0;
6988 goto end;
6989 }
6990
6991 if (overwrite) {
6992 ret = consumer_get_lost_packets(ust_session_id,
6993 consumer_chan_key, consumer, lost);
6994 } else {
6995 ret = consumer_get_discarded_events(ust_session_id,
6996 consumer_chan_key, consumer, discarded);
6997 }
6998
6999 end:
7000 return ret;
7001 }
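/*
 * Illustrative sketch (not part of the original file): querying runtime
 * statistics for a per-UID channel. Depending on the channel mode, only one
 * counter is populated: `lost` for overwrite channels, `discarded` for
 * discard channels. The channel id passed in is hypothetical. Guarded out
 * so it is never compiled.
 */
#if 0
static void example_channel_stats(struct ltt_ust_session *usess,
		uint64_t uchan_id, int overwrite)
{
	uint64_t discarded = 0, lost = 0;
	int ret;

	ret = ust_app_uid_get_channel_runtime_stats(usess->id,
			&usess->buffer_reg_uid_list, usess->consumer,
			uchan_id, overwrite, &discarded, &lost);
	if (ret < 0) {
		return;
	}

	DBG("Channel stats: %" PRIu64 " discarded events, %" PRIu64
			" lost packets", discarded, lost);
}
#endif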
7002
7003 int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
7004 struct ltt_ust_channel *uchan,
7005 struct consumer_output *consumer, int overwrite,
7006 uint64_t *discarded, uint64_t *lost)
7007 {
7008 int ret = 0;
7009 struct lttng_ht_iter iter;
7010 struct lttng_ht_node_str *ua_chan_node;
7011 struct ust_app *app;
7012 struct ust_app_session *ua_sess;
7013 struct ust_app_channel *ua_chan;
7014
7015 *discarded = 0;
7016 *lost = 0;
7017
7018 rcu_read_lock();
7019 /*
7020 * Iterate over all registered applications. Sum the counters of every
7021 * application containing the requested session and channel.
7022 */
7023 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7024 struct lttng_ht_iter uiter;
7025
7026 ua_sess = lookup_session_by_app(usess, app);
7027 if (ua_sess == NULL) {
7028 continue;
7029 }
7030
7031 /* Get channel */
7032 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
7033 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
7034 /* If the session is found for the app, the channel must be there */
7035 assert(ua_chan_node);
7036
7037 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
7038
7039 if (overwrite) {
7040 uint64_t _lost;
7041
7042 ret = consumer_get_lost_packets(usess->id, ua_chan->key,
7043 consumer, &_lost);
7044 if (ret < 0) {
7045 break;
7046 }
7047 (*lost) += _lost;
7048 } else {
7049 uint64_t _discarded;
7050
7051 ret = consumer_get_discarded_events(usess->id,
7052 ua_chan->key, consumer, &_discarded);
7053 if (ret < 0) {
7054 break;
7055 }
7056 (*discarded) += _discarded;
7057 }
7058 }
7059
7060 rcu_read_unlock();
7061 return ret;
7062 }
7063
7064 static
7065 int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
7066 struct ust_app *app)
7067 {
7068 int ret = 0;
7069 struct ust_app_session *ua_sess;
7070
7071 DBG("Regenerating the metadata for ust app pid %d", app->pid);
7072
7073 rcu_read_lock();
7074
7075 ua_sess = lookup_session_by_app(usess, app);
7076 if (ua_sess == NULL) {
7077 /* The session is in the teardown process. Ignore and continue. */
7078 goto end;
7079 }
7080
7081 pthread_mutex_lock(&ua_sess->lock);
7082
7083 if (ua_sess->deleted) {
7084 goto end_unlock;
7085 }
7086
7087 pthread_mutex_lock(&app->sock_lock);
7088 ret = lttng_ust_ctl_regenerate_statedump(app->sock, ua_sess->handle);
7089 pthread_mutex_unlock(&app->sock_lock);
7090
7091 end_unlock:
7092 pthread_mutex_unlock(&ua_sess->lock);
7093
7094 end:
7095 rcu_read_unlock();
7096 health_code_update();
7097 return ret;
7098 }
7099
7100 /*
7101 * Regenerate the statedump for each app in the session.
7102 */
7103 int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
7104 {
7105 int ret = 0;
7106 struct lttng_ht_iter iter;
7107 struct ust_app *app;
7108
7109 DBG("Regenerating the metadata for all UST apps");
7110
7111 rcu_read_lock();
7112
7113 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7114 if (!app->compatible) {
7115 continue;
7116 }
7117
7118 ret = ust_app_regenerate_statedump(usess, app);
7119 if (ret < 0) {
7120 /* Continue to the next app even on error */
7121 continue;
7122 }
7123 }
7124
7125 rcu_read_unlock();
7126
7127 return 0;
7128 }
7129
7130 /*
7131 * Rotate all the channels of a session.
7132 *
7133 * Return LTTNG_OK on success or else an LTTng error code.
7134 */
7135 enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
7136 {
7137 int ret;
7138 enum lttng_error_code cmd_ret = LTTNG_OK;
7139 struct lttng_ht_iter iter;
7140 struct ust_app *app;
7141 struct ltt_ust_session *usess = session->ust_session;
7142
7143 assert(usess);
7144
7145 rcu_read_lock();
7146
7147 switch (usess->buffer_type) {
7148 case LTTNG_BUFFER_PER_UID:
7149 {
7150 struct buffer_reg_uid *reg;
7151
7152 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7153 struct buffer_reg_channel *buf_reg_chan;
7154 struct consumer_socket *socket;
7155
7156 /* Get consumer socket to use to push the metadata. */
7157 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7158 usess->consumer);
7159 if (!socket) {
7160 cmd_ret = LTTNG_ERR_INVALID;
7161 goto error;
7162 }
7163
7164 /* Rotate the data channels. */
7165 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
7166 buf_reg_chan, node.node) {
7167 ret = consumer_rotate_channel(socket,
7168 buf_reg_chan->consumer_key,
7169 usess->uid, usess->gid,
7170 usess->consumer,
7171 /* is_metadata_channel */ false);
7172 if (ret < 0) {
7173 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7174 goto error;
7175 }
7176 }
7177
7178 /*
7179 * The metadata channel might not be present.
7180 *
7181 * Consumer stream allocation can be done
7182 * asynchronously and can fail on intermediary
7183 * operations (i.e add context) and lead to data
7184 * channels created with no metadata channel.
7185 */
7186 if (!reg->registry->reg.ust->metadata_key) {
7187 /* Skip since no metadata is present. */
7188 continue;
7189 }
7190
7191 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
7192
7193 ret = consumer_rotate_channel(socket,
7194 reg->registry->reg.ust->metadata_key,
7195 usess->uid, usess->gid,
7196 usess->consumer,
7197 /* is_metadata_channel */ true);
7198 if (ret < 0) {
7199 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7200 goto error;
7201 }
7202 }
7203 break;
7204 }
7205 case LTTNG_BUFFER_PER_PID:
7206 {
7207 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7208 struct consumer_socket *socket;
7209 struct lttng_ht_iter chan_iter;
7210 struct ust_app_channel *ua_chan;
7211 struct ust_app_session *ua_sess;
7212 struct ust_registry_session *registry;
7213
7214 ua_sess = lookup_session_by_app(usess, app);
7215 if (!ua_sess) {
7216 /* Session not associated with this app. */
7217 continue;
7218 }
7219
7220 /* Get the right consumer socket for the application. */
7221 socket = consumer_find_socket_by_bitness(app->bits_per_long,
7222 usess->consumer);
7223 if (!socket) {
7224 cmd_ret = LTTNG_ERR_INVALID;
7225 goto error;
7226 }
7227
7228 registry = get_session_registry(ua_sess);
7229 if (!registry) {
7230 DBG("Application session is being torn down. Skip application.");
7231 continue;
7232 }
7233
7234 /* Rotate the data channels. */
7235 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
7236 ua_chan, node.node) {
7237 ret = consumer_rotate_channel(socket,
7238 ua_chan->key,
7239 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7240 lttng_credentials_get_gid(&ua_sess->effective_credentials),
7241 ua_sess->consumer,
7242 /* is_metadata_channel */ false);
7243 if (ret < 0) {
7244 /* Per-PID buffer and application going away. */
7245 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7246 continue;
7247 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7248 goto error;
7249 }
7250 }
7251
7252 /* Rotate the metadata channel. */
7253 (void) push_metadata(registry, usess->consumer);
7254 ret = consumer_rotate_channel(socket,
7255 registry->metadata_key,
7256 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7257 lttng_credentials_get_gid(&ua_sess->effective_credentials),
7258 ua_sess->consumer,
7259 /* is_metadata_channel */ true);
7260 if (ret < 0) {
7261 /* Per-PID buffer and application going away. */
7262 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7263 continue;
7264 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7265 goto error;
7266 }
7267 }
7268 break;
7269 }
7270 default:
7271 assert(0);
7272 break;
7273 }
7274
7275 cmd_ret = LTTNG_OK;
7276
7277 error:
7278 rcu_read_unlock();
7279 return cmd_ret;
7280 }
7281
7282 enum lttng_error_code ust_app_create_channel_subdirectories(
7283 const struct ltt_ust_session *usess)
7284 {
7285 enum lttng_error_code ret = LTTNG_OK;
7286 struct lttng_ht_iter iter;
7287 enum lttng_trace_chunk_status chunk_status;
7288 char *pathname_index;
7289 int fmt_ret;
7290
7291 assert(usess->current_trace_chunk);
7292 rcu_read_lock();
7293
7294 switch (usess->buffer_type) {
7295 case LTTNG_BUFFER_PER_UID:
7296 {
7297 struct buffer_reg_uid *reg;
7298
7299 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7300 fmt_ret = asprintf(&pathname_index,
7301 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
7302 reg->uid, reg->bits_per_long);
7303 if (fmt_ret < 0) {
7304 ERR("Failed to format channel index directory");
7305 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7306 goto error;
7307 }
7308
7309 /*
7310 * Create the index subdirectory which will take care
7311 * of implicitly creating the channel's path.
7312 */
7313 chunk_status = lttng_trace_chunk_create_subdirectory(
7314 usess->current_trace_chunk,
7315 pathname_index);
7316 free(pathname_index);
7317 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7318 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7319 goto error;
7320 }
7321 }
7322 break;
7323 }
7324 case LTTNG_BUFFER_PER_PID:
7325 {
7326 struct ust_app *app;
7327
7328 /*
7329 * Create the toplevel ust/ directory in case no apps are running.
7330 */
7331 chunk_status = lttng_trace_chunk_create_subdirectory(
7332 usess->current_trace_chunk,
7333 DEFAULT_UST_TRACE_DIR);
7334 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7335 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7336 goto error;
7337 }
7338
7339 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
7340 pid_n.node) {
7341 struct ust_app_session *ua_sess;
7342 struct ust_registry_session *registry;
7343
7344 ua_sess = lookup_session_by_app(usess, app);
7345 if (!ua_sess) {
7346 /* Session not associated with this app. */
7347 continue;
7348 }
7349
7350 registry = get_session_registry(ua_sess);
7351 if (!registry) {
7352 DBG("Application session is being torn down. Skip application.");
7353 continue;
7354 }
7355
7356 fmt_ret = asprintf(&pathname_index,
7357 DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
7358 ua_sess->path);
7359 if (fmt_ret < 0) {
7360 ERR("Failed to format channel index directory");
7361 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7362 goto error;
7363 }
7364 /*
7365 * Create the index subdirectory which will take care
7366 * of implicitly creating the channel's path.
7367 */
7368 chunk_status = lttng_trace_chunk_create_subdirectory(
7369 usess->current_trace_chunk,
7370 pathname_index);
7371 free(pathname_index);
7372 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7373 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7374 goto error;
7375 }
7376 }
7377 break;
7378 }
7379 default:
7380 abort();
7381 }
7382
7383 ret = LTTNG_OK;
7384 error:
7385 rcu_read_unlock();
7386 return ret;
7387 }
7388
7389 /*
7390 * Clear all the channels of a session.
7391 *
7392 * Return LTTNG_OK on success or else an LTTng error code.
7393 */
7394 enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
7395 {
7396 int ret;
7397 enum lttng_error_code cmd_ret = LTTNG_OK;
7398 struct lttng_ht_iter iter;
7399 struct ust_app *app;
7400 struct ltt_ust_session *usess = session->ust_session;
7401
7402 assert(usess);
7403
7404 rcu_read_lock();
7405
7406 if (usess->active) {
7407 ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
7408 cmd_ret = LTTNG_ERR_FATAL;
7409 goto end;
7410 }
7411
7412 switch (usess->buffer_type) {
7413 case LTTNG_BUFFER_PER_UID:
7414 {
7415 struct buffer_reg_uid *reg;
7416
7417 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7418 struct buffer_reg_channel *buf_reg_chan;
7419 struct consumer_socket *socket;
7420
7421 /* Get consumer socket to use to push the metadata. */
7422 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7423 usess->consumer);
7424 if (!socket) {
7425 cmd_ret = LTTNG_ERR_INVALID;
7426 goto error_socket;
7427 }
7428
7429 /* Clear the data channels. */
7430 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
7431 buf_reg_chan, node.node) {
7432 ret = consumer_clear_channel(socket,
7433 buf_reg_chan->consumer_key);
7434 if (ret < 0) {
7435 goto error;
7436 }
7437 }
7438
7439 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
7440
7441 /*
7442 * Clear the metadata channel.
7443 * Metadata channel is not cleared per se but we still need to
7444 * perform a rotation operation on it behind the scenes.
7445 */
7446 ret = consumer_clear_channel(socket,
7447 reg->registry->reg.ust->metadata_key);
7448 if (ret < 0) {
7449 goto error;
7450 }
7451 }
7452 break;
7453 }
7454 case LTTNG_BUFFER_PER_PID:
7455 {
7456 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7457 struct consumer_socket *socket;
7458 struct lttng_ht_iter chan_iter;
7459 struct ust_app_channel *ua_chan;
7460 struct ust_app_session *ua_sess;
7461 struct ust_registry_session *registry;
7462
7463 ua_sess = lookup_session_by_app(usess, app);
7464 if (!ua_sess) {
7465 /* Session not associated with this app. */
7466 continue;
7467 }
7468
7469 /* Get the right consumer socket for the application. */
7470 socket = consumer_find_socket_by_bitness(app->bits_per_long,
7471 usess->consumer);
7472 if (!socket) {
7473 cmd_ret = LTTNG_ERR_INVALID;
7474 goto error_socket;
7475 }
7476
7477 registry = get_session_registry(ua_sess);
7478 if (!registry) {
7479 DBG("Application session is being torn down. Skip application.");
7480 continue;
7481 }
7482
7483 /* Clear the data channels. */
7484 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
7485 ua_chan, node.node) {
7486 ret = consumer_clear_channel(socket, ua_chan->key);
7487 if (ret < 0) {
7488 /* Per-PID buffer and application going away. */
7489 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7490 continue;
7491 }
7492 goto error;
7493 }
7494 }
7495
7496 (void) push_metadata(registry, usess->consumer);
7497
7498 /*
7499 * Clear the metadata channel.
7500 * Metadata channel is not cleared per se but we still need to
7501 * perform a rotation operation on it behind the scenes.
7502 */
7503 ret = consumer_clear_channel(socket, registry->metadata_key);
7504 if (ret < 0) {
7505 /* Per-PID buffer and application going away. */
7506 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7507 continue;
7508 }
7509 goto error;
7510 }
7511 }
7512 break;
7513 }
7514 default:
7515 assert(0);
7516 break;
7517 }
7518
7519 cmd_ret = LTTNG_OK;
7520 goto end;
7521
7522 error:
7523 switch (-ret) {
7524 case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
7525 cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
7526 break;
7527 default:
7528 cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
7529 }
7530
7531 error_socket:
7532 end:
7533 rcu_read_unlock();
7534 return cmd_ret;
7535 }
7536
7537 /*
7538 * This function skips the metadata channel as the begin/end timestamps of a
7539 * metadata packet are useless.
7540 *
7541 * Moreover, opening a packet after a "clear" will cause problems for live
7542 * sessions as it will introduce padding that was not part of the first trace
7543 * chunk. The relay daemon expects the content of the metadata stream of
7544 * successive metadata trace chunks to be strict supersets of one another.
7545 *
7546 * For example, flushing a packet at the beginning of the metadata stream of
7547 * a trace chunk resulting from a "clear" session command will cause the
7548 * size of the metadata stream of the new trace chunk to not match the size of
7549 * the metadata stream of the original chunk. This will confuse the relay
7550 * daemon as the same "offset" in a metadata stream will no longer point
7551 * to the same content.
7552 */
7553 enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
7554 {
7555 enum lttng_error_code ret = LTTNG_OK;
7556 struct lttng_ht_iter iter;
7557 struct ltt_ust_session *usess = session->ust_session;
7558
7559 assert(usess);
7560
7561 rcu_read_lock();
7562
7563 switch (usess->buffer_type) {
7564 case LTTNG_BUFFER_PER_UID:
7565 {
7566 struct buffer_reg_uid *reg;
7567
7568 cds_list_for_each_entry (
7569 reg, &usess->buffer_reg_uid_list, lnode) {
7570 struct buffer_reg_channel *buf_reg_chan;
7571 struct consumer_socket *socket;
7572
7573 socket = consumer_find_socket_by_bitness(
7574 reg->bits_per_long, usess->consumer);
7575 if (!socket) {
7576 ret = LTTNG_ERR_FATAL;
7577 goto error;
7578 }
7579
7580 cds_lfht_for_each_entry(reg->registry->channels->ht,
7581 &iter.iter, buf_reg_chan, node.node) {
7582 const int open_ret =
7583 consumer_open_channel_packets(
7584 socket,
7585 buf_reg_chan->consumer_key);
7586
7587 if (open_ret < 0) {
7588 ret = LTTNG_ERR_UNK;
7589 goto error;
7590 }
7591 }
7592 }
7593 break;
7594 }
7595 case LTTNG_BUFFER_PER_PID:
7596 {
7597 struct ust_app *app;
7598
7599 cds_lfht_for_each_entry (
7600 ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7601 struct consumer_socket *socket;
7602 struct lttng_ht_iter chan_iter;
7603 struct ust_app_channel *ua_chan;
7604 struct ust_app_session *ua_sess;
7605 struct ust_registry_session *registry;
7606
7607 ua_sess = lookup_session_by_app(usess, app);
7608 if (!ua_sess) {
7609 /* Session not associated with this app. */
7610 continue;
7611 }
7612
7613 /* Get the right consumer socket for the application. */
7614 socket = consumer_find_socket_by_bitness(
7615 app->bits_per_long, usess->consumer);
7616 if (!socket) {
7617 ret = LTTNG_ERR_FATAL;
7618 goto error;
7619 }
7620
7621 registry = get_session_registry(ua_sess);
7622 if (!registry) {
7623 DBG("Application session is being torn down. Skip application.");
7624 continue;
7625 }
7626
7627 cds_lfht_for_each_entry(ua_sess->channels->ht,
7628 &chan_iter.iter, ua_chan, node.node) {
7629 const int open_ret =
7630 consumer_open_channel_packets(
7631 socket,
7632 ua_chan->key);
7633
7634 if (open_ret < 0) {
7635 /*
7636 * Per-PID buffer and application going
7637 * away.
7638 */
7639 if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7640 continue;
7641 }
7642
7643 ret = LTTNG_ERR_UNK;
7644 goto error;
7645 }
7646 }
7647 }
7648 break;
7649 }
7650 default:
7651 abort();
7652 break;
7653 }
7654
7655 error:
7656 rcu_read_unlock();
7657 return ret;
7658 }