Support flight recorder mode for a session
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
43 /* Next available channel key and session id. Both are accessed atomically. */
44 static unsigned long next_channel_key;
45 static unsigned long next_session_id;
46
47 /*
48 * Return the atomically incremented value of next_channel_key.
49 */
50 static inline unsigned long get_next_channel_key(void)
51 {
52 return uatomic_add_return(&next_channel_key, 1);
53 }
54
55 /*
56 * Return the atomically incremented value of next_session_id.
57 */
58 static inline unsigned long get_next_session_id(void)
59 {
60 return uatomic_add_return(&next_session_id, 1);
61 }
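/*
 * Note: uatomic_add_return() returns the value after the increment, so the
 * first channel key and session id handed out are 1; the helpers above never
 * return 0.
 */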
62
63 static void copy_channel_attr_to_ustctl(
64 struct ustctl_consumer_channel_attr *attr,
65 struct lttng_ust_channel_attr *uattr)
66 {
67 /* Copy channel attributes since the layout is different. */
68 attr->subbuf_size = uattr->subbuf_size;
69 attr->num_subbuf = uattr->num_subbuf;
70 attr->overwrite = uattr->overwrite;
71 attr->switch_timer_interval = uattr->switch_timer_interval;
72 attr->read_timer_interval = uattr->read_timer_interval;
73 attr->output = uattr->output;
74 }
75
76 /*
77 * Match function for the hash table lookup.
78 *
79 * It matches a UST app event based on three attributes: the event name, the
80 * filter bytecode and the loglevel.
81 */
82 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
83 {
84 struct ust_app_event *event;
85 const struct ust_app_ht_key *key;
86
87 assert(node);
88 assert(_key);
89
90 event = caa_container_of(node, struct ust_app_event, node.node);
91 key = _key;
92
93 /* Match the 3 elements of the key: name, filter and loglevel. */
94
95 /* Event name */
96 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
97 goto no_match;
98 }
99
100 /* Event loglevel. */
101 if (event->attr.loglevel != key->loglevel) {
102 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
103 && key->loglevel == 0 && event->attr.loglevel == -1) {
104 /*
105 * Match accepted: on event creation, the loglevel is set to -1 when
106 * the event loglevel type is ALL, while the API sends 0 when enabling
107 * an event. Both 0 and -1 are therefore accepted for this loglevel
108 * type.
109 */
110 } else {
111 goto no_match;
112 }
113 }
114
115 /* Only one of the two filters is set, no match. */
116 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
117 goto no_match;
118 }
119
120 if (key->filter && event->filter) {
121 /* Both filters exist; compare the length followed by the bytecode. */
122 if (event->filter->len != key->filter->len ||
123 memcmp(event->filter->data, key->filter->data,
124 event->filter->len) != 0) {
125 goto no_match;
126 }
127 }
128
129 /* Match. */
130 return 1;
131
132 no_match:
133 return 0;
134 }
135
136 /*
137 * Uniquely add a UST app event to the given channel's hash table. This uses
138 * the custom ht_match_ust_app_event match function and the event name as hash.
139 */
140 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
141 struct ust_app_event *event)
142 {
143 struct cds_lfht_node *node_ptr;
144 struct ust_app_ht_key key;
145 struct lttng_ht *ht;
146
147 assert(ua_chan);
148 assert(ua_chan->events);
149 assert(event);
150
151 ht = ua_chan->events;
152 key.name = event->attr.name;
153 key.filter = event->filter;
154 key.loglevel = event->attr.loglevel;
155
156 node_ptr = cds_lfht_add_unique(ht->ht,
157 ht->hash_fct(event->node.key, lttng_ht_seed),
158 ht_match_ust_app_event, &key, &event->node.node);
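	/*
	 * cds_lfht_add_unique() returns the matching node if an equal node is
	 * already present; the assert below therefore documents that callers
	 * must never insert the same name/filter/loglevel triplet twice.
	 */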
159 assert(node_ptr == &event->node.node);
160 }
161
162 /*
163 * Close the notify socket from the given RCU head object. This MUST be called
164 * through a call_rcu().
165 */
166 static void close_notify_sock_rcu(struct rcu_head *head)
167 {
168 int ret;
169 struct ust_app_notify_sock_obj *obj =
170 caa_container_of(head, struct ust_app_notify_sock_obj, head);
171
172 /* Must have a valid fd here. */
173 assert(obj->fd >= 0);
174
175 ret = close(obj->fd);
176 if (ret) {
177 ERR("close notify sock %d RCU", obj->fd);
178 }
179 lttng_fd_put(LTTNG_FD_APPS, 1);
180
181 free(obj);
182 }
183
184 /*
185 * Return the session registry according to the buffer type of the given
186 * session.
187 *
188 * A registry per UID object MUST exist before calling this function; NULL is
189 * returned if it cannot be found. The RCU read side lock must be acquired.
190 */
191 static struct ust_registry_session *get_session_registry(
192 struct ust_app_session *ua_sess)
193 {
194 struct ust_registry_session *registry = NULL;
195
196 assert(ua_sess);
197
198 switch (ua_sess->buffer_type) {
199 case LTTNG_BUFFER_PER_PID:
200 {
201 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
202 if (!reg_pid) {
203 goto error;
204 }
205 registry = reg_pid->registry->reg.ust;
206 break;
207 }
208 case LTTNG_BUFFER_PER_UID:
209 {
210 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
211 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
212 if (!reg_uid) {
213 goto error;
214 }
215 registry = reg_uid->registry->reg.ust;
216 break;
217 }
218 default:
219 assert(0);
220 };
221
222 error:
223 return registry;
224 }
225
226 /*
227 * Delete ust context safely. RCU read lock must be held before calling
228 * this function.
229 */
230 static
231 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
232 {
233 int ret;
234
235 assert(ua_ctx);
236
237 if (ua_ctx->obj) {
238 ret = ustctl_release_object(sock, ua_ctx->obj);
239 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
240 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
241 sock, ua_ctx->obj->handle, ret);
242 }
243 free(ua_ctx->obj);
244 }
245 free(ua_ctx);
246 }
247
248 /*
249 * Delete ust app event safely. RCU read lock must be held before calling
250 * this function.
251 */
252 static
253 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
254 {
255 int ret;
256
257 assert(ua_event);
258
259 free(ua_event->filter);
260
261 if (ua_event->obj != NULL) {
262 ret = ustctl_release_object(sock, ua_event->obj);
263 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
264 ERR("UST app sock %d release event obj failed with ret %d",
265 sock, ret);
266 }
267 free(ua_event->obj);
268 }
269 free(ua_event);
270 }
271
272 /*
273 * Release ust data object of the given stream.
274 *
275 * Return 0 on success or else a negative value.
276 */
277 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
278 {
279 int ret = 0;
280
281 assert(stream);
282
283 if (stream->obj) {
284 ret = ustctl_release_object(sock, stream->obj);
285 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
286 ERR("UST app sock %d release stream obj failed with ret %d",
287 sock, ret);
288 }
289 lttng_fd_put(LTTNG_FD_APPS, 2);
290 free(stream->obj);
291 }
292
293 return ret;
294 }
295
296 /*
297 * Delete ust app stream safely. RCU read lock must be held before calling
298 * this function.
299 */
300 static
301 void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
302 {
303 assert(stream);
304
305 (void) release_ust_app_stream(sock, stream);
306 free(stream);
307 }
308
309 /*
310 * We need to execute ht_destroy outside of RCU read-side critical
311 * section and outside of call_rcu thread, so we postpone its execution
312 * using ht_cleanup_push. It is simpler than changing the semantics of
313 * the many callers of delete_ust_app_channel().
314 */
315 static
316 void delete_ust_app_channel_rcu(struct rcu_head *head)
317 {
318 struct ust_app_channel *ua_chan =
319 caa_container_of(head, struct ust_app_channel, rcu_head);
320
321 ht_cleanup_push(ua_chan->ctx);
322 ht_cleanup_push(ua_chan->events);
323 free(ua_chan);
324 }
325
326 /*
327 * Delete ust app channel safely. RCU read lock must be held before calling
328 * this function.
329 */
330 static
331 void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
332 struct ust_app *app)
333 {
334 int ret;
335 struct lttng_ht_iter iter;
336 struct ust_app_event *ua_event;
337 struct ust_app_ctx *ua_ctx;
338 struct ust_app_stream *stream, *stmp;
339 struct ust_registry_session *registry;
340
341 assert(ua_chan);
342
343 DBG3("UST app deleting channel %s", ua_chan->name);
344
345 /* Wipe stream */
346 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
347 cds_list_del(&stream->list);
348 delete_ust_app_stream(sock, stream);
349 }
350
351 /* Wipe context */
352 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
353 ret = lttng_ht_del(ua_chan->ctx, &iter);
354 assert(!ret);
355 delete_ust_app_ctx(sock, ua_ctx);
356 }
357
358 /* Wipe events */
359 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
360 node.node) {
361 ret = lttng_ht_del(ua_chan->events, &iter);
362 assert(!ret);
363 delete_ust_app_event(sock, ua_event);
364 }
365
366 /* Wipe and free registry from session registry. */
367 registry = get_session_registry(ua_chan->session);
368 if (registry) {
369 ust_registry_channel_del_free(registry, ua_chan->key);
370 }
371
372 if (ua_chan->obj != NULL) {
373 /* Remove channel from application UST object descriptor. */
374 iter.iter.node = &ua_chan->ust_objd_node.node;
375 lttng_ht_del(app->ust_objd, &iter);
376 ret = ustctl_release_object(sock, ua_chan->obj);
377 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
378 ERR("UST app sock %d release channel obj failed with ret %d",
379 sock, ret);
380 }
381 lttng_fd_put(LTTNG_FD_APPS, 1);
382 free(ua_chan->obj);
383 }
384 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
385 }
386
387 /*
388 * Push metadata to consumer socket.
389 *
390 * The socket lock MUST be acquired.
391 * The ust app session lock MUST be acquired.
392 *
393 * On success, return the len of metadata pushed or else a negative value.
394 */
395 ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
396 struct consumer_socket *socket, int send_zero_data)
397 {
398 int ret;
399 char *metadata_str = NULL;
400 size_t len, offset;
401 ssize_t ret_val;
402
403 assert(registry);
404 assert(socket);
405
406 /*
407 * On a push metadata error, either the consumer is dead or the metadata
408 * channel has been destroyed because its endpoint might have died (e.g.
409 * relayd). If so, the metadata closed flag is set to 1 so we deny any
410 * further push of metadata, which is no longer valid on the consumer side.
411 *
412 * Holding the ust app session mutex allows us to make this check without
413 * taking the registry lock.
414 */
415 if (registry->metadata_closed) {
416 return -EPIPE;
417 }
418
419 pthread_mutex_lock(&registry->lock);
420
421 offset = registry->metadata_len_sent;
422 len = registry->metadata_len - registry->metadata_len_sent;
423 if (len == 0) {
424 DBG3("No metadata to push for metadata key %" PRIu64,
425 registry->metadata_key);
426 ret_val = len;
427 if (send_zero_data) {
428 DBG("No metadata to push");
429 goto push_data;
430 }
431 goto end;
432 }
433
434 /* Allocate only what we have to send. */
435 metadata_str = zmalloc(len);
436 if (!metadata_str) {
437 PERROR("zmalloc ust app metadata string");
438 ret_val = -ENOMEM;
439 goto error;
440 }
441 /* Copy what we haven't sent out yet. */
442 memcpy(metadata_str, registry->metadata + offset, len);
443 registry->metadata_len_sent += len;
444
445 push_data:
446 pthread_mutex_unlock(&registry->lock);
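	/*
	 * The registry lock is released before the (potentially blocking) push;
	 * ordering of metadata segments is guaranteed by the consumer socket
	 * lock held by the caller (see push_metadata() below).
	 */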
447 ret = consumer_push_metadata(socket, registry->metadata_key,
448 metadata_str, len, offset);
449 if (ret < 0) {
450 ret_val = ret;
451 goto error_push;
452 }
453
454 free(metadata_str);
455 return len;
456
457 end:
458 error:
459 pthread_mutex_unlock(&registry->lock);
460 error_push:
461 free(metadata_str);
462 return ret_val;
463 }
464
465 /*
466 * For a given application and session, push metadata to the consumer. The
467 * session lock MUST be acquired before calling this.
468 *
469 * The consumer socket used to send the metadata is looked up from the given
470 * consumer output based on the registry's bitness.
471 *
472 * Return 0 on success else a negative error.
473 */
474 static int push_metadata(struct ust_registry_session *registry,
475 struct consumer_output *consumer)
476 {
477 int ret_val;
478 ssize_t ret;
479 struct consumer_socket *socket;
480
481 assert(registry);
482 assert(consumer);
483
484 rcu_read_lock();
485
486 /*
487 * This means that no metadata was assigned to the session, which can
488 * happen if no start command has been issued previously.
489 */
490 if (!registry->metadata_key) {
491 ret_val = 0;
492 goto end_rcu_unlock;
493 }
494
495 /* Get the consumer socket to use to push the metadata. */
496 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
497 consumer);
498 if (!socket) {
499 ret_val = -1;
500 goto error_rcu_unlock;
501 }
502
503 /*
504 * TODO: Currently, we hold the socket lock around sampling of the next
505 * metadata segment to ensure we send metadata over the consumer socket in
506 * the correct order. This makes the registry lock nest inside the socket
507 * lock.
508 *
509 * Please note that this is a temporary measure: we should move this lock
510 * back into ust_consumer_push_metadata() when the consumer gets the
511 * ability to reorder the metadata it receives.
512 */
513 pthread_mutex_lock(socket->lock);
514 ret = ust_app_push_metadata(registry, socket, 0);
515 pthread_mutex_unlock(socket->lock);
516 if (ret < 0) {
517 ret_val = ret;
518 goto error_rcu_unlock;
519 }
520
521 rcu_read_unlock();
522 return 0;
523
524 error_rcu_unlock:
525 /*
526 * On error, flag the registry that the metadata is closed. We were unable
527 * to push anything and this means that either the consumer is not
528 * responding or the metadata cache has been destroyed on the consumer.
529 */
530 registry->metadata_closed = 1;
531 end_rcu_unlock:
532 rcu_read_unlock();
533 return ret_val;
534 }
535
536 /*
537 * Send to the consumer a close metadata command for the given session. Once
538 * done, the metadata channel is deleted and the session metadata pointer is
539 * nullified. The session lock MUST be acquired here unless the application is
540 * in the destroy path.
541 *
542 * Return 0 on success else a negative value.
543 */
544 static int close_metadata(struct ust_registry_session *registry,
545 struct consumer_output *consumer)
546 {
547 int ret;
548 struct consumer_socket *socket;
549
550 assert(registry);
551 assert(consumer);
552
553 rcu_read_lock();
554
555 if (!registry->metadata_key || registry->metadata_closed) {
556 ret = 0;
557 goto end;
558 }
559
560 /* Get the consumer socket to use to push the metadata. */
561 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
562 consumer);
563 if (!socket) {
564 ret = -1;
565 goto error;
566 }
567
568 ret = consumer_close_metadata(socket, registry->metadata_key);
569 if (ret < 0) {
570 goto error;
571 }
572
573 error:
574 /*
575 * Metadata is now considered closed. Even on error, the consumer is either
576 * not responding or not found, so either way a second close should NOT be
577 * emitted for this registry.
578 */
579 registry->metadata_closed = 1;
580 end:
581 rcu_read_unlock();
582 return ret;
583 }
584
585 /*
586 * We need to execute ht_destroy outside of RCU read-side critical
587 * section and outside of call_rcu thread, so we postpone its execution
588 * using ht_cleanup_push. It is simpler than changing the semantics of
589 * the many callers of delete_ust_app_session().
590 */
591 static
592 void delete_ust_app_session_rcu(struct rcu_head *head)
593 {
594 struct ust_app_session *ua_sess =
595 caa_container_of(head, struct ust_app_session, rcu_head);
596
597 ht_cleanup_push(ua_sess->channels);
598 free(ua_sess);
599 }
600
601 /*
602 * Delete ust app session safely. RCU read lock must be held before calling
603 * this function.
604 */
605 static
606 void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
607 struct ust_app *app)
608 {
609 int ret;
610 struct lttng_ht_iter iter;
611 struct ust_app_channel *ua_chan;
612 struct ust_registry_session *registry;
613
614 assert(ua_sess);
615
616 pthread_mutex_lock(&ua_sess->lock);
617
618 registry = get_session_registry(ua_sess);
619 if (registry && !registry->metadata_closed) {
620 /* Push metadata for application before freeing the application. */
621 (void) push_metadata(registry, ua_sess->consumer);
622
623 /*
624 * Don't ask to close metadata for global per UID buffers; in that case,
625 * metadata is only closed on trace session destroy. Also, the previous
626 * push metadata could have flagged the metadata registry as closed, so
627 * don't send a close command if it is already closed.
628 */
629 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
630 !registry->metadata_closed) {
631 /* And ask to close it for this session registry. */
632 (void) close_metadata(registry, ua_sess->consumer);
633 }
634 }
635
636 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
637 node.node) {
638 ret = lttng_ht_del(ua_sess->channels, &iter);
639 assert(!ret);
640 delete_ust_app_channel(sock, ua_chan, app);
641 }
642
643 /* For per-PID buffers, the registry lifetime is tied to the session, so remove and destroy it now. */
644 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
645 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
646 if (reg_pid) {
647 buffer_reg_pid_remove(reg_pid);
648 buffer_reg_pid_destroy(reg_pid);
649 }
650 }
651
652 if (ua_sess->handle != -1) {
653 ret = ustctl_release_handle(sock, ua_sess->handle);
654 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
655 ERR("UST app sock %d release session handle failed with ret %d",
656 sock, ret);
657 }
658 }
659 pthread_mutex_unlock(&ua_sess->lock);
660
661 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
662 }
663
664 /*
665 * Delete a traceable application structure from the global list. Never call
666 * this function outside of a call_rcu call.
667 *
668 * RCU read side lock should _NOT_ be held when calling this function.
669 */
670 static
671 void delete_ust_app(struct ust_app *app)
672 {
673 int ret, sock;
674 struct ust_app_session *ua_sess, *tmp_ua_sess;
675
676 /* Delete ust app sessions info */
677 sock = app->sock;
678 app->sock = -1;
679
680 /* Wipe sessions */
681 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
682 teardown_node) {
683 /* Free every object in the session and the session. */
684 rcu_read_lock();
685 delete_ust_app_session(sock, ua_sess, app);
686 rcu_read_unlock();
687 }
688
689 ht_cleanup_push(app->sessions);
690 ht_cleanup_push(app->ust_objd);
691
692 /*
693 * Wait until we have deleted the application from the sock hash table
694 * before closing this socket, otherwise an application could re-use the
695 * socket ID and race with the teardown, using the same hash table entry.
696 *
697 * It's OK to leave the close in call_rcu. We want it to stay unique for
698 * all RCU readers that could run concurrently with unregister app,
699 * therefore we _need_ to only close that socket after a grace period. So
700 * it should stay in this RCU callback.
701 *
702 * This close() is a very important step of the synchronization model so
703 * every modification to this function must be carefully reviewed.
704 */
705 ret = close(sock);
706 if (ret) {
707 PERROR("close");
708 }
709 lttng_fd_put(LTTNG_FD_APPS, 1);
710
711 DBG2("UST app pid %d deleted", app->pid);
712 free(app);
713 }
714
715 /*
716 * URCU intermediate call to delete an UST app.
717 */
718 static
719 void delete_ust_app_rcu(struct rcu_head *head)
720 {
721 struct lttng_ht_node_ulong *node =
722 caa_container_of(head, struct lttng_ht_node_ulong, head);
723 struct ust_app *app =
724 caa_container_of(node, struct ust_app, pid_n);
725
726 DBG3("Call RCU deleting app PID %d", app->pid);
727 delete_ust_app(app);
728 }
729
730 /*
731 * Delete the session from the application's hash table and free the data
732 * structure by releasing every object it contains.
733 */
734 static void destroy_app_session(struct ust_app *app,
735 struct ust_app_session *ua_sess)
736 {
737 int ret;
738 struct lttng_ht_iter iter;
739
740 assert(app);
741 assert(ua_sess);
742
743 iter.iter.node = &ua_sess->node.node;
744 ret = lttng_ht_del(app->sessions, &iter);
745 if (ret) {
746 /* Already scheduled for teardown. */
747 goto end;
748 }
749
750 /* Once deleted, free the data structure. */
751 delete_ust_app_session(app->sock, ua_sess, app);
752
753 end:
754 return;
755 }
756
757 /*
758 * Alloc new UST app session.
759 */
760 static
761 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
762 {
763 struct ust_app_session *ua_sess;
764
765 /* Init most of the default values by allocating and zeroing. */
766 ua_sess = zmalloc(sizeof(struct ust_app_session));
767 if (ua_sess == NULL) {
768 PERROR("malloc");
769 goto error_free;
770 }
771
772 ua_sess->handle = -1;
773 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
774 pthread_mutex_init(&ua_sess->lock, NULL);
775
776 return ua_sess;
777
778 error_free:
779 return NULL;
780 }
781
782 /*
783 * Alloc new UST app channel.
784 */
785 static
786 struct ust_app_channel *alloc_ust_app_channel(char *name,
787 struct ust_app_session *ua_sess,
788 struct lttng_ust_channel_attr *attr)
789 {
790 struct ust_app_channel *ua_chan;
791
792 /* Init most of the default values by allocating and zeroing. */
793 ua_chan = zmalloc(sizeof(struct ust_app_channel));
794 if (ua_chan == NULL) {
795 PERROR("malloc");
796 goto error;
797 }
798
799 /* Setup channel name */
800 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
801 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
802
803 ua_chan->enabled = 1;
804 ua_chan->handle = -1;
805 ua_chan->session = ua_sess;
806 ua_chan->key = get_next_channel_key();
807 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
808 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
809 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
810
811 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
812
813 /* Copy attributes */
814 if (attr) {
815 /* Translate from lttng_ust_channel_attr to ustctl_consumer_channel_attr. */
816 ua_chan->attr.subbuf_size = attr->subbuf_size;
817 ua_chan->attr.num_subbuf = attr->num_subbuf;
818 ua_chan->attr.overwrite = attr->overwrite;
819 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
820 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
821 ua_chan->attr.output = attr->output;
822 }
823 /* By default, the channel is a per cpu channel. */
824 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
825
826 DBG3("UST app channel %s allocated", ua_chan->name);
827
828 return ua_chan;
829
830 error:
831 return NULL;
832 }
833
834 /*
835 * Allocate and initialize a UST app stream.
836 *
837 * Return newly allocated stream pointer or NULL on error.
838 */
839 struct ust_app_stream *ust_app_alloc_stream(void)
840 {
841 struct ust_app_stream *stream = NULL;
842
843 stream = zmalloc(sizeof(*stream));
844 if (stream == NULL) {
845 PERROR("zmalloc ust app stream");
846 goto error;
847 }
848
849 /* Zero could be a valid value for a handle so initialize it to -1. */
850 stream->handle = -1;
851
852 error:
853 return stream;
854 }
855
856 /*
857 * Alloc new UST app event.
858 */
859 static
860 struct ust_app_event *alloc_ust_app_event(char *name,
861 struct lttng_ust_event *attr)
862 {
863 struct ust_app_event *ua_event;
864
865 /* Init most of the default values by allocating and zeroing. */
866 ua_event = zmalloc(sizeof(struct ust_app_event));
867 if (ua_event == NULL) {
868 PERROR("malloc");
869 goto error;
870 }
871
872 ua_event->enabled = 1;
873 strncpy(ua_event->name, name, sizeof(ua_event->name));
874 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
875 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
876
877 /* Copy attributes */
878 if (attr) {
879 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
880 }
881
882 DBG3("UST app event %s allocated", ua_event->name);
883
884 return ua_event;
885
886 error:
887 return NULL;
888 }
889
890 /*
891 * Alloc new UST app context.
892 */
893 static
894 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
895 {
896 struct ust_app_ctx *ua_ctx;
897
898 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
899 if (ua_ctx == NULL) {
900 goto error;
901 }
902
903 if (uctx) {
904 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
905 }
906
907 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
908
909 error:
910 return ua_ctx;
911 }
912
913 /*
914 * Allocate a filter and copy the given original filter.
915 *
916 * Return allocated filter or NULL on error.
917 */
918 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
919 struct lttng_ust_filter_bytecode *orig_f)
920 {
921 struct lttng_ust_filter_bytecode *filter = NULL;
922
923 /* Copy filter bytecode */
924 filter = zmalloc(sizeof(*filter) + orig_f->len);
925 if (!filter) {
926 PERROR("zmalloc alloc ust app filter");
927 goto error;
928 }
929
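	/*
	 * The bytecode is stored in a flexible array member at the end of
	 * struct lttng_ust_filter_bytecode, hence the sizeof(*filter) +
	 * orig_f->len allocation above and copy below.
	 */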
930 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
931
932 error:
933 return filter;
934 }
935
936 /*
937 * Find an ust_app using the sock and return it. RCU read side lock must be
938 * held before calling this helper function.
939 */
940 static
941 struct ust_app *find_app_by_sock(int sock)
942 {
943 struct lttng_ht_node_ulong *node;
944 struct lttng_ht_iter iter;
945
946 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
947 node = lttng_ht_iter_get_node_ulong(&iter);
948 if (node == NULL) {
949 DBG2("UST app find by sock %d not found", sock);
950 goto error;
951 }
952
953 return caa_container_of(node, struct ust_app, sock_n);
954
955 error:
956 return NULL;
957 }
958
959 /*
960 * Find an ust_app using the notify sock and return it. RCU read side lock must
961 * be held before calling this helper function.
962 */
963 static struct ust_app *find_app_by_notify_sock(int sock)
964 {
965 struct lttng_ht_node_ulong *node;
966 struct lttng_ht_iter iter;
967
968 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
969 &iter);
970 node = lttng_ht_iter_get_node_ulong(&iter);
971 if (node == NULL) {
972 DBG2("UST app find by notify sock %d not found", sock);
973 goto error;
974 }
975
976 return caa_container_of(node, struct ust_app, notify_sock_n);
977
978 error:
979 return NULL;
980 }
981
982 /*
983 * Look up an ust app event based on the event name, filter bytecode and
984 * loglevel.
985 *
986 * Return an ust_app_event object or NULL if not found.
987 */
988 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
989 char *name, struct lttng_ust_filter_bytecode *filter, int loglevel)
990 {
991 struct lttng_ht_iter iter;
992 struct lttng_ht_node_str *node;
993 struct ust_app_event *event = NULL;
994 struct ust_app_ht_key key;
995
996 assert(name);
997 assert(ht);
998
999 /* Setup key for event lookup. */
1000 key.name = name;
1001 key.filter = filter;
1002 key.loglevel = loglevel;
1003
1004 /* Lookup using the event name as hash and a custom match fct. */
1005 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1006 ht_match_ust_app_event, &key, &iter.iter);
1007 node = lttng_ht_iter_get_node_str(&iter);
1008 if (node == NULL) {
1009 goto end;
1010 }
1011
1012 event = caa_container_of(node, struct ust_app_event, node);
1013
1014 end:
1015 return event;
1016 }
1017
1018 /*
1019 * Create the channel context on the tracer.
1020 *
1021 * Called with UST app session lock held.
1022 */
1023 static
1024 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1025 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1026 {
1027 int ret;
1028
1029 health_code_update();
1030
1031 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1032 ua_chan->obj, &ua_ctx->obj);
1033 if (ret < 0) {
1034 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1035 ERR("UST app create channel context failed for app (pid: %d) "
1036 "with ret %d", app->pid, ret);
1037 } else {
1038 DBG3("UST app add context failed. Application is dead.");
1039 }
1040 goto error;
1041 }
1042
1043 ua_ctx->handle = ua_ctx->obj->handle;
1044
1045 DBG2("UST app context handle %d created successfully for channel %s",
1046 ua_ctx->handle, ua_chan->name);
1047
1048 error:
1049 health_code_update();
1050 return ret;
1051 }
1052
1053 /*
1054 * Set the filter on the tracer.
1055 */
1056 static
1057 int set_ust_event_filter(struct ust_app_event *ua_event,
1058 struct ust_app *app)
1059 {
1060 int ret;
1061
1062 health_code_update();
1063
1064 if (!ua_event->filter) {
1065 ret = 0;
1066 goto error;
1067 }
1068
1069 ret = ustctl_set_filter(app->sock, ua_event->filter,
1070 ua_event->obj);
1071 if (ret < 0) {
1072 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1073 ERR("UST app event %s filter failed for app (pid: %d) "
1074 "with ret %d", ua_event->attr.name, app->pid, ret);
1075 } else {
1076 DBG3("UST app filter event failed. Application is dead.");
1077 }
1078 goto error;
1079 }
1080
1081 DBG2("UST filter set successfully for event %s", ua_event->name);
1082
1083 error:
1084 health_code_update();
1085 return ret;
1086 }
1087
1088 /*
1089 * Disable the specified event on the UST tracer for the UST session.
1090 */
1091 static int disable_ust_event(struct ust_app *app,
1092 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1093 {
1094 int ret;
1095
1096 health_code_update();
1097
1098 ret = ustctl_disable(app->sock, ua_event->obj);
1099 if (ret < 0) {
1100 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1101 ERR("UST app event %s disable failed for app (pid: %d) "
1102 "and session handle %d with ret %d",
1103 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1104 } else {
1105 DBG3("UST app disable event failed. Application is dead.");
1106 }
1107 goto error;
1108 }
1109
1110 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1111 ua_event->attr.name, app->pid);
1112
1113 error:
1114 health_code_update();
1115 return ret;
1116 }
1117
1118 /*
1119 * Disable the specified channel on the UST tracer for the UST session.
1120 */
1121 static int disable_ust_channel(struct ust_app *app,
1122 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1123 {
1124 int ret;
1125
1126 health_code_update();
1127
1128 ret = ustctl_disable(app->sock, ua_chan->obj);
1129 if (ret < 0) {
1130 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1131 ERR("UST app channel %s disable failed for app (pid: %d) "
1132 "and session handle %d with ret %d",
1133 ua_chan->name, app->pid, ua_sess->handle, ret);
1134 } else {
1135 DBG3("UST app disable channel failed. Application is dead.");
1136 }
1137 goto error;
1138 }
1139
1140 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1141 ua_chan->name, app->pid);
1142
1143 error:
1144 health_code_update();
1145 return ret;
1146 }
1147
1148 /*
1149 * Enable the specified channel on the UST tracer for the UST session.
1150 */
1151 static int enable_ust_channel(struct ust_app *app,
1152 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1153 {
1154 int ret;
1155
1156 health_code_update();
1157
1158 ret = ustctl_enable(app->sock, ua_chan->obj);
1159 if (ret < 0) {
1160 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1161 ERR("UST app channel %s enable failed for app (pid: %d) "
1162 "and session handle %d with ret %d",
1163 ua_chan->name, app->pid, ua_sess->handle, ret);
1164 } else {
1165 DBG3("UST app enable channel failed. Application is dead.");
1166 }
1167 goto error;
1168 }
1169
1170 ua_chan->enabled = 1;
1171
1172 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1173 ua_chan->name, app->pid);
1174
1175 error:
1176 health_code_update();
1177 return ret;
1178 }
1179
1180 /*
1181 * Enable the specified event on the UST tracer for the UST session.
1182 */
1183 static int enable_ust_event(struct ust_app *app,
1184 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1185 {
1186 int ret;
1187
1188 health_code_update();
1189
1190 ret = ustctl_enable(app->sock, ua_event->obj);
1191 if (ret < 0) {
1192 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1193 ERR("UST app event %s enable failed for app (pid: %d) "
1194 "and session handle %d with ret %d",
1195 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1196 } else {
1197 DBG3("UST app enable event failed. Application is dead.");
1198 }
1199 goto error;
1200 }
1201
1202 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1203 ua_event->attr.name, app->pid);
1204
1205 error:
1206 health_code_update();
1207 return ret;
1208 }
1209
1210 /*
1211 * Send the channel and its stream buffers to the application.
1212 *
1213 * Return 0 on success. On error, a negative value is returned.
1214 */
1215 static int send_channel_pid_to_ust(struct ust_app *app,
1216 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1217 {
1218 int ret;
1219 struct ust_app_stream *stream, *stmp;
1220
1221 assert(app);
1222 assert(ua_sess);
1223 assert(ua_chan);
1224
1225 health_code_update();
1226
1227 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1228 app->sock);
1229
1230 /* Send channel to the application. */
1231 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1232 if (ret < 0) {
1233 goto error;
1234 }
1235
1236 health_code_update();
1237
1238 /* Send all streams to application. */
1239 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1240 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1241 if (ret < 0) {
1242 goto error;
1243 }
1244 /* We don't need the stream anymore once sent to the tracer. */
1245 cds_list_del(&stream->list);
1246 delete_ust_app_stream(-1, stream);
1247 }
1248 /* Flag the channel that it is sent to the application. */
1249 ua_chan->is_sent = 1;
1250
1251 error:
1252 health_code_update();
1253 return ret;
1254 }
1255
1256 /*
1257 * Create the specified event on the UST tracer for a UST session.
1258 *
1259 * Should be called with session mutex held.
1260 */
1261 static
1262 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1263 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1264 {
1265 int ret = 0;
1266
1267 health_code_update();
1268
1269 /* Create UST event on tracer */
1270 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1271 &ua_event->obj);
1272 if (ret < 0) {
1273 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1274 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1275 ua_event->attr.name, app->pid, ret);
1276 } else {
1277 DBG3("UST app create event failed. Application is dead.");
1278 }
1279 goto error;
1280 }
1281
1282 ua_event->handle = ua_event->obj->handle;
1283
1284 DBG2("UST app event %s created successfully for pid:%d",
1285 ua_event->attr.name, app->pid);
1286
1287 health_code_update();
1288
1289 /* Set filter if one is present. */
1290 if (ua_event->filter) {
1291 ret = set_ust_event_filter(ua_event, app);
1292 if (ret < 0) {
1293 goto error;
1294 }
1295 }
1296
1297 /* If event not enabled, disable it on the tracer */
1298 if (ua_event->enabled == 0) {
1299 ret = disable_ust_event(app, ua_sess, ua_event);
1300 if (ret < 0) {
1301 /*
1302 * If we hit an EPERM, something is wrong with our disable call. If
1303 * we get an EEXIST, there is a problem on the tracer side since we
1304 * just created it.
1305 */
1306 switch (ret) {
1307 case -LTTNG_UST_ERR_PERM:
1308 /* Code flow problem */
1309 assert(0);
1310 case -LTTNG_UST_ERR_EXIST:
1311 /* It's OK for our use case. */
1312 ret = 0;
1313 break;
1314 default:
1315 break;
1316 }
1317 goto error;
1318 }
1319 }
1320
1321 error:
1322 health_code_update();
1323 return ret;
1324 }
1325
1326 /*
1327 * Copy data between an UST app event and a LTT event.
1328 */
1329 static void shadow_copy_event(struct ust_app_event *ua_event,
1330 struct ltt_ust_event *uevent)
1331 {
1332 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1333 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1334
1335 ua_event->enabled = uevent->enabled;
1336
1337 /* Copy event attributes */
1338 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1339
1340 /* Copy filter bytecode */
1341 if (uevent->filter) {
1342 ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
1343 /* Filter might be NULL here in case of ENOMEM. */
1344 }
1345 }
1346
1347 /*
1348 * Copy data between an UST app channel and a LTT channel.
1349 */
1350 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
1351 struct ltt_ust_channel *uchan)
1352 {
1353 struct lttng_ht_iter iter;
1354 struct ltt_ust_event *uevent;
1355 struct ltt_ust_context *uctx;
1356 struct ust_app_event *ua_event;
1357 struct ust_app_ctx *ua_ctx;
1358
1359 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
1360
1361 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
1362 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1363
1364 ua_chan->tracefile_size = uchan->tracefile_size;
1365 ua_chan->tracefile_count = uchan->tracefile_count;
1366
1367 /* Copy channel attributes since the layout is different. */
1368 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
1369 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
1370 ua_chan->attr.overwrite = uchan->attr.overwrite;
1371 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
1372 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
1373 ua_chan->attr.output = uchan->attr.output;
1374 /*
1375 * Note that the attribute channel type is not set since the channel on the
1376 * tracing registry side does not have this information.
1377 */
1378
1379 ua_chan->enabled = uchan->enabled;
1380 ua_chan->tracing_channel_id = uchan->id;
1381
1382 cds_lfht_for_each_entry(uchan->ctx->ht, &iter.iter, uctx, node.node) {
1383 ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
1384 if (ua_ctx == NULL) {
1385 continue;
1386 }
1387 lttng_ht_node_init_ulong(&ua_ctx->node,
1388 (unsigned long) ua_ctx->ctx.ctx);
1389 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1390 }
1391
1392 /* Copy all events from ltt ust channel to ust app channel */
1393 cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
1394 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
1395 uevent->filter, uevent->attr.loglevel);
1396 if (ua_event == NULL) {
1397 DBG2("UST event %s not found on shadow copy channel",
1398 uevent->attr.name);
1399 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
1400 if (ua_event == NULL) {
1401 continue;
1402 }
1403 shadow_copy_event(ua_event, uevent);
1404 add_unique_ust_app_event(ua_chan, ua_event);
1405 }
1406 }
1407
1408 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
1409 }
1410
1411 /*
1412 * Copy data between a UST app session and a regular LTT session.
1413 */
1414 static void shadow_copy_session(struct ust_app_session *ua_sess,
1415 struct ltt_ust_session *usess, struct ust_app *app)
1416 {
1417 struct lttng_ht_node_str *ua_chan_node;
1418 struct lttng_ht_iter iter;
1419 struct ltt_ust_channel *uchan;
1420 struct ust_app_channel *ua_chan;
1421 time_t rawtime;
1422 struct tm *timeinfo;
1423 char datetime[16];
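	/* 16 bytes: "YYYYmmdd-HHMMSS" is 15 characters plus the terminating NUL. */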
1424 int ret;
1425
1426 /* Get date and time for unique app path */
1427 time(&rawtime);
1428 timeinfo = localtime(&rawtime);
1429 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1430
1431 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1432
1433 ua_sess->tracing_id = usess->id;
1434 ua_sess->id = get_next_session_id();
1435 ua_sess->uid = app->uid;
1436 ua_sess->gid = app->gid;
1437 ua_sess->euid = usess->uid;
1438 ua_sess->egid = usess->gid;
1439 ua_sess->buffer_type = usess->buffer_type;
1440 ua_sess->bits_per_long = app->bits_per_long;
1441 /* There is only one consumer object per session possible. */
1442 ua_sess->consumer = usess->consumer;
1443 ua_sess->output_traces = usess->output_traces;
1444
1445 switch (ua_sess->buffer_type) {
1446 case LTTNG_BUFFER_PER_PID:
1447 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1448 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1449 datetime);
1450 break;
1451 case LTTNG_BUFFER_PER_UID:
1452 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1453 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1454 break;
1455 default:
1456 assert(0);
1457 goto error;
1458 }
1459 if (ret < 0) {
1460 PERROR("snprintf UST shadow copy session");
1461 assert(0);
1462 goto error;
1463 }
1464
1465 /* Iterate over all channels in global domain. */
1466 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1467 uchan, node.node) {
1468 struct lttng_ht_iter uiter;
1469
1470 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1471 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1472 if (ua_chan_node != NULL) {
1473 /* Channel already exists. Continuing. */
1474 continue;
1475 }
1476
1477 DBG2("Channel %s not found on shadow session copy, creating it",
1478 uchan->name);
1479 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1480 if (ua_chan == NULL) {
1481 /* malloc failed. FIXME: Might want to handle ENOMEM here. */
1482 continue;
1483 }
1484 shadow_copy_channel(ua_chan, uchan);
1485 /*
1486 * The concept of metadata channel does not exist on the tracing
1487 * registry side of the session daemon so this can only be a per CPU
1488 * channel and not metadata.
1489 */
1490 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1491
1492 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1493 }
1494
1495 error:
1496 return;
1497 }
1498
1499 /*
1500 * Lookup session wrapper.
1501 */
1502 static
1503 void __lookup_session_by_app(struct ltt_ust_session *usess,
1504 struct ust_app *app, struct lttng_ht_iter *iter)
1505 {
1506 /* Get right UST app session from app */
1507 lttng_ht_lookup(app->sessions, (void *)((unsigned long) usess->id), iter);
1508 }
1509
1510 /*
1511 * Return ust app session from the app session hashtable using the UST session
1512 * id.
1513 */
1514 static struct ust_app_session *lookup_session_by_app(
1515 struct ltt_ust_session *usess, struct ust_app *app)
1516 {
1517 struct lttng_ht_iter iter;
1518 struct lttng_ht_node_ulong *node;
1519
1520 __lookup_session_by_app(usess, app, &iter);
1521 node = lttng_ht_iter_get_node_ulong(&iter);
1522 if (node == NULL) {
1523 goto error;
1524 }
1525
1526 return caa_container_of(node, struct ust_app_session, node);
1527
1528 error:
1529 return NULL;
1530 }
1531
1532 /*
1533 * Setup buffer registry per PID for the given session and application. If none
1534 * is found, a new one is created, added to the global registry and
1535 * initialized. If regp is valid, it's set with the newly created object.
1536 *
1537 * Return 0 on success or else a negative value.
1538 */
1539 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
1540 struct ust_app *app, struct buffer_reg_pid **regp)
1541 {
1542 int ret = 0;
1543 struct buffer_reg_pid *reg_pid;
1544
1545 assert(ua_sess);
1546 assert(app);
1547
1548 rcu_read_lock();
1549
1550 reg_pid = buffer_reg_pid_find(ua_sess->id);
1551 if (!reg_pid) {
1552 /*
1553 * This is the create channel path meaning that if there is NO
1554 * registry available, we have to create one for this session.
1555 */
1556 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
1557 if (ret < 0) {
1558 goto error;
1559 }
1560 buffer_reg_pid_add(reg_pid);
1561 } else {
1562 goto end;
1563 }
1564
1565 /* Initialize registry. */
1566 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
1567 app->bits_per_long, app->uint8_t_alignment,
1568 app->uint16_t_alignment, app->uint32_t_alignment,
1569 app->uint64_t_alignment, app->long_alignment,
1570 app->byte_order, app->version.major,
1571 app->version.minor);
1572 if (ret < 0) {
1573 goto error;
1574 }
1575
1576 DBG3("UST app buffer registry per PID created successfully");
1577
1578 end:
1579 if (regp) {
1580 *regp = reg_pid;
1581 }
1582 error:
1583 rcu_read_unlock();
1584 return ret;
1585 }
1586
1587 /*
1588 * Setup buffer registry per UID for the given session and application. If none
1589 * is found, a new one is created, added to the global registry and
1590 * initialized. If regp is valid, it's set with the newly created object.
1591 *
1592 * Return 0 on success or else a negative value.
1593 */
1594 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
1595 struct ust_app *app, struct buffer_reg_uid **regp)
1596 {
1597 int ret = 0;
1598 struct buffer_reg_uid *reg_uid;
1599
1600 assert(usess);
1601 assert(app);
1602
1603 rcu_read_lock();
1604
1605 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
1606 if (!reg_uid) {
1607 /*
1608 * This is the create channel path meaning that if there is NO
1609 * registry available, we have to create one for this session.
1610 */
1611 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
1612 LTTNG_DOMAIN_UST, &reg_uid);
1613 if (ret < 0) {
1614 goto error;
1615 }
1616 buffer_reg_uid_add(reg_uid);
1617 } else {
1618 goto end;
1619 }
1620
1621 /* Initialize registry. */
1622 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
1623 app->bits_per_long, app->uint8_t_alignment,
1624 app->uint16_t_alignment, app->uint32_t_alignment,
1625 app->uint64_t_alignment, app->long_alignment,
1626 app->byte_order, app->version.major,
1627 app->version.minor);
1628 if (ret < 0) {
1629 goto error;
1630 }
1631 /* Add node to teardown list of the session. */
1632 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
1633
1634 DBG3("UST app buffer registry per UID created successfully");
1635
1636 end:
1637 if (regp) {
1638 *regp = reg_uid;
1639 }
1640 error:
1641 rcu_read_unlock();
1642 return ret;
1643 }
1644
1645 /*
1646 * Create a session on the tracer side for the given app.
1647 *
1648 * On success, ua_sess_ptr is populated with the session pointer or else left
1649 * untouched. If the session was created, is_created is set to 1. On error,
1650 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
1651 * be NULL.
1652 *
1653 * Returns 0 on success or else a negative code, either -ENOMEM or -ENOTCONN,
1654 * the latter being the default code when ustctl_create_session fails.
1655 */
1656 static int create_ust_app_session(struct ltt_ust_session *usess,
1657 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
1658 int *is_created)
1659 {
1660 int ret, created = 0;
1661 struct ust_app_session *ua_sess;
1662
1663 assert(usess);
1664 assert(app);
1665 assert(ua_sess_ptr);
1666
1667 health_code_update();
1668
1669 ua_sess = lookup_session_by_app(usess, app);
1670 if (ua_sess == NULL) {
1671 DBG2("UST app pid: %d session id %d not found, creating it",
1672 app->pid, usess->id);
1673 ua_sess = alloc_ust_app_session(app);
1674 if (ua_sess == NULL) {
1675 /* Only malloc can fail here, so something is really wrong. */
1676 ret = -ENOMEM;
1677 goto error;
1678 }
1679 shadow_copy_session(ua_sess, usess, app);
1680 created = 1;
1681 }
1682
1683 switch (usess->buffer_type) {
1684 case LTTNG_BUFFER_PER_PID:
1685 /* Init local registry. */
1686 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
1687 if (ret < 0) {
1688 goto error;
1689 }
1690 break;
1691 case LTTNG_BUFFER_PER_UID:
1692 /* Look for a global registry. If none exists, create one. */
1693 ret = setup_buffer_reg_uid(usess, app, NULL);
1694 if (ret < 0) {
1695 goto error;
1696 }
1697 break;
1698 default:
1699 assert(0);
1700 ret = -EINVAL;
1701 goto error;
1702 }
1703
1704 health_code_update();
1705
1706 if (ua_sess->handle == -1) {
1707 ret = ustctl_create_session(app->sock);
1708 if (ret < 0) {
1709 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1710 ERR("Creating session for app pid %d with ret %d",
1711 app->pid, ret);
1712 } else {
1713 DBG("UST app creating session failed. Application is dead");
1714 }
1715 delete_ust_app_session(-1, ua_sess, app);
1716 if (ret != -ENOMEM) {
1717 /*
1718 * The tracer is probably gone or hit an internal error, so behave
1719 * as if it will soon unregister or is no longer usable.
1720 */
1721 ret = -ENOTCONN;
1722 }
1723 goto error;
1724 }
1725
1726 ua_sess->handle = ret;
1727
1728 /* Add ust app session to app's HT */
1729 lttng_ht_node_init_ulong(&ua_sess->node,
1730 (unsigned long) ua_sess->tracing_id);
1731 lttng_ht_add_unique_ulong(app->sessions, &ua_sess->node);
1732
1733 DBG2("UST app session created successfully with handle %d", ret);
1734 }
1735
1736 *ua_sess_ptr = ua_sess;
1737 if (is_created) {
1738 *is_created = created;
1739 }
1740
1741 /* Everything went well. */
1742 ret = 0;
1743
1744 error:
1745 health_code_update();
1746 return ret;
1747 }
1748
1749 /*
1750 * Create a context for the channel on the tracer.
1751 *
1752 * Called with UST app session lock held and a RCU read side lock.
1753 */
1754 static
1755 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
1756 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
1757 struct ust_app *app)
1758 {
1759 int ret = 0;
1760 struct lttng_ht_iter iter;
1761 struct lttng_ht_node_ulong *node;
1762 struct ust_app_ctx *ua_ctx;
1763
1764 DBG2("UST app adding context to channel %s", ua_chan->name);
1765
1766 lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
1767 node = lttng_ht_iter_get_node_ulong(&iter);
1768 if (node != NULL) {
1769 ret = -EEXIST;
1770 goto error;
1771 }
1772
1773 ua_ctx = alloc_ust_app_ctx(uctx);
1774 if (ua_ctx == NULL) {
1775 /* malloc failed */
1776 ret = -1;
1777 goto error;
1778 }
1779
1780 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
1781 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1782
1783 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
1784 if (ret < 0) {
1785 goto error;
1786 }
1787
1788 error:
1789 return ret;
1790 }
1791
1792 /*
1793 * Enable on the tracer side a ust app event for the session and channel.
1794 *
1795 * Called with UST app session lock held.
1796 */
1797 static
1798 int enable_ust_app_event(struct ust_app_session *ua_sess,
1799 struct ust_app_event *ua_event, struct ust_app *app)
1800 {
1801 int ret;
1802
1803 ret = enable_ust_event(app, ua_sess, ua_event);
1804 if (ret < 0) {
1805 goto error;
1806 }
1807
1808 ua_event->enabled = 1;
1809
1810 error:
1811 return ret;
1812 }
1813
1814 /*
1815 * Disable on the tracer side a ust app event for the session and channel.
1816 */
1817 static int disable_ust_app_event(struct ust_app_session *ua_sess,
1818 struct ust_app_event *ua_event, struct ust_app *app)
1819 {
1820 int ret;
1821
1822 ret = disable_ust_event(app, ua_sess, ua_event);
1823 if (ret < 0) {
1824 goto error;
1825 }
1826
1827 ua_event->enabled = 0;
1828
1829 error:
1830 return ret;
1831 }
1832
1833 /*
1834 * Lookup ust app channel for session and disable it on the tracer side.
1835 */
1836 static
1837 int disable_ust_app_channel(struct ust_app_session *ua_sess,
1838 struct ust_app_channel *ua_chan, struct ust_app *app)
1839 {
1840 int ret;
1841
1842 ret = disable_ust_channel(app, ua_sess, ua_chan);
1843 if (ret < 0) {
1844 goto error;
1845 }
1846
1847 ua_chan->enabled = 0;
1848
1849 error:
1850 return ret;
1851 }
1852
1853 /*
1854 * Lookup ust app channel for session and enable it on the tracer side. This
1855 * MUST be called with a RCU read side lock acquired.
1856 */
1857 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
1858 struct ltt_ust_channel *uchan, struct ust_app *app)
1859 {
1860 int ret = 0;
1861 struct lttng_ht_iter iter;
1862 struct lttng_ht_node_str *ua_chan_node;
1863 struct ust_app_channel *ua_chan;
1864
1865 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
1866 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
1867 if (ua_chan_node == NULL) {
1868 DBG2("Unable to find channel %s in ust session id %u",
1869 uchan->name, ua_sess->tracing_id);
1870 goto error;
1871 }
1872
1873 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
1874
1875 ret = enable_ust_channel(app, ua_sess, ua_chan);
1876 if (ret < 0) {
1877 goto error;
1878 }
1879
1880 error:
1881 return ret;
1882 }
1883
1884 /*
1885 * Ask the consumer to create a channel and get it if successful.
1886 *
1887 * Return 0 on success or else a negative value.
1888 */
1889 static int do_consumer_create_channel(struct ltt_ust_session *usess,
1890 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
1891 int bitness, struct ust_registry_session *registry)
1892 {
1893 int ret;
1894 unsigned int nb_fd = 0;
1895 struct consumer_socket *socket;
1896
1897 assert(usess);
1898 assert(ua_sess);
1899 assert(ua_chan);
1900 assert(registry);
1901
1902 rcu_read_lock();
1903 health_code_update();
1904
1905 /* Get the right consumer socket for the application. */
1906 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
1907 if (!socket) {
1908 ret = -EINVAL;
1909 goto error;
1910 }
1911
1912 health_code_update();
1913
1914 /* Need one fd for the channel. */
1915 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
1916 if (ret < 0) {
1917 ERR("Exhausted number of available FD upon create channel");
1918 goto error;
1919 }
1920
1921 /*
1922 * Ask the consumer to create the channel. The consumer will return the
1923 * number of streams we have to expect.
1924 */
1925 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
1926 registry);
1927 if (ret < 0) {
1928 goto error_ask;
1929 }
1930
1931 /*
1932 * Compute the number of fds needed before receiving them. It must be 2 per
1933 * stream (2 being the default value here).
1934 */
1935 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
1936
1937 /* Reserve the amount of file descriptor we need. */
1938 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
1939 if (ret < 0) {
1940 ERR("Exhausted number of available FD upon create channel");
1941 goto error_fd_get_stream;
1942 }
1943
1944 health_code_update();
1945
1946 /*
1947 * Now get the channel from the consumer. This call will populate the stream
1948 * list of that channel and set the ust objects.
1949 */
1950 if (usess->consumer->enabled) {
1951 ret = ust_consumer_get_channel(socket, ua_chan);
1952 if (ret < 0) {
1953 goto error_destroy;
1954 }
1955 }
1956
1957 rcu_read_unlock();
1958 return 0;
1959
1960 error_destroy:
1961 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
1962 error_fd_get_stream:
1963 /*
1964 * Initiate a destroy channel on the consumer since we had an error
1965 * handling it on our side. The return value is of no importance since we
1966 * already have a ret value set by the previous error that we need to
1967 * return.
1968 */
1969 (void) ust_consumer_destroy_channel(socket, ua_chan);
1970 error_ask:
1971 lttng_fd_put(LTTNG_FD_APPS, 1);
1972 error:
1973 health_code_update();
1974 rcu_read_unlock();
1975 return ret;
1976 }
1977
1978 /*
1979 * Duplicate the ust data object of the ust app stream and save it in the
1980 * buffer registry stream.
1981 *
1982 * Return 0 on success or else a negative value.
1983 */
1984 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
1985 struct ust_app_stream *stream)
1986 {
1987 int ret;
1988
1989 assert(reg_stream);
1990 assert(stream);
1991
1992 /* Reserve the amount of file descriptor we need. */
1993 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
1994 if (ret < 0) {
1995 ERR("Exhausted number of available FD upon duplicate stream");
1996 goto error;
1997 }
1998
1999 /* Duplicate object for stream once the original is in the registry. */
2000 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2001 reg_stream->obj.ust);
2002 if (ret < 0) {
2003 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2004 reg_stream->obj.ust, stream->obj, ret);
2005 lttng_fd_put(LTTNG_FD_APPS, 2);
2006 goto error;
2007 }
2008 stream->handle = stream->obj->handle;
2009
2010 error:
2011 return ret;
2012 }
2013
2014 /*
2015 * Duplicate the ust data object of the ust app channel and save it in the
2016 * buffer registry channel.
2017 *
2018 * Return 0 on success or else a negative value.
2019 */
2020 static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2021 struct ust_app_channel *ua_chan)
2022 {
2023 int ret;
2024
2025 assert(reg_chan);
2026 assert(ua_chan);
2027
2028 /* Need one fd for the channel. */
2029 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2030 if (ret < 0) {
2031 ERR("Exhausted number of available FD upon duplicate channel");
2032 goto error_fd_get;
2033 }
2034
2035 /* Duplicate object for stream once the original is in the registry. */
2036 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2037 if (ret < 0) {
2038 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2039 reg_chan->obj.ust, ua_chan->obj, ret);
2040 goto error;
2041 }
2042 ua_chan->handle = ua_chan->obj->handle;
2043
2044 return 0;
2045
2046 error:
2047 lttng_fd_put(LTTNG_FD_APPS, 1);
2048 error_fd_get:
2049 return ret;
2050 }
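/*
 * Both duplicate helpers above use ustctl_duplicate_ust_object_data() to
 * hand the application its own copy of the object held by the buffer
 * registry; the registry keeps ownership of the original. The FD
 * reservations made here (2 per stream, 1 per channel) are expected to be
 * given back with lttng_fd_put() by the corresponding release paths.
 */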
2051
2052 /*
2053 * For a given channel buffer registry, setup all streams of the given ust
2054 * application channel.
2055 *
2056 * Return 0 on success or else a negative value.
2057 */
2058 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2059 struct ust_app_channel *ua_chan)
2060 {
2061 int ret = 0;
2062 struct ust_app_stream *stream, *stmp;
2063
2064 assert(reg_chan);
2065 assert(ua_chan);
2066
2067 DBG2("UST app setup buffer registry stream");
2068
2069 /* Move all streams from the application channel to the registry. */
2070 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2071 struct buffer_reg_stream *reg_stream;
2072
2073 ret = buffer_reg_stream_create(&reg_stream);
2074 if (ret < 0) {
2075 goto error;
2076 }
2077
2078 /*
2079 * Keep original pointer and nullify it in the stream so the delete
2080 * stream call does not release the object.
2081 */
2082 reg_stream->obj.ust = stream->obj;
2083 stream->obj = NULL;
2084 buffer_reg_stream_add(reg_stream, reg_chan);
2085
2086 /* We don't need the streams anymore. */
2087 cds_list_del(&stream->list);
2088 delete_ust_app_stream(-1, stream);
2089 }
2090
2091 error:
2092 return ret;
2093 }
2094
2095 /*
2096 * Create a buffer registry channel for the given session registry and
2097 * application channel object. If regp pointer is valid, it's set with the
2098 * created object. Important, the created object is NOT added to the session
2099 * registry hash table.
2100 *
2101 * Return 0 on success else a negative value.
2102 */
2103 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2104 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2105 {
2106 int ret;
2107 struct buffer_reg_channel *reg_chan = NULL;
2108
2109 assert(reg_sess);
2110 assert(ua_chan);
2111
2112 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2113
2114 /* Create buffer registry channel. */
2115 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2116 if (ret < 0) {
2117 goto error_create;
2118 }
2119 assert(reg_chan);
2120 reg_chan->consumer_key = ua_chan->key;
2121
2122 /* Create and add a channel registry to session. */
2123 ret = ust_registry_channel_add(reg_sess->reg.ust,
2124 ua_chan->tracing_channel_id);
2125 if (ret < 0) {
2126 goto error;
2127 }
2128 buffer_reg_channel_add(reg_sess, reg_chan);
2129
2130 if (regp) {
2131 *regp = reg_chan;
2132 }
2133
2134 return 0;
2135
2136 error:
2137 /* Safe because the registry channel object was not added to any HT. */
2138 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2139 error_create:
2140 return ret;
2141 }
2142
2143 /*
2144 * Setup buffer registry channel for the given session registry and application
2145 * channel object. If regp pointer is valid, it's set with the created object.
2146 *
2147 * Return 0 on success else a negative value.
2148 */
2149 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2150 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2151 {
2152 int ret;
2153
2154 assert(reg_sess);
2155 assert(reg_chan);
2156 assert(ua_chan);
2157 assert(ua_chan->obj);
2158
2159 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2160
2161 /* Setup all streams for the registry. */
2162 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2163 if (ret < 0) {
2164 goto error;
2165 }
2166
2167 reg_chan->obj.ust = ua_chan->obj;
2168 ua_chan->obj = NULL;
2169
2170 return 0;
2171
2172 error:
2173 buffer_reg_channel_remove(reg_sess, reg_chan);
2174 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2175 return ret;
2176 }
2177
2178 /*
2179 * Send buffer registry channel to the application.
2180 *
2181 * Return 0 on success else a negative value.
2182 */
2183 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2184 struct ust_app *app, struct ust_app_session *ua_sess,
2185 struct ust_app_channel *ua_chan)
2186 {
2187 int ret;
2188 struct buffer_reg_stream *reg_stream;
2189
2190 assert(reg_chan);
2191 assert(app);
2192 assert(ua_sess);
2193 assert(ua_chan);
2194
2195 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2196
2197 ret = duplicate_channel_object(reg_chan, ua_chan);
2198 if (ret < 0) {
2199 goto error;
2200 }
2201
2202 /* Send channel to the application. */
2203 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
2204 if (ret < 0) {
2205 goto error;
2206 }
2207
2208 health_code_update();
2209
2210 /* Send all streams to application. */
2211 pthread_mutex_lock(&reg_chan->stream_list_lock);
2212 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2213 struct ust_app_stream stream;
2214
2215 ret = duplicate_stream_object(reg_stream, &stream);
2216 if (ret < 0) {
2217 goto error_stream_unlock;
2218 }
2219
2220 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2221 if (ret < 0) {
2222 (void) release_ust_app_stream(-1, &stream);
2223 goto error_stream_unlock;
2224 }
2225
2226 /*
2227 * The return value is not important here. This function will output an
2228 * error if needed.
2229 */
2230 (void) release_ust_app_stream(-1, &stream);
2231 }
2232 ua_chan->is_sent = 1;
2233
2234 error_stream_unlock:
2235 pthread_mutex_unlock(&reg_chan->stream_list_lock);
2236 error:
2237 return ret;
2238 }
2239
2240 /*
2241 * Create a channel backed by per-UID buffers and send it to the application.
2242 *
2243 * Return 0 on success else a negative value.
2244 */
2245 static int create_channel_per_uid(struct ust_app *app,
2246 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2247 struct ust_app_channel *ua_chan)
2248 {
2249 int ret;
2250 struct buffer_reg_uid *reg_uid;
2251 struct buffer_reg_channel *reg_chan;
2252
2253 assert(app);
2254 assert(usess);
2255 assert(ua_sess);
2256 assert(ua_chan);
2257
2258 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
2259
2260 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2261 /*
2262 * The session creation handles the creation of this global registry
2263 * object. If none can be found, there is a code flow problem or a
2264 * teardown race.
2265 */
2266 assert(reg_uid);
2267
2268 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
2269 reg_uid);
2270 if (!reg_chan) {
2271 /* Create the buffer registry channel object. */
2272 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
2273 if (ret < 0) {
2274 goto error;
2275 }
2276 assert(reg_chan);
2277
2278 /*
2279 * Create the buffers on the consumer side. This call populates the
2280 * ust app channel object with all streams and data object.
2281 */
2282 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2283 app->bits_per_long, reg_uid->registry->reg.ust);
2284 if (ret < 0) {
2285 /*
2286 * Let's remove the previously created buffer registry channel so
2287 * it's not visible anymore in the session registry.
2288 */
2289 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
2290 ua_chan->tracing_channel_id);
2291 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
2292 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2293 goto error;
2294 }
2295
2296 /*
2297 * Setup the streams and add it to the session registry.
2298 */
2299 ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
2300 if (ret < 0) {
2301 goto error;
2302 }
2303
2304 }
2305
2306 /* Send buffers to the application. */
2307 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
2308 if (ret < 0) {
2309 goto error;
2310 }
2311
2312 error:
2313 return ret;
2314 }
2315
2316 /*
2317 * Create a channel backed by per-PID buffers and send it to the application.
2318 *
2319 * Return 0 on success else a negative value.
2320 */
2321 static int create_channel_per_pid(struct ust_app *app,
2322 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2323 struct ust_app_channel *ua_chan)
2324 {
2325 int ret;
2326 struct ust_registry_session *registry;
2327
2328 assert(app);
2329 assert(usess);
2330 assert(ua_sess);
2331 assert(ua_chan);
2332
2333 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2334
2335 rcu_read_lock();
2336
2337 registry = get_session_registry(ua_sess);
2338 assert(registry);
2339
2340 /* Create and add a new channel registry to session. */
2341 ret = ust_registry_channel_add(registry, ua_chan->key);
2342 if (ret < 0) {
2343 goto error;
2344 }
2345
2346 /* Create and get channel on the consumer side. */
2347 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2348 app->bits_per_long, registry);
2349 if (ret < 0) {
2350 goto error;
2351 }
2352
2353 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2354 if (ret < 0) {
2355 goto error;
2356 }
2357
2358 error:
2359 rcu_read_unlock();
2360 return ret;
2361 }
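/*
 * Simplified view of the two creation paths above:
 *
 *   per-UID: the consumer channel and its streams are created once per
 *            (session, bits_per_long, uid) registry and kept in the buffer
 *            registry; every application then receives duplicated object
 *            descriptors through send_channel_uid_to_ust().
 *   per-PID: the consumer channel is created for each application and its
 *            objects are sent directly with send_channel_pid_to_ust().
 */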
2362
2363 /*
2364 * From an already allocated ust app channel, create the channel buffers if
2365 * needed and send them to the application. This MUST be called with the RCU read
2366 * side lock acquired.
2367 *
2368 * Return 0 on success or else a negative value.
2369 */
2370 static int do_create_channel(struct ust_app *app,
2371 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2372 struct ust_app_channel *ua_chan)
2373 {
2374 int ret;
2375
2376 assert(app);
2377 assert(usess);
2378 assert(ua_sess);
2379 assert(ua_chan);
2380
2381 /* Handle buffer type before sending the channel to the application. */
2382 switch (usess->buffer_type) {
2383 case LTTNG_BUFFER_PER_UID:
2384 {
2385 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2386 if (ret < 0) {
2387 goto error;
2388 }
2389 break;
2390 }
2391 case LTTNG_BUFFER_PER_PID:
2392 {
2393 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2394 if (ret < 0) {
2395 goto error;
2396 }
2397 break;
2398 }
2399 default:
2400 assert(0);
2401 ret = -EINVAL;
2402 goto error;
2403 }
2404
2405 /* Initialize ust objd object using the received handle and add it. */
2406 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2407 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2408
2409 /* If channel is not enabled, disable it on the tracer */
2410 if (!ua_chan->enabled) {
2411 ret = disable_ust_channel(app, ua_sess, ua_chan);
2412 if (ret < 0) {
2413 goto error;
2414 }
2415 }
2416
2417 error:
2418 return ret;
2419 }
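/*
 * Channels created by do_create_channel() can later be found again from
 * their UST object descriptor through app->ust_objd. A minimal lookup
 * sketch, where "handle" stands for the descriptor received from the
 * application:
 *
 *   struct lttng_ht_iter iter;
 *   struct lttng_ht_node_ulong *node;
 *
 *   lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) handle), &iter);
 *   node = lttng_ht_iter_get_node_ulong(&iter);
 *   if (node) {
 *       ua_chan = caa_container_of(node, struct ust_app_channel,
 *               ust_objd_node);
 *   }
 */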
2420
2421 /*
2422 * Create UST app channel and create it on the tracer. Set ua_chanp of the
2423 * newly created channel if not NULL.
2424 *
2425 * Called with UST app session lock and RCU read-side lock held.
2426 *
2427 * Return 0 on success or else a negative value.
2428 */
2429 static int create_ust_app_channel(struct ust_app_session *ua_sess,
2430 struct ltt_ust_channel *uchan, struct ust_app *app,
2431 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
2432 struct ust_app_channel **ua_chanp)
2433 {
2434 int ret = 0;
2435 struct lttng_ht_iter iter;
2436 struct lttng_ht_node_str *ua_chan_node;
2437 struct ust_app_channel *ua_chan;
2438
2439 /* Lookup channel in the ust app session */
2440 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2441 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2442 if (ua_chan_node != NULL) {
2443 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2444 goto end;
2445 }
2446
2447 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
2448 if (ua_chan == NULL) {
2449 /* Only malloc can fail here */
2450 ret = -ENOMEM;
2451 goto error_alloc;
2452 }
2453 shadow_copy_channel(ua_chan, uchan);
2454
2455 /* Set channel type. */
2456 ua_chan->attr.type = type;
2457
2458 ret = do_create_channel(app, usess, ua_sess, ua_chan);
2459 if (ret < 0) {
2460 goto error;
2461 }
2462
2463 DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
2464 app->pid);
2465
2466 /* Only add the channel if successful on the tracer side. */
2467 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
2468
2469 end:
2470 if (ua_chanp) {
2471 *ua_chanp = ua_chan;
2472 }
2473
2474 /* Everything went well. */
2475 return 0;
2476
2477 error:
2478 delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
2479 error_alloc:
2480 return ret;
2481 }
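/*
 * create_ust_app_channel() is idempotent with respect to the channel name:
 * an existing channel is simply returned, otherwise the channel is
 * allocated, created on the tracer and only then added to
 * ua_sess->channels, so a failed creation leaves no stale entry behind.
 */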
2482
2483 /*
2484 * Create UST app event and create it on the tracer side.
2485 *
2486 * Called with ust app session mutex held.
2487 */
2488 static
2489 int create_ust_app_event(struct ust_app_session *ua_sess,
2490 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
2491 struct ust_app *app)
2492 {
2493 int ret = 0;
2494 struct ust_app_event *ua_event;
2495
2496 /* Get event node */
2497 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
2498 uevent->filter, uevent->attr.loglevel);
2499 if (ua_event != NULL) {
2500 ret = -EEXIST;
2501 goto end;
2502 }
2503
2504 /* Does not exist so create one */
2505 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
2506 if (ua_event == NULL) {
2507 /* Only malloc can fail here so something is really wrong */
2508 ret = -ENOMEM;
2509 goto end;
2510 }
2511 shadow_copy_event(ua_event, uevent);
2512
2513 /* Create it on the tracer side */
2514 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
2515 if (ret < 0) {
2516 /* Not found previously means that it does not exist on the tracer */
2517 assert(ret != -LTTNG_UST_ERR_EXIST);
2518 goto error;
2519 }
2520
2521 add_unique_ust_app_event(ua_chan, ua_event);
2522
2523 DBG2("UST app create event %s for PID %d completed", ua_event->name,
2524 app->pid);
2525
2526 end:
2527 return ret;
2528
2529 error:
2530 /* Valid. Calling here is already in a read side lock */
2531 delete_ust_app_event(-1, ua_event);
2532 return ret;
2533 }
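/*
 * The allocate / shadow_copy / create-on-tracer / add-to-HT sequence used
 * here mirrors the channel path above: the event is only published in
 * ua_chan->events once the tracer has accepted it, and it is deleted
 * locally if the tracer-side creation fails.
 */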
2534
2535 /*
2536 * Create UST metadata and open it on the tracer side.
2537 *
2538 * Called with UST app session lock held and RCU read side lock.
2539 */
2540 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
2541 struct ust_app *app, struct consumer_output *consumer,
2542 struct ustctl_consumer_channel_attr *attr)
2543 {
2544 int ret = 0;
2545 struct ust_app_channel *metadata;
2546 struct consumer_socket *socket;
2547 struct ust_registry_session *registry;
2548
2549 assert(ua_sess);
2550 assert(app);
2551 assert(consumer);
2552
2553 registry = get_session_registry(ua_sess);
2554 assert(registry);
2555
2556 /* Metadata already exists for this registry or it was closed previously */
2557 if (registry->metadata_key || registry->metadata_closed) {
2558 ret = 0;
2559 goto error;
2560 }
2561
2562 /* Allocate UST metadata */
2563 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
2564 if (!metadata) {
2565 /* malloc() failed */
2566 ret = -ENOMEM;
2567 goto error;
2568 }
2569
2570 if (!attr) {
2571 /* Set default attributes for metadata. */
2572 metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
2573 metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
2574 metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
2575 metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
2576 metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
2577 metadata->attr.output = LTTNG_UST_MMAP;
2578 metadata->attr.type = LTTNG_UST_CHAN_METADATA;
2579 } else {
2580 memcpy(&metadata->attr, attr, sizeof(metadata->attr));
2581 metadata->attr.output = LTTNG_UST_MMAP;
2582 metadata->attr.type = LTTNG_UST_CHAN_METADATA;
2583 }
2584
2585 /* Need one fd for the channel. */
2586 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2587 if (ret < 0) {
2588 ERR("Exhausted number of available FD upon create metadata");
2589 goto error;
2590 }
2591
2592 /* Get the right consumer socket for the application. */
2593 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
2594 if (!socket) {
2595 ret = -EINVAL;
2596 goto error_consumer;
2597 }
2598
2599 /*
2600 * Keep metadata key so we can identify it on the consumer side. Assign it
2601 * to the registry *before* we ask the consumer so we avoid the race where the
2602 * consumer requests the metadata before the ask_channel call on our side has
2603 * returned.
2604 */
2605 registry->metadata_key = metadata->key;
2606
2607 /*
2608 * Ask the consumer to create the metadata channel. The metadata object
2609 * will be created by the consumer and kept there. However, the stream is
2610 * never added or monitored until we do a first push metadata to the
2611 * consumer.
2612 */
2613 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
2614 registry);
2615 if (ret < 0) {
2616 /* Nullify the metadata key so we don't try to close it later on. */
2617 registry->metadata_key = 0;
2618 goto error_consumer;
2619 }
2620
2621 /*
2622 * The setup command will make the metadata stream be sent to the relayd,
2623 * if applicable, and to the thread managing the metadata. This is important
2624 * because after this point, if an error occurs, the only way the stream
2625 * can be deleted is to be monitored in the consumer.
2626 */
2627 ret = consumer_setup_metadata(socket, metadata->key);
2628 if (ret < 0) {
2629 /* Nullify the metadata key so we don't try to close it later on. */
2630 registry->metadata_key = 0;
2631 goto error_consumer;
2632 }
2633
2634 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
2635 metadata->key, app->pid);
2636
2637 error_consumer:
2638 lttng_fd_put(LTTNG_FD_APPS, 1);
2639 delete_ust_app_channel(-1, metadata, app);
2640 error:
2641 return ret;
2642 }
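/*
 * When a metadata channel object is allocated above, it is always deleted
 * before returning: once consumer_setup_metadata() has succeeded, the
 * consumer owns the metadata stream and it is identified solely by
 * registry->metadata_key, so no per-application channel object needs to be
 * kept around.
 */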
2643
2644 /*
2645 * Return pointer to traceable apps list.
2646 */
2647 struct lttng_ht *ust_app_get_ht(void)
2648 {
2649 return ust_app_ht;
2650 }
2651
2652 /*
2653 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2654 * acquired before calling this function.
2655 */
2656 struct ust_app *ust_app_find_by_pid(pid_t pid)
2657 {
2658 struct ust_app *app = NULL;
2659 struct lttng_ht_node_ulong *node;
2660 struct lttng_ht_iter iter;
2661
2662 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2663 node = lttng_ht_iter_get_node_ulong(&iter);
2664 if (node == NULL) {
2665 DBG2("UST app not found with pid %d", pid);
2666 goto error;
2667 }
2668
2669 DBG2("Found UST app by pid %d", pid);
2670
2671 app = caa_container_of(node, struct ust_app, pid_n);
2672
2673 error:
2674 return app;
2675 }
2676
2677 /*
2678 * Allocate and init an UST app object using the registration information and
2679 * the command socket. This is called when the command socket connects to the
2680 * session daemon.
2681 *
2682 * The object is returned on success or else NULL.
2683 */
2684 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
2685 {
2686 struct ust_app *lta = NULL;
2687
2688 assert(msg);
2689 assert(sock >= 0);
2690
2691 DBG3("UST app creating application for socket %d", sock);
2692
2693 if ((msg->bits_per_long == 64 &&
2694 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
2695 || (msg->bits_per_long == 32 &&
2696 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
2697 ERR("Registration failed: application \"%s\" (pid: %d) has "
2698 "%d-bit long, but no consumerd for this size is available.\n",
2699 msg->name, msg->pid, msg->bits_per_long);
2700 goto error;
2701 }
2702
2703 lta = zmalloc(sizeof(struct ust_app));
2704 if (lta == NULL) {
2705 PERROR("malloc");
2706 goto error;
2707 }
2708
2709 lta->ppid = msg->ppid;
2710 lta->uid = msg->uid;
2711 lta->gid = msg->gid;
2712
2713 lta->bits_per_long = msg->bits_per_long;
2714 lta->uint8_t_alignment = msg->uint8_t_alignment;
2715 lta->uint16_t_alignment = msg->uint16_t_alignment;
2716 lta->uint32_t_alignment = msg->uint32_t_alignment;
2717 lta->uint64_t_alignment = msg->uint64_t_alignment;
2718 lta->long_alignment = msg->long_alignment;
2719 lta->byte_order = msg->byte_order;
2720
2721 lta->v_major = msg->major;
2722 lta->v_minor = msg->minor;
2723 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2724 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2725 lta->notify_sock = -1;
2726
2727 /* Copy name and make sure it's NULL terminated. */
2728 strncpy(lta->name, msg->name, sizeof(lta->name));
2729 lta->name[UST_APP_PROCNAME_LEN] = '\0';
2730
2731 /*
2732 * Before this can be called, when receiving the registration information,
2733 * the application compatibility is checked. So, at this point, the
2734 * application can work with this session daemon.
2735 */
2736 lta->compatible = 1;
2737
2738 lta->pid = msg->pid;
2739 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
2740 lta->sock = sock;
2741 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
2742
2743 CDS_INIT_LIST_HEAD(&lta->teardown_head);
2744
2745 error:
2746 return lta;
2747 }
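/*
 * A plausible registration sequence using ust_app_create() and the helpers
 * below; the actual call site lives in the registration dispatch code and
 * may differ, and notify_sock stands in for the received notification
 * socket:
 *
 *   struct ust_app *app = ust_app_create(&msg, sock);
 *   if (app) {
 *       app->notify_sock = notify_sock;   (ust_app_add() asserts this)
 *       if (ust_app_version(app) == 0) {
 *           ust_app_add(app);
 *       }
 *   }
 */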
2748
2749 /*
2750 * For a given application object, add it to every hash table.
2751 */
2752 void ust_app_add(struct ust_app *app)
2753 {
2754 assert(app);
2755 assert(app->notify_sock >= 0);
2756
2757 rcu_read_lock();
2758
2759 /*
2760 * On a re-registration, we want to kick out the previous registration of
2761 * that pid
2762 */
2763 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
2764
2765 /*
2766 * The socket _should_ be unique until _we_ call close. So, an add_unique
2767 * is used for ust_app_ht_by_sock, which asserts if the entry is already in
2768 * the table.
2769 */
2770 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
2771
2772 /* Add application to the notify socket hash table. */
2773 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
2774 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
2775
2776 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
2777 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
2778 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
2779 app->v_minor);
2780
2781 rcu_read_unlock();
2782 }
2783
2784 /*
2785 * Set the application version into the object.
2786 *
2787 * Return 0 on success else a negative value, either an errno code or an
2788 * LTTng-UST error code.
2789 */
2790 int ust_app_version(struct ust_app *app)
2791 {
2792 int ret;
2793
2794 assert(app);
2795
2796 ret = ustctl_tracer_version(app->sock, &app->version);
2797 if (ret < 0) {
2798 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2799 ERR("UST app %d version failed with ret %d", app->sock, ret);
2800 } else {
2801 DBG3("UST app %d version failed. Application is dead", app->sock);
2802 }
2803 }
2804
2805 return ret;
2806 }
2807
2808 /*
2809 * Unregister app by removing it from the global traceable app list and freeing
2810 * the data struct.
2811 *
2812 * The socket is already closed at this point, so there is no need to close it.
2813 */
2814 void ust_app_unregister(int sock)
2815 {
2816 struct ust_app *lta;
2817 struct lttng_ht_node_ulong *node;
2818 struct lttng_ht_iter iter;
2819 struct ust_app_session *ua_sess;
2820 int ret;
2821
2822 rcu_read_lock();
2823
2824 /* Get the node reference for a call_rcu */
2825 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
2826 node = lttng_ht_iter_get_node_ulong(&iter);
2827 assert(node);
2828
2829 lta = caa_container_of(node, struct ust_app, sock_n);
2830 DBG("PID %d unregistering with sock %d", lta->pid, sock);
2831
2832 /* Remove application from socket hash table */
2833 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
2834 assert(!ret);
2835
2836 /*
2837 * Remove application from notify hash table. The thread handling the
2838 * notify socket could already have deleted the node, so ignore any deletion
2839 * error since either way it is valid. The close of that socket is handled
2840 * by the other thread.
2841 */
2842 iter.iter.node = &lta->notify_sock_n.node;
2843 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
2844
2845 /*
2846 * Ignore return value since the node might have been removed before by an
2847 * add replace during app registration because the PID can be reassigned by
2848 * the OS.
2849 */
2850 iter.iter.node = &lta->pid_n.node;
2851 ret = lttng_ht_del(ust_app_ht, &iter);
2852 if (ret) {
2853 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
2854 lta->pid);
2855 }
2856
2857 /* Remove sessions so they are not visible during deletion. */
2858 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
2859 node.node) {
2860 struct ust_registry_session *registry;
2861
2862 ret = lttng_ht_del(lta->sessions, &iter);
2863 if (ret) {
2864 /* The session was already removed and thus scheduled for teardown. */
2865 continue;
2866 }
2867
2868 /*
2869 * Add session to list for teardown. This is safe since at this point we
2870 * are the only one using this list.
2871 */
2872 pthread_mutex_lock(&ua_sess->lock);
2873
2874 /*
2875 * Normally, this is done in the delete session process which is
2876 * executed in the call_rcu below. However, upon unregistration we can't
2877 * afford to wait for the grace period before pushing data or else the
2878 * data pending feature can race between the unregistration and stop
2879 * command where the data pending command is sent *before* the grace
2880 * period ended.
2881 *
2882 * The close metadata below nullifies the metadata pointer in the
2883 * session so the delete session will NOT push/close a second time.
2884 */
2885 registry = get_session_registry(ua_sess);
2886 if (registry && !registry->metadata_closed) {
2887 /* Push metadata for application before freeing the application. */
2888 (void) push_metadata(registry, ua_sess->consumer);
2889
2890 /*
2891 * Don't ask to close metadata for global per UID buffers. Close
2892 * metadata only on destroy trace session in this case. Also, the
2893 * previous metadata push could have flagged the metadata registry to
2894 * close so don't send a close command if closed.
2895 */
2896 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
2897 !registry->metadata_closed) {
2898 /* And ask to close it for this session registry. */
2899 (void) close_metadata(registry, ua_sess->consumer);
2900 }
2901 }
2902
2903 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
2904 pthread_mutex_unlock(&ua_sess->lock);
2905 }
2906
2907 /* Free memory */
2908 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
2909
2910 rcu_read_unlock();
2911 return;
2912 }
2913
2914 /*
2915 * Return traceable_app_count
2916 */
2917 unsigned long ust_app_list_count(void)
2918 {
2919 unsigned long count;
2920
2921 rcu_read_lock();
2922 count = lttng_ht_get_count(ust_app_ht);
2923 rcu_read_unlock();
2924
2925 return count;
2926 }
2927
2928 /*
2929 * Fill the events array with the names of all events of all registered apps.
2930 */
2931 int ust_app_list_events(struct lttng_event **events)
2932 {
2933 int ret, handle;
2934 size_t nbmem, count = 0;
2935 struct lttng_ht_iter iter;
2936 struct ust_app *app;
2937 struct lttng_event *tmp_event;
2938
2939 nbmem = UST_APP_EVENT_LIST_SIZE;
2940 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
2941 if (tmp_event == NULL) {
2942 PERROR("zmalloc ust app events");
2943 ret = -ENOMEM;
2944 goto error;
2945 }
2946
2947 rcu_read_lock();
2948
2949 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
2950 struct lttng_ust_tracepoint_iter uiter;
2951
2952 health_code_update();
2953
2954 if (!app->compatible) {
2955 /*
2956 * TODO: In time, we should notify the caller of this error by
2957 * telling them that this is a version error.
2958 */
2959 continue;
2960 }
2961 handle = ustctl_tracepoint_list(app->sock);
2962 if (handle < 0) {
2963 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
2964 ERR("UST app list events getting handle failed for app pid %d",
2965 app->pid);
2966 }
2967 continue;
2968 }
2969
2970 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
2971 &uiter)) != -LTTNG_UST_ERR_NOENT) {
2972 /* Handle ustctl error. */
2973 if (ret < 0) {
2974 free(tmp_event);
2975 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2976 ERR("UST app tp list get failed for app %d with ret %d",
2977 app->sock, ret);
2978 } else {
2979 DBG3("UST app tp list get failed. Application is dead");
2980 }
2981 goto rcu_error;
2982 }
2983
2984 health_code_update();
2985 if (count >= nbmem) {
2986 /* In case the realloc fails, we free the memory */
2987 void *ptr;
2988
2989 DBG2("Reallocating event list from %zu to %zu entries", nbmem,
2990 2 * nbmem);
2991 nbmem *= 2;
2992 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event));
2993 if (ptr == NULL) {
2994 PERROR("realloc ust app events");
2995 free(tmp_event);
2996 ret = -ENOMEM;
2997 goto rcu_error;
2998 }
2999 tmp_event = ptr;
3000 }
3001 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3002 tmp_event[count].loglevel = uiter.loglevel;
3003 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3004 tmp_event[count].pid = app->pid;
3005 tmp_event[count].enabled = -1;
3006 count++;
3007 }
3008 }
3009
3010 ret = count;
3011 *events = tmp_event;
3012
3013 DBG2("UST app list events done (%zu events)", count);
3014
3015 rcu_error:
3016 rcu_read_unlock();
3017 error:
3018 health_code_update();
3019 return ret;
3020 }
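/*
 * The listing above grows its result array geometrically: it starts at
 * UST_APP_EVENT_LIST_SIZE entries and doubles on overflow (for instance,
 * assuming a starting size of 128: 128 -> 256 -> 512 -> ...), which keeps
 * the number of realloc() calls logarithmic in the event count. The same
 * pattern is reused by ust_app_list_event_fields() below.
 */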
3021
3022 /*
3023 * Fill the fields array with all event field names of all registered apps.
3024 */
3025 int ust_app_list_event_fields(struct lttng_event_field **fields)
3026 {
3027 int ret, handle;
3028 size_t nbmem, count = 0;
3029 struct lttng_ht_iter iter;
3030 struct ust_app *app;
3031 struct lttng_event_field *tmp_event;
3032
3033 nbmem = UST_APP_EVENT_LIST_SIZE;
3034 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3035 if (tmp_event == NULL) {
3036 PERROR("zmalloc ust app event fields");
3037 ret = -ENOMEM;
3038 goto error;
3039 }
3040
3041 rcu_read_lock();
3042
3043 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3044 struct lttng_ust_field_iter uiter;
3045
3046 health_code_update();
3047
3048 if (!app->compatible) {
3049 /*
3050 * TODO: In time, we should notify the caller of this error by
3051 * telling them that this is a version error.
3052 */
3053 continue;
3054 }
3055 handle = ustctl_tracepoint_field_list(app->sock);
3056 if (handle < 0) {
3057 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3058 ERR("UST app list field getting handle failed for app pid %d",
3059 app->pid);
3060 }
3061 continue;
3062 }
3063
3064 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3065 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3066 /* Handle ustctl error. */
3067 if (ret < 0) {
3068 free(tmp_event);
3069 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3070 ERR("UST app tp list field failed for app %d with ret %d",
3071 app->sock, ret);
3072 } else {
3073 DBG3("UST app tp list field failed. Application is dead");
3074 }
3075 goto rcu_error;
3076 }
3077
3078 health_code_update();
3079 if (count >= nbmem) {
3080 /* In case the realloc fails, we free the memory */
3081 void *ptr;
3082
3083 DBG2("Reallocating event field list from %zu to %zu entries", nbmem,
3084 2 * nbmem);
3085 nbmem *= 2;
3086 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event_field));
3087 if (ptr == NULL) {
3088 PERROR("realloc ust app event fields");
3089 free(tmp_event);
3090 ret = -ENOMEM;
3091 goto rcu_error;
3092 }
3093 tmp_event = ptr;
3094 }
3095
3096 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3097 tmp_event[count].type = uiter.type;
3098 tmp_event[count].nowrite = uiter.nowrite;
3099
3100 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3101 tmp_event[count].event.loglevel = uiter.loglevel;
3102 tmp_event[count].event.type = LTTNG_UST_TRACEPOINT;
3103 tmp_event[count].event.pid = app->pid;
3104 tmp_event[count].event.enabled = -1;
3105 count++;
3106 }
3107 }
3108
3109 ret = count;
3110 *fields = tmp_event;
3111
3112 DBG2("UST app list event fields done (%zu events)", count);
3113
3114 rcu_error:
3115 rcu_read_unlock();
3116 error:
3117 health_code_update();
3118 return ret;
3119 }
3120
3121 /*
3122 * Free and clean all traceable apps of the global list.
3123 *
3124 * Should _NOT_ be called with RCU read-side lock held.
3125 */
3126 void ust_app_clean_list(void)
3127 {
3128 int ret;
3129 struct ust_app *app;
3130 struct lttng_ht_iter iter;
3131
3132 DBG2("UST app cleaning registered apps hash table");
3133
3134 rcu_read_lock();
3135
3136 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3137 ret = lttng_ht_del(ust_app_ht, &iter);
3138 assert(!ret);
3139 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3140 }
3141
3142 /* Cleanup socket hash table */
3143 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3144 sock_n.node) {
3145 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3146 assert(!ret);
3147 }
3148
3149 /* Cleanup notify socket hash table */
3150 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3151 notify_sock_n.node) {
3152 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3153 assert(!ret);
3154 }
3155 rcu_read_unlock();
3156
3157 /* Destroy is done only when the ht is empty */
3158 ht_cleanup_push(ust_app_ht);
3159 ht_cleanup_push(ust_app_ht_by_sock);
3160 ht_cleanup_push(ust_app_ht_by_notify_sock);
3161 }
3162
3163 /*
3164 * Init UST app hash table.
3165 */
3166 void ust_app_ht_alloc(void)
3167 {
3168 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3169 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3170 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3171 }
3172
3173 /*
3174 * For a specific UST session, disable the channel for all registered apps.
3175 */
3176 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3177 struct ltt_ust_channel *uchan)
3178 {
3179 int ret = 0;
3180 struct lttng_ht_iter iter;
3181 struct lttng_ht_node_str *ua_chan_node;
3182 struct ust_app *app;
3183 struct ust_app_session *ua_sess;
3184 struct ust_app_channel *ua_chan;
3185
3186 if (usess == NULL || uchan == NULL) {
3187 ERR("Disabling UST global channel with NULL values");
3188 ret = -1;
3189 goto error;
3190 }
3191
3192 DBG2("UST app disabling channel %s from global domain for session id %d",
3193 uchan->name, usess->id);
3194
3195 rcu_read_lock();
3196
3197 /* For every registered applications */
3198 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3199 struct lttng_ht_iter uiter;
3200 if (!app->compatible) {
3201 /*
3202 * TODO: In time, we should notify the caller of this error by
3203 * telling them that this is a version error.
3204 */
3205 continue;
3206 }
3207 ua_sess = lookup_session_by_app(usess, app);
3208 if (ua_sess == NULL) {
3209 continue;
3210 }
3211
3212 /* Get channel */
3213 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3214 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3215 /* If the session is found for the app, the channel must be there */
3216 assert(ua_chan_node);
3217
3218 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3219 /* The channel must not be already disabled */
3220 assert(ua_chan->enabled == 1);
3221
3222 /* Disable channel onto application */
3223 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3224 if (ret < 0) {
3225 /* XXX: We might want to report this error at some point... */
3226 continue;
3227 }
3228 }
3229
3230 rcu_read_unlock();
3231
3232 error:
3233 return ret;
3234 }
3235
3236 /*
3237 * For a specific UST session, enable the channel for all registered apps.
3238 */
3239 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3240 struct ltt_ust_channel *uchan)
3241 {
3242 int ret = 0;
3243 struct lttng_ht_iter iter;
3244 struct ust_app *app;
3245 struct ust_app_session *ua_sess;
3246
3247 if (usess == NULL || uchan == NULL) {
3248 ERR("Enabling UST global channel with NULL values");
3249 ret = -1;
3250 goto error;
3251 }
3252
3253 DBG2("UST app enabling channel %s to global domain for session id %d",
3254 uchan->name, usess->id);
3255
3256 rcu_read_lock();
3257
3258 /* For every registered applications */
3259 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3260 if (!app->compatible) {
3261 /*
3262 * TODO: In time, we should notify the caller of this error by
3263 * telling them that this is a version error.
3264 */
3265 continue;
3266 }
3267 ua_sess = lookup_session_by_app(usess, app);
3268 if (ua_sess == NULL) {
3269 continue;
3270 }
3271
3272 /* Enable channel onto application */
3273 ret = enable_ust_app_channel(ua_sess, uchan, app);
3274 if (ret < 0) {
3275 /* XXX: We might want to report this error at some point... */
3276 continue;
3277 }
3278 }
3279
3280 rcu_read_unlock();
3281
3282 error:
3283 return ret;
3284 }
3285
3286 /*
3287 * Disable an event in a channel and for a specific session.
3288 */
3289 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3290 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3291 {
3292 int ret = 0;
3293 struct lttng_ht_iter iter, uiter;
3294 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
3295 struct ust_app *app;
3296 struct ust_app_session *ua_sess;
3297 struct ust_app_channel *ua_chan;
3298 struct ust_app_event *ua_event;
3299
3300 DBG("UST app disabling event %s for all apps in channel "
3301 "%s for session id %d", uevent->attr.name, uchan->name, usess->id);
3302
3303 rcu_read_lock();
3304
3305 /* For all registered applications */
3306 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3307 if (!app->compatible) {
3308 /*
3309 * TODO: In time, we should notify the caller of this error by
3310 * telling them that this is a version error.
3311 */
3312 continue;
3313 }
3314 ua_sess = lookup_session_by_app(usess, app);
3315 if (ua_sess == NULL) {
3316 /* Next app */
3317 continue;
3318 }
3319
3320 /* Lookup channel in the ust app session */
3321 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3322 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3323 if (ua_chan_node == NULL) {
3324 DBG2("Channel %s not found in session id %d for app pid %d. "
3325 "Skipping", uchan->name, usess->id, app->pid);
3326 continue;
3327 }
3328 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3329
3330 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
3331 ua_event_node = lttng_ht_iter_get_node_str(&uiter);
3332 if (ua_event_node == NULL) {
3333 DBG2("Event %s not found in channel %s for app pid %d. "
3334 "Skipping", uevent->attr.name, uchan->name, app->pid);
3335 continue;
3336 }
3337 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
3338
3339 ret = disable_ust_app_event(ua_sess, ua_event, app);
3340 if (ret < 0) {
3341 /* XXX: Report error someday... */
3342 continue;
3343 }
3344 }
3345
3346 rcu_read_unlock();
3347
3348 return ret;
3349 }
3350
3351 /*
3352 * For a specific UST session and UST channel, disable all events for all
3353 * registered apps.
3354 */
3355 int ust_app_disable_all_event_glb(struct ltt_ust_session *usess,
3356 struct ltt_ust_channel *uchan)
3357 {
3358 int ret = 0;
3359 struct lttng_ht_iter iter, uiter;
3360 struct lttng_ht_node_str *ua_chan_node;
3361 struct ust_app *app;
3362 struct ust_app_session *ua_sess;
3363 struct ust_app_channel *ua_chan;
3364 struct ust_app_event *ua_event;
3365
3366 DBG("UST app disabling all events for all apps in channel "
3367 "%s for session id %d", uchan->name, usess->id);
3368
3369 rcu_read_lock();
3370
3371 /* For all registered applications */
3372 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3373 if (!app->compatible) {
3374 /*
3375 * TODO: In time, we should notify the caller of this error by
3376 * telling them that this is a version error.
3377 */
3378 continue;
3379 }
3380 ua_sess = lookup_session_by_app(usess, app);
3381 if (!ua_sess) {
3382 /* The application has problem or is probably dead. */
3383 continue;
3384 }
3385
3386 /* Lookup channel in the ust app session */
3387 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3388 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3389 /* If the channel is not found, there is a code flow error */
3390 assert(ua_chan_node);
3391
3392 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3393
3394 /* Disable each event of the channel */
3395 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
3396 node.node) {
3397 ret = disable_ust_app_event(ua_sess, ua_event, app);
3398 if (ret < 0) {
3399 /* XXX: Report error someday... */
3400 continue;
3401 }
3402 }
3403 }
3404
3405 rcu_read_unlock();
3406
3407 return ret;
3408 }
3409
3410 /*
3411 * For a specific UST session, create the channel for all registered apps.
3412 */
3413 int ust_app_create_channel_glb(struct ltt_ust_session *usess,
3414 struct ltt_ust_channel *uchan)
3415 {
3416 int ret = 0, created;
3417 struct lttng_ht_iter iter;
3418 struct ust_app *app;
3419 struct ust_app_session *ua_sess = NULL;
3420
3421 /* Very wrong code flow */
3422 assert(usess);
3423 assert(uchan);
3424
3425 DBG2("UST app adding channel %s to UST domain for session id %d",
3426 uchan->name, usess->id);
3427
3428 rcu_read_lock();
3429
3430 /* For every registered applications */
3431 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3432 if (!app->compatible) {
3433 /*
3434 * TODO: In time, we should notify the caller of this error by
3435 * telling them that this is a version error.
3436 */
3437 continue;
3438 }
3439 /*
3440 * Create session on the tracer side and add it to app session HT. Note
3441 * that if the session exists, it will simply return a pointer to the ust
3442 * app session.
3443 */
3444 ret = create_ust_app_session(usess, app, &ua_sess, &created);
3445 if (ret < 0) {
3446 switch (ret) {
3447 case -ENOTCONN:
3448 /*
3449 * The application's socket is not valid. Either a bad socket
3450 * or a timeout on it. We can't inform the caller that for a
3451 * specific app the session failed, so let's continue here.
3452 */
3453 continue;
3454 case -ENOMEM:
3455 default:
3456 goto error_rcu_unlock;
3457 }
3458 }
3459 assert(ua_sess);
3460
3461 pthread_mutex_lock(&ua_sess->lock);
3462 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
3463 sizeof(uchan->name))) {
3464 struct ustctl_consumer_channel_attr attr;
3465 copy_channel_attr_to_ustctl(&attr, &uchan->attr);
3466 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
3467 &attr);
3468 } else {
3469 /* Create channel onto application. We don't need the chan ref. */
3470 ret = create_ust_app_channel(ua_sess, uchan, app,
3471 LTTNG_UST_CHAN_PER_CPU, usess, NULL);
3472 }
3473 pthread_mutex_unlock(&ua_sess->lock);
3474 if (ret < 0) {
3475 if (ret == -ENOMEM) {
3476 /* No more memory is a fatal error. Stop right now. */
3477 goto error_rcu_unlock;
3478 }
3479 /* Clean up the created session if one was created. */
3480 if (created) {
3481 destroy_app_session(app, ua_sess);
3482 }
3483 }
3484 }
3485
3486 error_rcu_unlock:
3487 rcu_read_unlock();
3488 return ret;
3489 }
3490
3491 /*
3492 * Enable event for a specific session and channel on the tracer.
3493 */
3494 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
3495 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3496 {
3497 int ret = 0;
3498 struct lttng_ht_iter iter, uiter;
3499 struct lttng_ht_node_str *ua_chan_node;
3500 struct ust_app *app;
3501 struct ust_app_session *ua_sess;
3502 struct ust_app_channel *ua_chan;
3503 struct ust_app_event *ua_event;
3504
3505 DBG("UST app enabling event %s for all apps for session id %d",
3506 uevent->attr.name, usess->id);
3507
3508 /*
3509 * NOTE: At this point, this function is called only if the session and
3510 * channel passed are already created for all apps and enabled on the
3511 * tracer as well.
3512 */
3513
3514 rcu_read_lock();
3515
3516 /* For all registered applications */
3517 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3518 if (!app->compatible) {
3519 /*
3520 * TODO: In time, we should notify the caller of this error by
3521 * telling them that this is a version error.
3522 */
3523 continue;
3524 }
3525 ua_sess = lookup_session_by_app(usess, app);
3526 if (!ua_sess) {
3527 /* The application has problem or is probably dead. */
3528 continue;
3529 }
3530
3531 pthread_mutex_lock(&ua_sess->lock);
3532
3533 /* Lookup channel in the ust app session */
3534 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3535 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3536 /* If the channel is not found, there is a code flow error */
3537 assert(ua_chan_node);
3538
3539 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3540
3541 /* Get event node */
3542 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
3543 uevent->filter, uevent->attr.loglevel);
3544 if (ua_event == NULL) {
3545 DBG3("UST app enable event %s not found for app PID %d. "
3546 "Skipping app", uevent->attr.name, app->pid);
3547 goto next_app;
3548 }
3549
3550 ret = enable_ust_app_event(ua_sess, ua_event, app);
3551 if (ret < 0) {
3552 pthread_mutex_unlock(&ua_sess->lock);
3553 goto error;
3554 }
3555 next_app:
3556 pthread_mutex_unlock(&ua_sess->lock);
3557 }
3558
3559 error:
3560 rcu_read_unlock();
3561 return ret;
3562 }
3563
3564 /*
3565 * For a specific existing UST session and UST channel, creates the event for
3566 * all registered apps.
3567 */
3568 int ust_app_create_event_glb(struct ltt_ust_session *usess,
3569 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3570 {
3571 int ret = 0;
3572 struct lttng_ht_iter iter, uiter;
3573 struct lttng_ht_node_str *ua_chan_node;
3574 struct ust_app *app;
3575 struct ust_app_session *ua_sess;
3576 struct ust_app_channel *ua_chan;
3577
3578 DBG("UST app creating event %s for all apps for session id %d",
3579 uevent->attr.name, usess->id);
3580
3581 rcu_read_lock();
3582
3583 /* For all registered applications */
3584 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3585 if (!app->compatible) {
3586 /*
3587 * TODO: In time, we should notify the caller of this error by
3588 * telling them that this is a version error.
3589 */
3590 continue;
3591 }
3592 ua_sess = lookup_session_by_app(usess, app);
3593 if (!ua_sess) {
3594 /* The application has problem or is probably dead. */
3595 continue;
3596 }
3597
3598 pthread_mutex_lock(&ua_sess->lock);
3599 /* Lookup channel in the ust app session */
3600 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3601 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3602 /* If the channel is not found, there is a code flow error */
3603 assert(ua_chan_node);
3604
3605 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3606
3607 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
3608 pthread_mutex_unlock(&ua_sess->lock);
3609 if (ret < 0) {
3610 if (ret != -LTTNG_UST_ERR_EXIST) {
3611 /* Possible value at this point: -ENOMEM. If so, we stop! */
3612 break;
3613 }
3614 DBG2("UST app event %s already exists on app PID %d",
3615 uevent->attr.name, app->pid);
3616 continue;
3617 }
3618 }
3619
3620 rcu_read_unlock();
3621
3622 return ret;
3623 }
3624
3625 /*
3626 * Start tracing for a specific UST session and app.
3627 */
3628 static
3629 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
3630 {
3631 int ret = 0;
3632 struct ust_app_session *ua_sess;
3633
3634 DBG("Starting tracing for ust app pid %d", app->pid);
3635
3636 rcu_read_lock();
3637
3638 if (!app->compatible) {
3639 goto end;
3640 }
3641
3642 ua_sess = lookup_session_by_app(usess, app);
3643 if (ua_sess == NULL) {
3644 /* The session is in teardown process. Ignore and continue. */
3645 goto end;
3646 }
3647
3648 pthread_mutex_lock(&ua_sess->lock);
3649
3650 /* Upon restart, we skip the setup, already done */
3651 if (ua_sess->started) {
3652 goto skip_setup;
3653 }
3654
3655 /* Create directories if consumer is LOCAL and has a path defined. */
3656 if (usess->consumer->type == CONSUMER_DST_LOCAL &&
3657 strlen(usess->consumer->dst.trace_path) > 0) {
3658 ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
3659 S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
3660 if (ret < 0) {
3661 if (ret != -EEXIST) {
3662 ERR("Trace directory creation error");
3663 goto error_unlock;
3664 }
3665 }
3666 }
3667
3668 /*
3669 * Create the metadata for the application. This returns gracefully if a
3670 * metadata was already set for the session.
3671 */
3672 ret = create_ust_app_metadata(ua_sess, app, usess->consumer, NULL);
3673 if (ret < 0) {
3674 goto error_unlock;
3675 }
3676
3677 health_code_update();
3678
3679 skip_setup:
3680 /* This starts the UST tracing */
3681 ret = ustctl_start_session(app->sock, ua_sess->handle);
3682 if (ret < 0) {
3683 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3684 ERR("Error starting tracing for app pid: %d (ret: %d)",
3685 app->pid, ret);
3686 } else {
3687 DBG("UST app start session failed. Application is dead.");
3688 }
3689 goto error_unlock;
3690 }
3691
3692 /* Indicate that the session has been started once */
3693 ua_sess->started = 1;
3694
3695 pthread_mutex_unlock(&ua_sess->lock);
3696
3697 health_code_update();
3698
3699 /* Quiescent wait after starting trace */
3700 ret = ustctl_wait_quiescent(app->sock);
3701 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3702 ERR("UST app wait quiescent failed for app pid %d ret %d",
3703 app->pid, ret);
3704 }
3705
3706 end:
3707 rcu_read_unlock();
3708 health_code_update();
3709 return 0;
3710
3711 error_unlock:
3712 pthread_mutex_unlock(&ua_sess->lock);
3713 rcu_read_unlock();
3714 health_code_update();
3715 return -1;
3716 }
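/*
 * ust_app_start_trace() is safe to call again after a stop: the first call
 * creates the trace output directory (local consumer only) and the metadata
 * channel and then sets ua_sess->started; later calls jump straight to
 * skip_setup and only re-issue ustctl_start_session().
 */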
3717
3718 /*
3719 * Stop tracing for a specific UST session and app.
3720 */
3721 static
3722 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
3723 {
3724 int ret = 0;
3725 struct ust_app_session *ua_sess;
3726 struct ust_registry_session *registry;
3727
3728 DBG("Stopping tracing for ust app pid %d", app->pid);
3729
3730 rcu_read_lock();
3731
3732 if (!app->compatible) {
3733 goto end_no_session;
3734 }
3735
3736 ua_sess = lookup_session_by_app(usess, app);
3737 if (ua_sess == NULL) {
3738 goto end_no_session;
3739 }
3740
3741 pthread_mutex_lock(&ua_sess->lock);
3742
3743 /*
3744 * If started = 0, it means that stop trace has been called for a session
3745 * that was never started. It's possible since we can have a failed start
3746 * from either the application manager thread or the command thread. Simply
3747 * indicate that this is a stop error.
3748 */
3749 if (!ua_sess->started) {
3750 goto error_rcu_unlock;
3751 }
3752
3753 health_code_update();
3754
3755 /* This inhibits UST tracing */
3756 ret = ustctl_stop_session(app->sock, ua_sess->handle);
3757 if (ret < 0) {
3758 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3759 ERR("Error stopping tracing for app pid: %d (ret: %d)",
3760 app->pid, ret);
3761 } else {
3762 DBG("UST app stop session failed. Application is dead.");
3763 }
3764 goto error_rcu_unlock;
3765 }
3766
3767 health_code_update();
3768
3769 /* Quiescent wait after stopping trace */
3770 ret = ustctl_wait_quiescent(app->sock);
3771 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3772 ERR("UST app wait quiescent failed for app pid %d ret %d",
3773 app->pid, ret);
3774 }
3775
3776 health_code_update();
3777
3778 registry = get_session_registry(ua_sess);
3779 assert(registry);
3780
3781 if (!registry->metadata_closed) {
3782 /* Push metadata for application before freeing the application. */
3783 (void) push_metadata(registry, ua_sess->consumer);
3784 }
3785
3786 pthread_mutex_unlock(&ua_sess->lock);
3787 end_no_session:
3788 rcu_read_unlock();
3789 health_code_update();
3790 return 0;
3791
3792 error_rcu_unlock:
3793 pthread_mutex_unlock(&ua_sess->lock);
3794 rcu_read_unlock();
3795 health_code_update();
3796 return -1;
3797 }
3798
3799 /*
3800 * Flush buffers for a specific UST session and app.
3801 */
3802 static
3803 int ust_app_flush_trace(struct ltt_ust_session *usess, struct ust_app *app)
3804 {
3805 int ret = 0;
3806 struct lttng_ht_iter iter;
3807 struct ust_app_session *ua_sess;
3808 struct ust_app_channel *ua_chan;
3809
3810 DBG("Flushing buffers for ust app pid %d", app->pid);
3811
3812 rcu_read_lock();
3813
3814 if (!app->compatible) {
3815 goto end_no_session;
3816 }
3817
3818 ua_sess = lookup_session_by_app(usess, app);
3819 if (ua_sess == NULL) {
3820 goto end_no_session;
3821 }
3822
3823 pthread_mutex_lock(&ua_sess->lock);
3824
3825 health_code_update();
3826
3827 /* Flushing buffers */
3828 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
3829 node.node) {
3830 health_code_update();
3831 assert(ua_chan->is_sent);
3832 ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
3833 if (ret < 0) {
3834 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3835 ERR("UST app PID %d channel %s flush failed with ret %d",
3836 app->pid, ua_chan->name, ret);
3837 } else {
3838 DBG3("UST app failed to flush %s. Application is dead.",
3839 ua_chan->name);
3840 /* No need to continue. */
3841 break;
3842 }
3843 /* Continuing flushing all buffers */
3844 continue;
3845 }
3846 }
3847
3848 health_code_update();
3849
3850 pthread_mutex_unlock(&ua_sess->lock);
3851 end_no_session:
3852 rcu_read_unlock();
3853 health_code_update();
3854 return 0;
3855 }
3856
3857 /*
3858 * Destroy a specific UST session in apps.
3859 */
3860 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
3861 {
3862 int ret;
3863 struct ust_app_session *ua_sess;
3864 struct lttng_ht_iter iter;
3865 struct lttng_ht_node_ulong *node;
3866
3867 DBG("Destroy tracing for ust app pid %d", app->pid);
3868
3869 rcu_read_lock();
3870
3871 if (!app->compatible) {
3872 goto end;
3873 }
3874
3875 __lookup_session_by_app(usess, app, &iter);
3876 node = lttng_ht_iter_get_node_ulong(&iter);
3877 if (node == NULL) {
3878 /* Session is being deleted or is already deleted. */
3879 goto end;
3880 }
3881 ua_sess = caa_container_of(node, struct ust_app_session, node);
3882
3883 health_code_update();
3884 destroy_app_session(app, ua_sess);
3885
3886 health_code_update();
3887
3888 /* Quiescent wait after stopping trace */
3889 ret = ustctl_wait_quiescent(app->sock);
3890 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3891 ERR("UST app wait quiescent failed for app pid %d ret %d",
3892 app->pid, ret);
3893 }
3894 end:
3895 rcu_read_unlock();
3896 health_code_update();
3897 return 0;
3898 }
3899
3900 /*
3901 * Start tracing for the UST session.
3902 */
3903 int ust_app_start_trace_all(struct ltt_ust_session *usess)
3904 {
3905 int ret = 0;
3906 struct lttng_ht_iter iter;
3907 struct ust_app *app;
3908
3909 DBG("Starting all UST traces");
3910
3911 rcu_read_lock();
3912
3913 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3914 ret = ust_app_start_trace(usess, app);
3915 if (ret < 0) {
3916 /* Continue to next apps even on error */
3917 continue;
3918 }
3919 }
3920
3921 rcu_read_unlock();
3922
3923 return 0;
3924 }
3925
3926 /*
3927 * Stop tracing for the UST session.
3928 */
3929 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
3930 {
3931 int ret = 0;
3932 struct lttng_ht_iter iter;
3933 struct ust_app *app;
3934
3935 DBG("Stopping all UST traces");
3936
3937 rcu_read_lock();
3938
3939 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3940 ret = ust_app_stop_trace(usess, app);
3941 if (ret < 0) {
3942 /* Continue to next apps even on error */
3943 continue;
3944 }
3945 }
3946
3947 /* Flush buffers */
3948 switch (usess->buffer_type) {
3949 case LTTNG_BUFFER_PER_UID:
3950 {
3951 struct buffer_reg_uid *reg;
3952
3953 /* Flush all per UID buffers associated to that session. */
3954 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
3955 struct buffer_reg_channel *reg_chan;
3956 struct consumer_socket *socket;
3957
3958 			/* Get the consumer socket to use for flushing this registry's channels. */
3959 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
3960 usess->consumer);
3961 if (!socket) {
3962 /* Ignore request if no consumer is found for the session. */
3963 continue;
3964 }
3965
3966 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
3967 reg_chan, node.node) {
3968 /*
3969 				 * consumer_flush_channel() logs its own errors, so the return
3970 				 * code is of little importance; whatever happens, we have to
3971 				 * try to flush every channel.
3972 */
3973 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
3974 }
3975 }
3976 break;
3977 }
3978 case LTTNG_BUFFER_PER_PID:
3979 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3980 ret = ust_app_flush_trace(usess, app);
3981 if (ret < 0) {
3982 				/* Continue to the next app even on error. */
3983 continue;
3984 }
3985 }
3986 break;
3987 default:
3988 assert(0);
3989 break;
3990 }
3991
3992 rcu_read_unlock();
3993
3994 return 0;
3995 }
3996
3997 /*
3998  * Destroy the UST session in every registered application.
3999 */
4000 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4001 {
4002 int ret = 0;
4003 struct lttng_ht_iter iter;
4004 struct ust_app *app;
4005
4006 DBG("Destroy all UST traces");
4007
4008 rcu_read_lock();
4009
4010 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4011 ret = destroy_trace(usess, app);
4012 if (ret < 0) {
4013 			/* Continue to the next app even on error. */
4014 continue;
4015 }
4016 }
4017
4018 rcu_read_unlock();
4019
4020 return 0;
4021 }
4022
4023 /*
4024 * Add channels/events from UST global domain to registered apps at sock.
4025 */
4026 void ust_app_global_update(struct ltt_ust_session *usess, int sock)
4027 {
4028 int ret = 0;
4029 struct lttng_ht_iter iter, uiter, iter_ctx;
4030 struct ust_app *app;
4031 struct ust_app_session *ua_sess = NULL;
4032 struct ust_app_channel *ua_chan;
4033 struct ust_app_event *ua_event;
4034 struct ust_app_ctx *ua_ctx;
4035
4036 assert(usess);
4037 assert(sock >= 0);
4038
4039 DBG2("UST app global update for app sock %d for session id %d", sock,
4040 usess->id);
4041
4042 rcu_read_lock();
4043
4044 app = find_app_by_sock(sock);
4045 if (app == NULL) {
4046 /*
4047 		 * The application may already have unregistered, so this is a valid
4048 		 * situation; simply stop the update.
4049 */
4050 DBG3("UST app update failed to find app sock %d", sock);
4051 goto error;
4052 }
4053
4054 if (!app->compatible) {
4055 goto error;
4056 }
4057
4058 ret = create_ust_app_session(usess, app, &ua_sess, NULL);
4059 if (ret < 0) {
4060 /* Tracer is probably gone or ENOMEM. */
4061 goto error;
4062 }
4063 assert(ua_sess);
4064
4065 pthread_mutex_lock(&ua_sess->lock);
4066
4067 /*
4068 	 * We can safely iterate here over the channels of the UST app session
4069 	 * since create_ust_app_session() above made a shadow copy of the UST
4070 	 * global domain from the ltt ust session.
4071 */
4072 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4073 node.node) {
4074 /*
4075 * For a metadata channel, handle it differently.
4076 */
4077 if (!strncmp(ua_chan->name, DEFAULT_METADATA_NAME,
4078 sizeof(ua_chan->name))) {
4079 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
4080 &ua_chan->attr);
4081 if (ret < 0) {
4082 goto error_unlock;
4083 }
4084 			/* Remove it from the hash table and continue. */
4085 ret = lttng_ht_del(ua_sess->channels, &iter);
4086 assert(!ret);
4087 delete_ust_app_channel(-1, ua_chan, app);
4088 continue;
4089 } else {
4090 ret = do_create_channel(app, usess, ua_sess, ua_chan);
4091 if (ret < 0) {
4092 /*
4093 				 * Stop everything. On error, either the application has failed,
4094 				 * no more file descriptors are available, or we hit ENOMEM, so
4095 				 * stopping here is the only thing we can do for now.
4096 */
4097 goto error_unlock;
4098 }
4099 }
4100
4101 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter_ctx.iter, ua_ctx,
4102 node.node) {
4103 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
4104 if (ret < 0) {
4105 goto error_unlock;
4106 }
4107 }
4108
4110 		/* For each event of the channel. */
4111 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
4112 node.node) {
4113 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
4114 if (ret < 0) {
4115 goto error_unlock;
4116 }
4117 }
4118 }
4119
4120 pthread_mutex_unlock(&ua_sess->lock);
4121
4122 if (usess->start_trace) {
4123 ret = ust_app_start_trace(usess, app);
4124 if (ret < 0) {
4125 goto error;
4126 }
4127
4128 DBG2("UST trace started for app pid %d", app->pid);
4129 }
4130
4131 /* Everything went well at this point. */
4132 rcu_read_unlock();
4133 return;
4134
4135 error_unlock:
4136 pthread_mutex_unlock(&ua_sess->lock);
4137 error:
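	/*
	 * Tear down the app session, if any, that was left in a partial state so
	 * a later global update for this application starts from a clean slate.
	 */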
4138 if (ua_sess) {
4139 destroy_app_session(app, ua_sess);
4140 }
4141 rcu_read_unlock();
4142 return;
4143 }
4144
4145 /*
4146 * Add context to a specific channel for global UST domain.
4147 */
4148 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
4149 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
4150 {
4151 int ret = 0;
4152 struct lttng_ht_node_str *ua_chan_node;
4153 struct lttng_ht_iter iter, uiter;
4154 struct ust_app_channel *ua_chan = NULL;
4155 struct ust_app_session *ua_sess;
4156 struct ust_app *app;
4157
4158 rcu_read_lock();
4159
4160 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4161 if (!app->compatible) {
4162 /*
4163 			 * TODO: In time, we should notify the caller of this error by
4164 			 * reporting that this is a version mismatch.
4165 */
4166 continue;
4167 }
4168 ua_sess = lookup_session_by_app(usess, app);
4169 if (ua_sess == NULL) {
4170 continue;
4171 }
4172
4173 pthread_mutex_lock(&ua_sess->lock);
4174 /* Lookup channel in the ust app session */
4175 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4176 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4177 if (ua_chan_node == NULL) {
4178 goto next_app;
4179 }
4180 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
4181 node);
4182 ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
4183 if (ret < 0) {
4184 goto next_app;
4185 }
4186 next_app:
4187 pthread_mutex_unlock(&ua_sess->lock);
4188 }
4189
4190 rcu_read_unlock();
4191 return ret;
4192 }
4193
4194 /*
4195 * Enable event for a channel from a UST session for a specific PID.
4196 */
4197 int ust_app_enable_event_pid(struct ltt_ust_session *usess,
4198 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4199 {
4200 int ret = 0;
4201 struct lttng_ht_iter iter;
4202 struct lttng_ht_node_str *ua_chan_node;
4203 struct ust_app *app;
4204 struct ust_app_session *ua_sess;
4205 struct ust_app_channel *ua_chan;
4206 struct ust_app_event *ua_event;
4207
4208 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
4209
4210 rcu_read_lock();
4211
4212 app = ust_app_find_by_pid(pid);
4213 if (app == NULL) {
4214 ERR("UST app enable event per PID %d not found", pid);
4215 ret = -1;
4216 goto end;
4217 }
4218
4219 if (!app->compatible) {
4220 ret = 0;
4221 goto end;
4222 }
4223
4224 ua_sess = lookup_session_by_app(usess, app);
4225 if (!ua_sess) {
4226 		/* The application has a problem or is probably dead. */
4227 ret = 0;
4228 goto end;
4229 }
4230
4231 pthread_mutex_lock(&ua_sess->lock);
4232 /* Lookup channel in the ust app session */
4233 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4234 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4235 /* If the channel is not found, there is a code flow error */
4236 assert(ua_chan_node);
4237
4238 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4239
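	/* Create the event on the app side if it does not exist yet, otherwise just enable it. */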
4240 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4241 uevent->filter, uevent->attr.loglevel);
4242 if (ua_event == NULL) {
4243 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4244 if (ret < 0) {
4245 goto end_unlock;
4246 }
4247 } else {
4248 ret = enable_ust_app_event(ua_sess, ua_event, app);
4249 if (ret < 0) {
4250 goto end_unlock;
4251 }
4252 }
4253
4254 end_unlock:
4255 pthread_mutex_unlock(&ua_sess->lock);
4256 end:
4257 rcu_read_unlock();
4258 return ret;
4259 }
4260
4261 /*
4262 * Disable event for a channel from a UST session for a specific PID.
4263 */
4264 int ust_app_disable_event_pid(struct ltt_ust_session *usess,
4265 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4266 {
4267 int ret = 0;
4268 struct lttng_ht_iter iter;
4269 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
4270 struct ust_app *app;
4271 struct ust_app_session *ua_sess;
4272 struct ust_app_channel *ua_chan;
4273 struct ust_app_event *ua_event;
4274
4275 DBG("UST app disabling event %s for PID %d", uevent->attr.name, pid);
4276
4277 rcu_read_lock();
4278
4279 app = ust_app_find_by_pid(pid);
4280 if (app == NULL) {
4281 ERR("UST app disable event per PID %d not found", pid);
4282 ret = -1;
4283 goto error;
4284 }
4285
4286 if (!app->compatible) {
4287 ret = 0;
4288 goto error;
4289 }
4290
4291 ua_sess = lookup_session_by_app(usess, app);
4292 if (!ua_sess) {
4293 		/* The application has a problem or is probably dead. */
4294 goto error;
4295 }
4296
4297 /* Lookup channel in the ust app session */
4298 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4299 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4300 if (ua_chan_node == NULL) {
4301 /* Channel does not exist, skip disabling */
4302 goto error;
4303 }
4304 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4305
4306 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &iter);
4307 ua_event_node = lttng_ht_iter_get_node_str(&iter);
4308 if (ua_event_node == NULL) {
4309 /* Event does not exist, skip disabling */
4310 goto error;
4311 }
4312 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
4313
4314 ret = disable_ust_app_event(ua_sess, ua_event, app);
4315 if (ret < 0) {
4316 goto error;
4317 }
4318
4319 error:
4320 rcu_read_unlock();
4321 return ret;
4322 }
4323
4324 /*
4325 * Calibrate registered applications.
4326 */
4327 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4328 {
4329 int ret = 0;
4330 struct lttng_ht_iter iter;
4331 struct ust_app *app;
4332
4333 rcu_read_lock();
4334
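	/* Send the calibrate command to every compatible registered application. */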
4335 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4336 if (!app->compatible) {
4337 /*
4338 			 * TODO: In time, we should notify the caller of this error by
4339 			 * reporting that this is a version mismatch.
4340 */
4341 continue;
4342 }
4343
4344 health_code_update();
4345
4346 ret = ustctl_calibrate(app->sock, calibrate);
4347 if (ret < 0) {
4348 switch (ret) {
4349 case -ENOSYS:
4350 /* Means that it's not implemented on the tracer side. */
4351 ret = 0;
4352 break;
4353 default:
4354 DBG2("Calibrate app PID %d returned with error %d",
4355 app->pid, ret);
4356 break;
4357 }
4358 }
4359 }
4360
4361 DBG("UST app global domain calibration finished");
4362
4363 rcu_read_unlock();
4364
4365 health_code_update();
4366
4367 return ret;
4368 }
4369
4370 /*
4371 * Receive registration and populate the given msg structure.
4372 *
4373 * On success return 0 else a negative value returned by the ustctl call.
4374 */
4375 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4376 {
4377 int ret;
4378 uint32_t pid, ppid, uid, gid;
4379
4380 assert(msg);
4381
4382 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4383 &pid, &ppid, &uid, &gid,
4384 &msg->bits_per_long,
4385 &msg->uint8_t_alignment,
4386 &msg->uint16_t_alignment,
4387 &msg->uint32_t_alignment,
4388 &msg->uint64_t_alignment,
4389 &msg->long_alignment,
4390 &msg->byte_order,
4391 msg->name);
4392 if (ret < 0) {
4393 switch (-ret) {
4394 case EPIPE:
4395 case ECONNRESET:
4396 case LTTNG_UST_ERR_EXITING:
4397 DBG3("UST app recv reg message failed. Application died");
4398 break;
4399 case LTTNG_UST_ERR_UNSUP_MAJOR:
4400 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
4401 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
4402 LTTNG_UST_ABI_MINOR_VERSION);
4403 break;
4404 default:
4405 ERR("UST app recv reg message failed with ret %d", ret);
4406 break;
4407 }
4408 goto error;
4409 }
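	/* These values are received as 32-bit integers; convert them to the native system types. */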
4410 msg->pid = (pid_t) pid;
4411 msg->ppid = (pid_t) ppid;
4412 msg->uid = (uid_t) uid;
4413 msg->gid = (gid_t) gid;
4414
4415 error:
4416 return ret;
4417 }
4418
4419 /*
4420  * Return a ust app channel object using the application object and the channel
4421  * object descriptor as a key. If not found, NULL is returned. An RCU read side
4422  * lock MUST be acquired before calling this function.
4423 */
4424 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4425 int objd)
4426 {
4427 struct lttng_ht_node_ulong *node;
4428 struct lttng_ht_iter iter;
4429 struct ust_app_channel *ua_chan = NULL;
4430
4431 assert(app);
4432
4433 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4434 node = lttng_ht_iter_get_node_ulong(&iter);
4435 if (node == NULL) {
4436 DBG2("UST app channel find by objd %d not found", objd);
4437 goto error;
4438 }
4439
4440 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4441
4442 error:
4443 return ua_chan;
4444 }
4445
4446 /*
4447 * Reply to a register channel notification from an application on the notify
4448 * socket. The channel metadata is also created.
4449 *
4450 * The session UST registry lock is acquired in this function.
4451 *
4452  * On success 0 is returned, otherwise a negative value.
4453 */
4454 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4455 size_t nr_fields, struct ustctl_field *fields)
4456 {
4457 int ret, ret_code = 0;
4458 uint32_t chan_id, reg_count;
4459 uint64_t chan_reg_key;
4460 enum ustctl_channel_header type;
4461 struct ust_app *app;
4462 struct ust_app_channel *ua_chan;
4463 struct ust_app_session *ua_sess;
4464 struct ust_registry_session *registry;
4465 struct ust_registry_channel *chan_reg;
4466
4467 rcu_read_lock();
4468
4469 /* Lookup application. If not found, there is a code flow error. */
4470 app = find_app_by_notify_sock(sock);
4471 if (!app) {
4472 		DBG("Application socket %d is being torn down. Abort event notify",
4473 sock);
4474 ret = 0;
4475 free(fields);
4476 goto error_rcu_unlock;
4477 }
4478
4479 /* Lookup channel by UST object descriptor. */
4480 ua_chan = find_channel_by_objd(app, cobjd);
4481 if (!ua_chan) {
4482 		DBG("Application channel is being torn down. Abort event notify");
4483 ret = 0;
4484 free(fields);
4485 goto error_rcu_unlock;
4486 }
4487
4488 assert(ua_chan->session);
4489 ua_sess = ua_chan->session;
4490
4491 /* Get right session registry depending on the session buffer type. */
4492 registry = get_session_registry(ua_sess);
4493 assert(registry);
4494
4495 /* Depending on the buffer type, a different channel key is used. */
4496 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4497 chan_reg_key = ua_chan->tracing_channel_id;
4498 } else {
4499 chan_reg_key = ua_chan->key;
4500 }
4501
4502 pthread_mutex_lock(&registry->lock);
4503
4504 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4505 assert(chan_reg);
4506
4507 if (!chan_reg->register_done) {
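		/*
		 * First registration for this channel: store the context fields and
		 * pick the event header type. The compact header only has room for a
		 * small event ID space, so use the large header once 31 or more
		 * events are already registered.
		 */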
4508 reg_count = ust_registry_get_event_count(chan_reg);
4509 if (reg_count < 31) {
4510 type = USTCTL_CHANNEL_HEADER_COMPACT;
4511 } else {
4512 type = USTCTL_CHANNEL_HEADER_LARGE;
4513 }
4514
4515 chan_reg->nr_ctx_fields = nr_fields;
4516 chan_reg->ctx_fields = fields;
4517 chan_reg->header_type = type;
4518 } else {
4519 /* Get current already assigned values. */
4520 type = chan_reg->header_type;
4521 free(fields);
4522 /* Set to NULL so the error path does not do a double free. */
4523 fields = NULL;
4524 }
4525 /* Channel id is set during the object creation. */
4526 chan_id = chan_reg->chan_id;
4527
4528 /* Append to metadata */
4529 if (!chan_reg->metadata_dumped) {
4530 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4531 if (ret_code) {
4532 ERR("Error appending channel metadata (errno = %d)", ret_code);
4533 goto reply;
4534 }
4535 }
4536
4537 reply:
4538 DBG3("UST app replying to register channel key %" PRIu64
4539 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4540 ret_code);
4541
4542 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4543 if (ret < 0) {
4544 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4545 ERR("UST app reply channel failed with ret %d", ret);
4546 } else {
4547 DBG3("UST app reply channel failed. Application died");
4548 }
4549 goto error;
4550 }
4551
4552 /* This channel registry registration is completed. */
4553 chan_reg->register_done = 1;
4554
4555 error:
4556 pthread_mutex_unlock(&registry->lock);
4557 error_rcu_unlock:
4558 rcu_read_unlock();
4559 if (ret) {
4560 free(fields);
4561 }
4562 return ret;
4563 }
4564
4565 /*
4566  * Add an event to the UST channel registry. When the event is added to the
4567  * registry, the metadata is also created. Once done, this replies to the
4568  * application with the appropriate error code.
4569  *
4570  * The session UST registry lock is acquired in the function.
4571  *
4572  * On success 0 is returned, otherwise a negative value.
4573 */
4574 static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
4575 char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
4576 char *model_emf_uri)
4577 {
4578 int ret, ret_code;
4579 uint32_t event_id = 0;
4580 uint64_t chan_reg_key;
4581 struct ust_app *app;
4582 struct ust_app_channel *ua_chan;
4583 struct ust_app_session *ua_sess;
4584 struct ust_registry_session *registry;
4585
4586 rcu_read_lock();
4587
4588 /* Lookup application. If not found, there is a code flow error. */
4589 app = find_app_by_notify_sock(sock);
4590 if (!app) {
4591 		DBG("Application socket %d is being torn down. Abort event notify",
4592 sock);
4593 ret = 0;
4594 free(sig);
4595 free(fields);
4596 free(model_emf_uri);
4597 goto error_rcu_unlock;
4598 }
4599
4600 /* Lookup channel by UST object descriptor. */
4601 ua_chan = find_channel_by_objd(app, cobjd);
4602 if (!ua_chan) {
4603 		DBG("Application channel is being torn down. Abort event notify");
4604 ret = 0;
4605 free(sig);
4606 free(fields);
4607 free(model_emf_uri);
4608 goto error_rcu_unlock;
4609 }
4610
4611 assert(ua_chan->session);
4612 ua_sess = ua_chan->session;
4613
4614 registry = get_session_registry(ua_sess);
4615 assert(registry);
4616
4617 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4618 chan_reg_key = ua_chan->tracing_channel_id;
4619 } else {
4620 chan_reg_key = ua_chan->key;
4621 }
4622
4623 pthread_mutex_lock(&registry->lock);
4624
4625 /*
4626 	 * From this point on, this call takes ownership of sig, fields and
4627 	 * model_emf_uri, meaning any needed free is done inside it. These three
4628 	 * variables MUST NOT be read or written after this.
4629 */
4630 ret_code = ust_registry_create_event(registry, chan_reg_key,
4631 sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
4632 model_emf_uri, ua_sess->buffer_type, &event_id);
4633
4634 /*
4635 	 * The return code is sent back to the application through ustctl so
4636 	 * that, in case of an error, it can be notified. It is important not to
4637 	 * return a negative error here, or else the application will get closed.
4638 */
4639 ret = ustctl_reply_register_event(sock, event_id, ret_code);
4640 if (ret < 0) {
4641 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4642 ERR("UST app reply event failed with ret %d", ret);
4643 } else {
4644 DBG3("UST app reply event failed. Application died");
4645 }
4646 /*
4647 		 * No need to wipe the created event since the application socket will
4648 		 * get closed on error, cleaning up everything by itself.
4649 */
4650 goto error;
4651 }
4652
4653 	DBG3("UST registry event %s with id %" PRIu32 " added successfully",
4654 name, event_id);
4655
4656 error:
4657 pthread_mutex_unlock(&registry->lock);
4658 error_rcu_unlock:
4659 rcu_read_unlock();
4660 return ret;
4661 }
4662
4663 /*
4664 * Handle application notification through the given notify socket.
4665 *
4666 * Return 0 on success or else a negative value.
4667 */
4668 int ust_app_recv_notify(int sock)
4669 {
4670 int ret;
4671 enum ustctl_notify_cmd cmd;
4672
4673 DBG3("UST app receiving notify from sock %d", sock);
4674
4675 ret = ustctl_recv_notify(sock, &cmd);
4676 if (ret < 0) {
4677 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4678 ERR("UST app recv notify failed with ret %d", ret);
4679 } else {
4680 DBG3("UST app recv notify failed. Application died");
4681 }
4682 goto error;
4683 }
4684
4685 switch (cmd) {
4686 case USTCTL_NOTIFY_CMD_EVENT:
4687 {
4688 int sobjd, cobjd, loglevel;
4689 char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
4690 size_t nr_fields;
4691 struct ustctl_field *fields;
4692
4693 DBG2("UST app ustctl register event received");
4694
4695 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
4696 &sig, &nr_fields, &fields, &model_emf_uri);
4697 if (ret < 0) {
4698 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4699 ERR("UST app recv event failed with ret %d", ret);
4700 } else {
4701 DBG3("UST app recv event failed. Application died");
4702 }
4703 goto error;
4704 }
4705
4706 /*
4707 		 * Add the event to the UST registry coming from the notify socket. This
4708 		 * call will free sig, fields and model_emf_uri if needed. This code
4709 		 * path loses ownership of these variables and transfers it to
4710 		 * add_event_ust_registry().
4711 */
4712 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
4713 fields, loglevel, model_emf_uri);
4714 if (ret < 0) {
4715 goto error;
4716 }
4717
4718 break;
4719 }
4720 case USTCTL_NOTIFY_CMD_CHANNEL:
4721 {
4722 int sobjd, cobjd;
4723 size_t nr_fields;
4724 struct ustctl_field *fields;
4725
4726 DBG2("UST app ustctl register channel received");
4727
4728 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
4729 &fields);
4730 if (ret < 0) {
4731 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4732 ERR("UST app recv channel failed with ret %d", ret);
4733 } else {
4734 DBG3("UST app recv channel failed. Application died");
4735 }
4736 goto error;
4737 }
4738
4739 /*
4740 		 * Ownership of the fields array is transferred to this function call,
4741 		 * meaning it will be freed if needed. After this, it is invalid to
4742 		 * access or free fields.
4743 */
4744 ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
4745 fields);
4746 if (ret < 0) {
4747 goto error;
4748 }
4749
4750 break;
4751 }
4752 default:
4753 /* Should NEVER happen. */
4754 assert(0);
4755 }
4756
4757 error:
4758 return ret;
4759 }
4760
4761 /*
4762  * Once the notify socket hangs up, this is called. First, it tries to find the
4763  * corresponding application. On failure, the call_rcu to close the socket is
4764  * executed. If an application is found, it tries to delete it from the notify
4765  * socket hash table. Whatever the result, it proceeds to the call_rcu.
4766  *
4767  * Note that an object needs to be allocated here, so that on ENOMEM failure
4768  * the call_rcu is not done but the rest of the cleanup is.
4769 */
4770 void ust_app_notify_sock_unregister(int sock)
4771 {
4772 int err_enomem = 0;
4773 struct lttng_ht_iter iter;
4774 struct ust_app *app;
4775 struct ust_app_notify_sock_obj *obj;
4776
4777 assert(sock >= 0);
4778
4779 rcu_read_lock();
4780
4781 obj = zmalloc(sizeof(*obj));
4782 if (!obj) {
4783 /*
4784 		 * An ENOMEM here is unfortunate. If it strikes, we continue the
4785 		 * procedure but the call_rcu will not be done. In this case, we
4786 		 * accept the fd leak rather than possibly creating an unsynchronized
4787 		 * state between threads.
4788 		 *
4789 		 * TODO: The notify object should be created once the notify socket is
4790 		 * registered and stored independently from the ust app object. The
4791 		 * tricky part is to synchronize the teardown of the application with
4792 		 * this notify object. Let's keep that in mind so we can avoid this
4793 		 * kind of ENOMEM shenanigan in the teardown path.
4794 */
4795 err_enomem = 1;
4796 } else {
4797 obj->fd = sock;
4798 }
4799
4800 DBG("UST app notify socket unregister %d", sock);
4801
4802 /*
4803 	 * Look up the application by notify socket. If this fails, it means that the
4804 * hash table delete has already been done by the application
4805 * unregistration process so we can safely close the notify socket in a
4806 * call RCU.
4807 */
4808 app = find_app_by_notify_sock(sock);
4809 if (!app) {
4810 goto close_socket;
4811 }
4812
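	/*
	 * Point the iterator directly at the application's notify socket node so
	 * the hash table deletion below does not need another lookup.
	 */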
4813 iter.iter.node = &app->notify_sock_n.node;
4814
4815 /*
4816 	 * Whether the deletion below fails or succeeds, we have to close the
4817 	 * socket after a grace period, hence the call_rcu. If the deletion is
4818 	 * successful, the application is no longer visible to other threads;
4819 	 * if it fails, it means the node was already deleted from the hash
4820 	 * table, so either way we just have to close the socket.
4822 */
4823 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4824
4825 close_socket:
4826 rcu_read_unlock();
4827
4828 /*
4829 	 * Close the socket after a grace period to avoid it being reused before
4830 	 * the application object is freed, which could create a race between
4831 	 * threads trying to add a unique entry to the global hash table.
4832 */
4833 if (!err_enomem) {
4834 call_rcu(&obj->head, close_notify_sock_rcu);
4835 }
4836 }
4837
4838 /*
4839 * Destroy a ust app data structure and free its memory.
4840 */
4841 void ust_app_destroy(struct ust_app *app)
4842 {
4843 if (!app) {
4844 return;
4845 }
4846
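	/* Free the application object after a grace period so concurrent RCU readers are not affected. */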
4847 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4848 }
4849
4850 /*
4851 * Take a snapshot for a given UST session. The snapshot is sent to the given
4852 * output.
4853 *
4854 * Return 0 on success or else a negative value.
4855 */
4856 int ust_app_snapshot_record(struct ltt_ust_session *usess,
4857 struct snapshot_output *output, int wait)
4858 {
4859 int ret = 0;
4860 struct lttng_ht_iter iter;
4861 struct ust_app *app;
4862
4863 assert(usess);
4864 assert(output);
4865
4866 rcu_read_lock();
4867
4868 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4869 struct consumer_socket *socket;
4870 struct lttng_ht_iter chan_iter;
4871 struct ust_app_channel *ua_chan;
4872 struct ust_app_session *ua_sess;
4873 struct ust_registry_session *registry;
4874
4875 ua_sess = lookup_session_by_app(usess, app);
4876 if (!ua_sess) {
4877 /* Session not associated with this app. */
4878 continue;
4879 }
4880
4881 /* Get the right consumer socket for the application. */
4882 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4883 output->consumer);
4884 if (!socket) {
4885 ret = -EINVAL;
4886 goto error;
4887 }
4888
4889 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
4890 ua_chan, node.node) {
4891 ret = consumer_snapshot_channel(socket, ua_chan->key, output, 0,
4892 ua_sess->euid, ua_sess->egid, wait);
4893 if (ret < 0) {
4894 goto error;
4895 }
4896 }
4897
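		/*
		 * Also snapshot the metadata channel, whose key is tracked in the
		 * session registry rather than in the per-app channel table.
		 */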
4898 registry = get_session_registry(ua_sess);
4899 assert(registry);
4900 ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
4901 1, ua_sess->euid, ua_sess->egid, wait);
4902 if (ret < 0) {
4903 goto error;
4904 }
4905
4906 }
4907
4908 error:
4909 rcu_read_unlock();
4910 return ret;
4911 }