Fix: channel and stream leak in consumerd
lttng-tools.git: src/bin/lttng-sessiond/ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
43 /* Next available channel key. */
44 static unsigned long next_channel_key;
45 static unsigned long next_session_id;
46
47 /*
48 * Return the atomically incremented value of next_channel_key.
49 */
50 static inline unsigned long get_next_channel_key(void)
51 {
52 return uatomic_add_return(&next_channel_key, 1);
53 }
54
55 /*
56 * Return the atomically incremented value of next_session_id.
57 */
58 static inline unsigned long get_next_session_id(void)
59 {
60 return uatomic_add_return(&next_session_id, 1);
61 }
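/*
 * Note: uatomic_add_return() returns the post-increment value, so the first
 * key/id handed out is 1; a value of 0 therefore never identifies a valid
 * channel or session.
 */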
62
63 static void copy_channel_attr_to_ustctl(
64 struct ustctl_consumer_channel_attr *attr,
65 struct lttng_ust_channel_attr *uattr)
66 {
67 /* Copy channel attributes since the layout is different. */
68 attr->subbuf_size = uattr->subbuf_size;
69 attr->num_subbuf = uattr->num_subbuf;
70 attr->overwrite = uattr->overwrite;
71 attr->switch_timer_interval = uattr->switch_timer_interval;
72 attr->read_timer_interval = uattr->read_timer_interval;
73 attr->output = uattr->output;
74 }
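/*
 * Note: the channel type is not copied here; callers set it separately
 * (alloc_ust_app_channel() defaults it to LTTNG_UST_CHAN_PER_CPU).
 */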
75
76 /*
77 * Match function for the hash table lookup.
78 *
79 * It matches an ust app event based on three attributes which are the event
80 * name, the filter bytecode and the loglevel.
81 */
82 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
83 {
84 struct ust_app_event *event;
85 const struct ust_app_ht_key *key;
86
87 assert(node);
88 assert(_key);
89
90 event = caa_container_of(node, struct ust_app_event, node.node);
91 key = _key;
92
93 /* Match the 3 elements of the key: name, filter and loglevel. */
94
95 /* Event name */
96 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
97 goto no_match;
98 }
99
100 /* Event loglevel. */
101 if (event->attr.loglevel != key->loglevel) {
102 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
103 && key->loglevel == 0 && event->attr.loglevel == -1) {
104 /*
105 * Match is accepted. On event creation, the loglevel is set to -1
106 * when the event loglevel type is ALL, while the API sets it to 0
107 * when receiving an enable event. Both values are thus accepted
108 * for this loglevel type.
109 */
110 } else {
111 goto no_match;
112 }
113 }
114
115 /* One of the filters is NULL, fail. */
116 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
117 goto no_match;
118 }
119
120 if (key->filter && event->filter) {
121 /* Both filters exist; check the length followed by the bytecode. */
122 if (event->filter->len != key->filter->len ||
123 memcmp(event->filter->data, key->filter->data,
124 event->filter->len) != 0) {
125 goto no_match;
126 }
127 }
128
129 /* Match. */
130 return 1;
131
132 no_match:
133 return 0;
134 }
135
136 /*
137 * Unique add of an ust app event in the given ht. This uses the custom
138 * ht_match_ust_app_event match function and the event name as hash.
139 */
140 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
141 struct ust_app_event *event)
142 {
143 struct cds_lfht_node *node_ptr;
144 struct ust_app_ht_key key;
145 struct lttng_ht *ht;
146
147 assert(ua_chan);
148 assert(ua_chan->events);
149 assert(event);
150
151 ht = ua_chan->events;
152 key.name = event->attr.name;
153 key.filter = event->filter;
154 key.loglevel = event->attr.loglevel;
155
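	/*
	 * The hash is computed on the event name only; events sharing a name but
	 * differing in filter or loglevel land in the same bucket and are told
	 * apart by ht_match_ust_app_event().
	 */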
156 node_ptr = cds_lfht_add_unique(ht->ht,
157 ht->hash_fct(event->node.key, lttng_ht_seed),
158 ht_match_ust_app_event, &key, &event->node.node);
159 assert(node_ptr == &event->node.node);
160 }
161
162 /*
163 * Close the notify socket from the given RCU head object. This MUST be called
164 * through a call_rcu().
165 */
166 static void close_notify_sock_rcu(struct rcu_head *head)
167 {
168 int ret;
169 struct ust_app_notify_sock_obj *obj =
170 caa_container_of(head, struct ust_app_notify_sock_obj, head);
171
172 /* Must have a valid fd here. */
173 assert(obj->fd >= 0);
174
175 ret = close(obj->fd);
176 if (ret) {
177 ERR("close notify sock %d RCU", obj->fd);
178 }
179 lttng_fd_put(LTTNG_FD_APPS, 1);
180
181 free(obj);
182 }
183
184 /*
185 * Return the session registry according to the buffer type of the given
186 * session.
187 *
188 * A buffer registry object MUST exist before calling this function. NULL is
189 * returned if it cannot be found. RCU read side lock must be acquired.
190 */
191 static struct ust_registry_session *get_session_registry(
192 struct ust_app_session *ua_sess)
193 {
194 struct ust_registry_session *registry = NULL;
195
196 assert(ua_sess);
197
198 switch (ua_sess->buffer_type) {
199 case LTTNG_BUFFER_PER_PID:
200 {
201 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
202 if (!reg_pid) {
203 goto error;
204 }
205 registry = reg_pid->registry->reg.ust;
206 break;
207 }
208 case LTTNG_BUFFER_PER_UID:
209 {
210 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
211 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
212 if (!reg_uid) {
213 goto error;
214 }
215 registry = reg_uid->registry->reg.ust;
216 break;
217 }
218 default:
219 assert(0);
220 };
221
222 error:
223 return registry;
224 }
225
226 /*
227 * Delete ust context safely. RCU read lock must be held before calling
228 * this function.
229 */
230 static
231 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
232 {
233 int ret;
234
235 assert(ua_ctx);
236
237 if (ua_ctx->obj) {
238 ret = ustctl_release_object(sock, ua_ctx->obj);
239 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
240 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
241 sock, ua_ctx->obj->handle, ret);
242 }
243 free(ua_ctx->obj);
244 }
245 free(ua_ctx);
246 }
247
248 /*
249 * Delete ust app event safely. RCU read lock must be held before calling
250 * this function.
251 */
252 static
253 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
254 {
255 int ret;
256
257 assert(ua_event);
258
259 free(ua_event->filter);
260
261 if (ua_event->obj != NULL) {
262 ret = ustctl_release_object(sock, ua_event->obj);
263 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
264 ERR("UST app sock %d release event obj failed with ret %d",
265 sock, ret);
266 }
267 free(ua_event->obj);
268 }
269 free(ua_event);
270 }
271
272 /*
273 * Release ust data object of the given stream.
274 *
275 * Return 0 on success or else a negative value.
276 */
277 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
278 {
279 int ret = 0;
280
281 assert(stream);
282
283 if (stream->obj) {
284 ret = ustctl_release_object(sock, stream->obj);
285 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
286 ERR("UST app sock %d release stream obj failed with ret %d",
287 sock, ret);
288 }
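		/*
		 * Each stream holds two FDs, reserved in do_consumer_create_channel()
		 * or duplicate_stream_object(); release them here.
		 */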
289 lttng_fd_put(LTTNG_FD_APPS, 2);
290 free(stream->obj);
291 }
292
293 return ret;
294 }
295
296 /*
297 * Delete ust app stream safely. RCU read lock must be held before calling
298 * this function.
299 */
300 static
301 void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
302 {
303 assert(stream);
304
305 (void) release_ust_app_stream(sock, stream);
306 free(stream);
307 }
308
309 /*
310 * We need to execute ht_destroy outside of the RCU read-side critical
311 * section and outside of the call_rcu thread, so we postpone its execution
312 * using ht_cleanup_push. This is simpler than changing the semantics of
313 * the many callers of delete_ust_app_channel().
314 */
315 static
316 void delete_ust_app_channel_rcu(struct rcu_head *head)
317 {
318 struct ust_app_channel *ua_chan =
319 caa_container_of(head, struct ust_app_channel, rcu_head);
320
321 ht_cleanup_push(ua_chan->ctx);
322 ht_cleanup_push(ua_chan->events);
323 free(ua_chan);
324 }
325
326 /*
327 * Delete ust app channel safely. RCU read lock must be held before calling
328 * this function.
329 */
330 static
331 void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
332 struct ust_app *app)
333 {
334 int ret;
335 struct lttng_ht_iter iter;
336 struct ust_app_event *ua_event;
337 struct ust_app_ctx *ua_ctx;
338 struct ust_app_stream *stream, *stmp;
339 struct ust_registry_session *registry;
340
341 assert(ua_chan);
342
343 DBG3("UST app deleting channel %s", ua_chan->name);
344
345 /* Wipe stream */
346 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
347 cds_list_del(&stream->list);
348 delete_ust_app_stream(sock, stream);
349 }
350
351 /* Wipe context */
352 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
353 ret = lttng_ht_del(ua_chan->ctx, &iter);
354 assert(!ret);
355 delete_ust_app_ctx(sock, ua_ctx);
356 }
357
358 /* Wipe events */
359 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
360 node.node) {
361 ret = lttng_ht_del(ua_chan->events, &iter);
362 assert(!ret);
363 delete_ust_app_event(sock, ua_event);
364 }
365
366 /* Wipe and free registry from session registry. */
367 registry = get_session_registry(ua_chan->session);
368 if (registry) {
369 ust_registry_channel_del_free(registry, ua_chan->key);
370 }
371
372 if (ua_chan->obj != NULL) {
373 /* Remove channel from application UST object descriptor. */
374 iter.iter.node = &ua_chan->ust_objd_node.node;
375 lttng_ht_del(app->ust_objd, &iter);
376 ret = ustctl_release_object(sock, ua_chan->obj);
377 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
378 ERR("UST app sock %d release channel obj failed with ret %d",
379 sock, ret);
380 }
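		/*
		 * Release the single FD reserved for the channel object at creation
		 * or duplication time.
		 */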
381 lttng_fd_put(LTTNG_FD_APPS, 1);
382 free(ua_chan->obj);
383 }
384 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
385 }
386
387 /*
388 * Push metadata to consumer socket.
389 *
390 * The socket lock MUST be acquired.
391 * The ust app session lock MUST be acquired.
392 *
393 * On success, return the length of metadata pushed or else a negative value.
394 */
395 ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
396 struct consumer_socket *socket, int send_zero_data)
397 {
398 int ret;
399 char *metadata_str = NULL;
400 size_t len, offset;
401 ssize_t ret_val;
402
403 assert(registry);
404 assert(socket);
405
406 /*
407 * On a push metadata error, either the consumer is dead or the metadata
408 * channel has been destroyed because its endpoint might have died (e.g.
409 * relayd). In that case, the metadata closed flag is set to 1 so we refuse
410 * to push metadata again since it is no longer valid on the consumer side.
411 *
412 * Holding the ust app session mutex allows us to make this check without
413 * taking the registry lock.
414 */
415 if (registry->metadata_closed) {
416 return -EPIPE;
417 }
418
419 pthread_mutex_lock(&registry->lock);
420
421 offset = registry->metadata_len_sent;
422 len = registry->metadata_len - registry->metadata_len_sent;
423 if (len == 0) {
424 DBG3("No metadata to push for metadata key %" PRIu64,
425 registry->metadata_key);
426 ret_val = len;
427 if (send_zero_data) {
428 DBG("No metadata to push");
429 goto push_data;
430 }
431 goto end;
432 }
433
434 /* Allocate only what we have to send. */
435 metadata_str = zmalloc(len);
436 if (!metadata_str) {
437 PERROR("zmalloc ust app metadata string");
438 ret_val = -ENOMEM;
439 goto error;
440 }
441 /* Copy what we haven't sent out yet. */
442 memcpy(metadata_str, registry->metadata + offset, len);
443 registry->metadata_len_sent += len;
444
445 push_data:
446 pthread_mutex_unlock(&registry->lock);
447 ret = consumer_push_metadata(socket, registry->metadata_key,
448 metadata_str, len, offset);
449 if (ret < 0) {
450 ret_val = ret;
451 goto error_push;
452 }
453
454 free(metadata_str);
455 return len;
456
457 end:
458 error:
459 pthread_mutex_unlock(&registry->lock);
460 error_push:
461 free(metadata_str);
462 return ret_val;
463 }
464
465 /*
466 * For a given application and session, push metadata to consumer. The session
467 * lock MUST be acquired here before calling this.
468 * The consumer socket used to send the metadata is looked up from the
469 * given consumer output according to the bitness recorded in the
470 * registry.
471 *
472 * Return 0 on success else a negative error.
473 */
474 static int push_metadata(struct ust_registry_session *registry,
475 struct consumer_output *consumer)
476 {
477 int ret_val;
478 ssize_t ret;
479 struct consumer_socket *socket;
480
481 assert(registry);
482 assert(consumer);
483
484 rcu_read_lock();
485
486 /*
487 * Means that no metadata was assigned to the session. This can happen if
488 * no start has been done previously.
489 */
490 if (!registry->metadata_key) {
491 ret_val = 0;
492 goto end_rcu_unlock;
493 }
494
495 /* Get consumer socket to use to push the metadata.*/
496 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
497 consumer);
498 if (!socket) {
499 ret_val = -1;
500 goto error_rcu_unlock;
501 }
502
503 /*
504 * TODO: Currently, we hold the socket lock around sampling of the next
505 * metadata segment to ensure we send metadata over the consumer socket in
506 * the correct order. This makes the registry lock nest inside the socket
507 * lock.
508 *
509 * Please note that this is a temporary measure: we should move this lock
510 * back into ust_consumer_push_metadata() when the consumer gets the
511 * ability to reorder the metadata it receives.
512 */
513 pthread_mutex_lock(socket->lock);
514 ret = ust_app_push_metadata(registry, socket, 0);
515 pthread_mutex_unlock(socket->lock);
516 if (ret < 0) {
517 ret_val = ret;
518 goto error_rcu_unlock;
519 }
520
521 rcu_read_unlock();
522 return 0;
523
524 error_rcu_unlock:
525 /*
526 * On error, flag the registry that the metadata is closed. We were unable
527 * to push anything and this means that either the consumer is not
528 * responding or the metadata cache has been destroyed on the consumer.
529 */
530 registry->metadata_closed = 1;
531 end_rcu_unlock:
532 rcu_read_unlock();
533 return ret_val;
534 }
535
536 /*
537 * Send to the consumer a close metadata command for the given session. Once
538 * done, the metadata channel is deleted and the session metadata pointer is
539 * nullified. The session lock MUST be acquired here unless the application is
540 * in the destroy path.
541 *
542 * Return 0 on success else a negative value.
543 */
544 static int close_metadata(struct ust_registry_session *registry,
545 struct consumer_output *consumer)
546 {
547 int ret;
548 struct consumer_socket *socket;
549
550 assert(registry);
551 assert(consumer);
552
553 rcu_read_lock();
554
555 if (!registry->metadata_key || registry->metadata_closed) {
556 ret = 0;
557 goto end;
558 }
559
560 /* Get consumer socket to use to push the metadata.*/
561 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
562 consumer);
563 if (!socket) {
564 ret = -1;
565 goto error;
566 }
567
568 ret = consumer_close_metadata(socket, registry->metadata_key);
569 if (ret < 0) {
570 goto error;
571 }
572
573 error:
574 /*
575 * Metadata closed. Even on error this means that the consumer is not
576 * responding or not found, so either way a second close should NOT be
577 * emitted for this registry.
578 */
579 registry->metadata_closed = 1;
580 end:
581 rcu_read_unlock();
582 return ret;
583 }
584
585 /*
586 * We need to execute ht_destroy outside of the RCU read-side critical
587 * section and outside of the call_rcu thread, so we postpone its execution
588 * using ht_cleanup_push. This is simpler than changing the semantics of
589 * the many callers of delete_ust_app_session().
590 */
591 static
592 void delete_ust_app_session_rcu(struct rcu_head *head)
593 {
594 struct ust_app_session *ua_sess =
595 caa_container_of(head, struct ust_app_session, rcu_head);
596
597 ht_cleanup_push(ua_sess->channels);
598 free(ua_sess);
599 }
600
601 /*
602 * Delete ust app session safely. RCU read lock must be held before calling
603 * this function.
604 */
605 static
606 void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
607 struct ust_app *app)
608 {
609 int ret;
610 struct lttng_ht_iter iter;
611 struct ust_app_channel *ua_chan;
612 struct ust_registry_session *registry;
613
614 assert(ua_sess);
615
616 pthread_mutex_lock(&ua_sess->lock);
617
618 registry = get_session_registry(ua_sess);
619 if (registry && !registry->metadata_closed) {
620 /* Push metadata for application before freeing the application. */
621 (void) push_metadata(registry, ua_sess->consumer);
622
623 /*
624 * Don't ask to close metadata for global per UID buffers. Close
625 * metadata only when the trace session is destroyed in this case. Also,
626 * the previous metadata push could have flagged the registry as closed,
627 * so don't send a close command if it is already closed.
628 */
629 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
630 !registry->metadata_closed) {
631 /* And ask to close it for this session registry. */
632 (void) close_metadata(registry, ua_sess->consumer);
633 }
634 }
635
636 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
637 node.node) {
638 ret = lttng_ht_del(ua_sess->channels, &iter);
639 assert(!ret);
640 delete_ust_app_channel(sock, ua_chan, app);
641 }
642
643 /* In case of per PID buffers, the registry is tied to this session; destroy it now. */
644 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
645 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
646 if (reg_pid) {
647 buffer_reg_pid_remove(reg_pid);
648 buffer_reg_pid_destroy(reg_pid);
649 }
650 }
651
652 if (ua_sess->handle != -1) {
653 ret = ustctl_release_handle(sock, ua_sess->handle);
654 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
655 ERR("UST app sock %d release session handle failed with ret %d",
656 sock, ret);
657 }
658 }
659 pthread_mutex_unlock(&ua_sess->lock);
660
661 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
662 }
663
664 /*
665 * Delete a traceable application structure from the global list. Never call
666 * this function outside of a call_rcu call.
667 *
668 * RCU read side lock should _NOT_ be held when calling this function.
669 */
670 static
671 void delete_ust_app(struct ust_app *app)
672 {
673 int ret, sock;
674 struct ust_app_session *ua_sess, *tmp_ua_sess;
675
676 /* Delete ust app sessions info */
677 sock = app->sock;
678 app->sock = -1;
679
680 /* Wipe sessions */
681 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
682 teardown_node) {
683 /* Free every object in the session and the session. */
684 rcu_read_lock();
685 delete_ust_app_session(sock, ua_sess, app);
686 rcu_read_unlock();
687 }
688
689 ht_cleanup_push(app->sessions);
690 ht_cleanup_push(app->ust_objd);
691
692 /*
693 * Wait until we have deleted the application from the sock hash table
694 * before closing this socket, otherwise an application could re-use the
695 * socket ID and race with the teardown, using the same hash table entry.
696 *
697 * It's OK to leave the close in call_rcu. We want it to stay unique for
698 * all RCU readers that could run concurrently with unregister app,
699 * therefore we _need_ to only close that socket after a grace period. So
700 * it should stay in this RCU callback.
701 *
702 * This close() is a very important step of the synchronization model so
703 * every modification to this function must be carefully reviewed.
704 */
705 ret = close(sock);
706 if (ret) {
707 PERROR("close");
708 }
709 lttng_fd_put(LTTNG_FD_APPS, 1);
710
711 DBG2("UST app pid %d deleted", app->pid);
712 free(app);
713 }
714
715 /*
716 * URCU intermediate call to delete an UST app.
717 */
718 static
719 void delete_ust_app_rcu(struct rcu_head *head)
720 {
721 struct lttng_ht_node_ulong *node =
722 caa_container_of(head, struct lttng_ht_node_ulong, head);
723 struct ust_app *app =
724 caa_container_of(node, struct ust_app, pid_n);
725
726 DBG3("Call RCU deleting app PID %d", app->pid);
727 delete_ust_app(app);
728 }
729
730 /*
731 * Delete the session from the application ht and delete the data structure by
732 * freeing every object inside and releasing them.
733 */
734 static void destroy_app_session(struct ust_app *app,
735 struct ust_app_session *ua_sess)
736 {
737 int ret;
738 struct lttng_ht_iter iter;
739
740 assert(app);
741 assert(ua_sess);
742
743 iter.iter.node = &ua_sess->node.node;
744 ret = lttng_ht_del(app->sessions, &iter);
745 if (ret) {
746 /* Already scheduled for teardown. */
747 goto end;
748 }
749
750 /* Once deleted, free the data structure. */
751 delete_ust_app_session(app->sock, ua_sess, app);
752
753 end:
754 return;
755 }
756
757 /*
758 * Alloc new UST app session.
759 */
760 static
761 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
762 {
763 struct ust_app_session *ua_sess;
764
765 /* Init most of the default values by allocating and zeroing. */
766 ua_sess = zmalloc(sizeof(struct ust_app_session));
767 if (ua_sess == NULL) {
768 PERROR("malloc");
769 goto error_free;
770 }
771
772 ua_sess->handle = -1;
773 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
774 pthread_mutex_init(&ua_sess->lock, NULL);
775
776 return ua_sess;
777
778 error_free:
779 return NULL;
780 }
781
782 /*
783 * Alloc new UST app channel.
784 */
785 static
786 struct ust_app_channel *alloc_ust_app_channel(char *name,
787 struct ust_app_session *ua_sess,
788 struct lttng_ust_channel_attr *attr)
789 {
790 struct ust_app_channel *ua_chan;
791
792 /* Init most of the default values by allocating and zeroing. */
793 ua_chan = zmalloc(sizeof(struct ust_app_channel));
794 if (ua_chan == NULL) {
795 PERROR("malloc");
796 goto error;
797 }
798
799 /* Setup channel name */
800 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
801 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
802
803 ua_chan->enabled = 1;
804 ua_chan->handle = -1;
805 ua_chan->session = ua_sess;
806 ua_chan->key = get_next_channel_key();
807 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
808 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
809 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
810
811 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
812
813 /* Copy attributes */
814 if (attr) {
815 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
816 ua_chan->attr.subbuf_size = attr->subbuf_size;
817 ua_chan->attr.num_subbuf = attr->num_subbuf;
818 ua_chan->attr.overwrite = attr->overwrite;
819 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
820 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
821 ua_chan->attr.output = attr->output;
822 }
823 /* By default, the channel is a per cpu channel. */
824 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
825
826 DBG3("UST app channel %s allocated", ua_chan->name);
827
828 return ua_chan;
829
830 error:
831 return NULL;
832 }
833
834 /*
835 * Allocate and initialize a UST app stream.
836 *
837 * Return newly allocated stream pointer or NULL on error.
838 */
839 struct ust_app_stream *ust_app_alloc_stream(void)
840 {
841 struct ust_app_stream *stream = NULL;
842
843 stream = zmalloc(sizeof(*stream));
844 if (stream == NULL) {
845 PERROR("zmalloc ust app stream");
846 goto error;
847 }
848
849 /* Zero could be a valid value for a handle so initialize it to -1. */
850 stream->handle = -1;
851
852 error:
853 return stream;
854 }
855
856 /*
857 * Alloc new UST app event.
858 */
859 static
860 struct ust_app_event *alloc_ust_app_event(char *name,
861 struct lttng_ust_event *attr)
862 {
863 struct ust_app_event *ua_event;
864
865 /* Init most of the default values by allocating and zeroing. */
866 ua_event = zmalloc(sizeof(struct ust_app_event));
867 if (ua_event == NULL) {
868 PERROR("malloc");
869 goto error;
870 }
871
872 ua_event->enabled = 1;
873 strncpy(ua_event->name, name, sizeof(ua_event->name));
874 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
875 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
876
877 /* Copy attributes */
878 if (attr) {
879 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
880 }
881
882 DBG3("UST app event %s allocated", ua_event->name);
883
884 return ua_event;
885
886 error:
887 return NULL;
888 }
889
890 /*
891 * Alloc new UST app context.
892 */
893 static
894 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
895 {
896 struct ust_app_ctx *ua_ctx;
897
898 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
899 if (ua_ctx == NULL) {
900 goto error;
901 }
902
903 if (uctx) {
904 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
905 }
906
907 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
908
909 error:
910 return ua_ctx;
911 }
912
913 /*
914 * Allocate a filter and copy the given original filter.
915 *
916 * Return allocated filter or NULL on error.
917 */
918 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
919 struct lttng_ust_filter_bytecode *orig_f)
920 {
921 struct lttng_ust_filter_bytecode *filter = NULL;
922
923 /* Copy filter bytecode */
924 filter = zmalloc(sizeof(*filter) + orig_f->len);
925 if (!filter) {
926 PERROR("zmalloc alloc ust app filter");
927 goto error;
928 }
929
930 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
931
932 error:
933 return filter;
934 }
935
936 /*
937 * Find an ust_app using the sock and return it. RCU read side lock must be
938 * held before calling this helper function.
939 */
940 static
941 struct ust_app *find_app_by_sock(int sock)
942 {
943 struct lttng_ht_node_ulong *node;
944 struct lttng_ht_iter iter;
945
946 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
947 node = lttng_ht_iter_get_node_ulong(&iter);
948 if (node == NULL) {
949 DBG2("UST app find by sock %d not found", sock);
950 goto error;
951 }
952
953 return caa_container_of(node, struct ust_app, sock_n);
954
955 error:
956 return NULL;
957 }
958
959 /*
960 * Find an ust_app using the notify sock and return it. RCU read side lock must
961 * be held before calling this helper function.
962 */
963 static struct ust_app *find_app_by_notify_sock(int sock)
964 {
965 struct lttng_ht_node_ulong *node;
966 struct lttng_ht_iter iter;
967
968 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
969 &iter);
970 node = lttng_ht_iter_get_node_ulong(&iter);
971 if (node == NULL) {
972 DBG2("UST app find by notify sock %d not found", sock);
973 goto error;
974 }
975
976 return caa_container_of(node, struct ust_app, notify_sock_n);
977
978 error:
979 return NULL;
980 }
981
982 /*
983 * Lookup for an ust app event based on event name, filter bytecode and the
984 * event loglevel.
985 *
986 * Return an ust_app_event object or NULL on error.
987 */
988 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
989 char *name, struct lttng_ust_filter_bytecode *filter, int loglevel)
990 {
991 struct lttng_ht_iter iter;
992 struct lttng_ht_node_str *node;
993 struct ust_app_event *event = NULL;
994 struct ust_app_ht_key key;
995
996 assert(name);
997 assert(ht);
998
999 /* Setup key for event lookup. */
1000 key.name = name;
1001 key.filter = filter;
1002 key.loglevel = loglevel;
1003
1004 /* Lookup using the event name as hash and a custom match fct. */
1005 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1006 ht_match_ust_app_event, &key, &iter.iter);
1007 node = lttng_ht_iter_get_node_str(&iter);
1008 if (node == NULL) {
1009 goto end;
1010 }
1011
1012 event = caa_container_of(node, struct ust_app_event, node);
1013
1014 end:
1015 return event;
1016 }
1017
1018 /*
1019 * Create the channel context on the tracer.
1020 *
1021 * Called with UST app session lock held.
1022 */
1023 static
1024 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1025 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1026 {
1027 int ret;
1028
1029 health_code_update();
1030
1031 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1032 ua_chan->obj, &ua_ctx->obj);
1033 if (ret < 0) {
1034 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1035 ERR("UST app create channel context failed for app (pid: %d) "
1036 "with ret %d", app->pid, ret);
1037 } else {
1038 DBG3("UST app disable event failed. Application is dead.");
1039 }
1040 goto error;
1041 }
1042
1043 ua_ctx->handle = ua_ctx->obj->handle;
1044
1045 DBG2("UST app context handle %d created successfully for channel %s",
1046 ua_ctx->handle, ua_chan->name);
1047
1048 error:
1049 health_code_update();
1050 return ret;
1051 }
1052
1053 /*
1054 * Set the filter on the tracer.
1055 */
1056 static
1057 int set_ust_event_filter(struct ust_app_event *ua_event,
1058 struct ust_app *app)
1059 {
1060 int ret;
1061
1062 health_code_update();
1063
1064 if (!ua_event->filter) {
1065 ret = 0;
1066 goto error;
1067 }
1068
1069 ret = ustctl_set_filter(app->sock, ua_event->filter,
1070 ua_event->obj);
1071 if (ret < 0) {
1072 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1073 ERR("UST app event %s filter failed for app (pid: %d) "
1074 "with ret %d", ua_event->attr.name, app->pid, ret);
1075 } else {
1076 DBG3("UST app filter event failed. Application is dead.");
1077 }
1078 goto error;
1079 }
1080
1081 DBG2("UST filter set successfully for event %s", ua_event->name);
1082
1083 error:
1084 health_code_update();
1085 return ret;
1086 }
1087
1088 /*
1089 * Disable the specified event on to UST tracer for the UST session.
1090 */
1091 static int disable_ust_event(struct ust_app *app,
1092 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1093 {
1094 int ret;
1095
1096 health_code_update();
1097
1098 ret = ustctl_disable(app->sock, ua_event->obj);
1099 if (ret < 0) {
1100 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1101 ERR("UST app event %s disable failed for app (pid: %d) "
1102 "and session handle %d with ret %d",
1103 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1104 } else {
1105 DBG3("UST app disable event failed. Application is dead.");
1106 }
1107 goto error;
1108 }
1109
1110 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1111 ua_event->attr.name, app->pid);
1112
1113 error:
1114 health_code_update();
1115 return ret;
1116 }
1117
1118 /*
1119 * Disable the specified channel on to UST tracer for the UST session.
1120 */
1121 static int disable_ust_channel(struct ust_app *app,
1122 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1123 {
1124 int ret;
1125
1126 health_code_update();
1127
1128 ret = ustctl_disable(app->sock, ua_chan->obj);
1129 if (ret < 0) {
1130 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1131 ERR("UST app channel %s disable failed for app (pid: %d) "
1132 "and session handle %d with ret %d",
1133 ua_chan->name, app->pid, ua_sess->handle, ret);
1134 } else {
1135 DBG3("UST app disable channel failed. Application is dead.");
1136 }
1137 goto error;
1138 }
1139
1140 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1141 ua_chan->name, app->pid);
1142
1143 error:
1144 health_code_update();
1145 return ret;
1146 }
1147
1148 /*
1149 * Enable the specified channel on to UST tracer for the UST session.
1150 */
1151 static int enable_ust_channel(struct ust_app *app,
1152 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1153 {
1154 int ret;
1155
1156 health_code_update();
1157
1158 ret = ustctl_enable(app->sock, ua_chan->obj);
1159 if (ret < 0) {
1160 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1161 ERR("UST app channel %s enable failed for app (pid: %d) "
1162 "and session handle %d with ret %d",
1163 ua_chan->name, app->pid, ua_sess->handle, ret);
1164 } else {
1165 DBG3("UST app enable channel failed. Application is dead.");
1166 }
1167 goto error;
1168 }
1169
1170 ua_chan->enabled = 1;
1171
1172 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1173 ua_chan->name, app->pid);
1174
1175 error:
1176 health_code_update();
1177 return ret;
1178 }
1179
1180 /*
1181 * Enable the specified event on to UST tracer for the UST session.
1182 */
1183 static int enable_ust_event(struct ust_app *app,
1184 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1185 {
1186 int ret;
1187
1188 health_code_update();
1189
1190 ret = ustctl_enable(app->sock, ua_event->obj);
1191 if (ret < 0) {
1192 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1193 ERR("UST app event %s enable failed for app (pid: %d) "
1194 "and session handle %d with ret %d",
1195 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1196 } else {
1197 DBG3("UST app enable event failed. Application is dead.");
1198 }
1199 goto error;
1200 }
1201
1202 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1203 ua_event->attr.name, app->pid);
1204
1205 error:
1206 health_code_update();
1207 return ret;
1208 }
1209
1210 /*
1211 * Send channel and stream buffer to application.
1212 *
1213 * Return 0 on success. On error, a negative value is returned.
1214 */
1215 static int send_channel_pid_to_ust(struct ust_app *app,
1216 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1217 {
1218 int ret;
1219 struct ust_app_stream *stream, *stmp;
1220
1221 assert(app);
1222 assert(ua_sess);
1223 assert(ua_chan);
1224
1225 health_code_update();
1226
1227 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1228 app->sock);
1229
1230 /* Send channel to the application. */
1231 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1232 if (ret < 0) {
1233 goto error;
1234 }
1235
1236 health_code_update();
1237
1238 /* Send all streams to application. */
1239 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1240 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1241 if (ret < 0) {
1242 goto error;
1243 }
1244 /* We don't need the stream anymore once sent to the tracer. */
1245 cds_list_del(&stream->list);
1246 delete_ust_app_stream(-1, stream);
1247 }
1248 /* Flag the channel that it is sent to the application. */
1249 ua_chan->is_sent = 1;
1250
1251 error:
1252 health_code_update();
1253 return ret;
1254 }
1255
1256 /*
1257 * Create the specified event onto the UST tracer for a UST session.
1258 *
1259 * Should be called with session mutex held.
1260 */
1261 static
1262 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1263 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1264 {
1265 int ret = 0;
1266
1267 health_code_update();
1268
1269 /* Create UST event on tracer */
1270 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1271 &ua_event->obj);
1272 if (ret < 0) {
1273 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1274 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1275 ua_event->attr.name, app->pid, ret);
1276 } else {
1277 DBG3("UST app create event failed. Application is dead.");
1278 }
1279 goto error;
1280 }
1281
1282 ua_event->handle = ua_event->obj->handle;
1283
1284 DBG2("UST app event %s created successfully for pid:%d",
1285 ua_event->attr.name, app->pid);
1286
1287 health_code_update();
1288
1289 /* Set filter if one is present. */
1290 if (ua_event->filter) {
1291 ret = set_ust_event_filter(ua_event, app);
1292 if (ret < 0) {
1293 goto error;
1294 }
1295 }
1296
1297 /* If event not enabled, disable it on the tracer */
1298 if (ua_event->enabled == 0) {
1299 ret = disable_ust_event(app, ua_sess, ua_event);
1300 if (ret < 0) {
1301 /*
1302 * If we hit an EPERM, something is wrong with our disable call. If
1303 * we get an EEXIST, there is a problem on the tracer side since we
1304 * just created it.
1305 */
1306 switch (ret) {
1307 case -LTTNG_UST_ERR_PERM:
1308 /* Code flow problem */
1309 assert(0);
1310 case -LTTNG_UST_ERR_EXIST:
1311 /* It's OK for our use case. */
1312 ret = 0;
1313 break;
1314 default:
1315 break;
1316 }
1317 goto error;
1318 }
1319 }
1320
1321 error:
1322 health_code_update();
1323 return ret;
1324 }
1325
1326 /*
1327 * Copy data between an UST app event and a LTT event.
1328 */
1329 static void shadow_copy_event(struct ust_app_event *ua_event,
1330 struct ltt_ust_event *uevent)
1331 {
1332 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1333 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1334
1335 ua_event->enabled = uevent->enabled;
1336
1337 /* Copy event attributes */
1338 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1339
1340 /* Copy filter bytecode */
1341 if (uevent->filter) {
1342 ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
1343 /* Filter might be NULL here in case of ENOMEM. */
1344 }
1345 }
1346
1347 /*
1348 * Copy data between an UST app channel and a LTT channel.
1349 */
1350 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
1351 struct ltt_ust_channel *uchan)
1352 {
1353 struct lttng_ht_iter iter;
1354 struct ltt_ust_event *uevent;
1355 struct ltt_ust_context *uctx;
1356 struct ust_app_event *ua_event;
1357 struct ust_app_ctx *ua_ctx;
1358
1359 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
1360
1361 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
1362 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1363
1364 ua_chan->tracefile_size = uchan->tracefile_size;
1365 ua_chan->tracefile_count = uchan->tracefile_count;
1366
1367 /* Copy channel attributes since the layout is different. */
1368 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
1369 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
1370 ua_chan->attr.overwrite = uchan->attr.overwrite;
1371 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
1372 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
1373 ua_chan->attr.output = uchan->attr.output;
1374 /*
1375 * Note that the attribute channel type is not set since the channel on the
1376 * tracing registry side does not have this information.
1377 */
1378
1379 ua_chan->enabled = uchan->enabled;
1380 ua_chan->tracing_channel_id = uchan->id;
1381
1382 cds_lfht_for_each_entry(uchan->ctx->ht, &iter.iter, uctx, node.node) {
1383 ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
1384 if (ua_ctx == NULL) {
1385 continue;
1386 }
1387 lttng_ht_node_init_ulong(&ua_ctx->node,
1388 (unsigned long) ua_ctx->ctx.ctx);
1389 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1390 }
1391
1392 /* Copy all events from ltt ust channel to ust app channel */
1393 cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
1394 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
1395 uevent->filter, uevent->attr.loglevel);
1396 if (ua_event == NULL) {
1397 DBG2("UST event %s not found on shadow copy channel",
1398 uevent->attr.name);
1399 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
1400 if (ua_event == NULL) {
1401 continue;
1402 }
1403 shadow_copy_event(ua_event, uevent);
1404 add_unique_ust_app_event(ua_chan, ua_event);
1405 }
1406 }
1407
1408 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
1409 }
1410
1411 /*
1412 * Copy data between a UST app session and a regular LTT session.
1413 */
1414 static void shadow_copy_session(struct ust_app_session *ua_sess,
1415 struct ltt_ust_session *usess, struct ust_app *app)
1416 {
1417 struct lttng_ht_node_str *ua_chan_node;
1418 struct lttng_ht_iter iter;
1419 struct ltt_ust_channel *uchan;
1420 struct ust_app_channel *ua_chan;
1421 time_t rawtime;
1422 struct tm *timeinfo;
1423 char datetime[16];
1424 int ret;
1425
1426 /* Get date and time for unique app path */
1427 time(&rawtime);
1428 timeinfo = localtime(&rawtime);
1429 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1430
1431 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1432
1433 ua_sess->tracing_id = usess->id;
1434 ua_sess->id = get_next_session_id();
1435 ua_sess->uid = app->uid;
1436 ua_sess->gid = app->gid;
1437 ua_sess->euid = usess->uid;
1438 ua_sess->egid = usess->gid;
1439 ua_sess->buffer_type = usess->buffer_type;
1440 ua_sess->bits_per_long = app->bits_per_long;
1441 /* There is only one consumer object per session possible. */
1442 ua_sess->consumer = usess->consumer;
1443
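	/*
	 * Build the trace path. Per-PID buffers get a "<name>-<pid>-<datetime>"
	 * component so each application instance remains unique, while per-UID
	 * buffers are keyed on uid and bitness only.
	 */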
1444 switch (ua_sess->buffer_type) {
1445 case LTTNG_BUFFER_PER_PID:
1446 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1447 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1448 datetime);
1449 break;
1450 case LTTNG_BUFFER_PER_UID:
1451 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1452 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1453 break;
1454 default:
1455 assert(0);
1456 goto error;
1457 }
1458 if (ret < 0) {
1459 PERROR("asprintf UST shadow copy session");
1460 assert(0);
1461 goto error;
1462 }
1463
1464 /* Iterate over all channels in global domain. */
1465 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1466 uchan, node.node) {
1467 struct lttng_ht_iter uiter;
1468
1469 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1470 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1471 if (ua_chan_node != NULL) {
1472 /* Channel already exists. Continuing. */
1473 continue;
1474 }
1475
1476 DBG2("Channel %s not found on shadow session copy, creating it",
1477 uchan->name);
1478 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1479 if (ua_chan == NULL) {
1480 /* malloc failed. FIXME: Might want to handle ENOMEM here. */
1481 continue;
1482 }
1483 shadow_copy_channel(ua_chan, uchan);
1484 /*
1485 * The concept of metadata channel does not exist on the tracing
1486 * registry side of the session daemon so this can only be a per CPU
1487 * channel and not metadata.
1488 */
1489 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1490
1491 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1492 }
1493
1494 error:
1495 return;
1496 }
1497
1498 /*
1499 * Lookup session wrapper.
1500 */
1501 static
1502 void __lookup_session_by_app(struct ltt_ust_session *usess,
1503 struct ust_app *app, struct lttng_ht_iter *iter)
1504 {
1505 /* Get right UST app session from app */
1506 lttng_ht_lookup(app->sessions, (void *)((unsigned long) usess->id), iter);
1507 }
1508
1509 /*
1510 * Return ust app session from the app session hashtable using the UST session
1511 * id.
1512 */
1513 static struct ust_app_session *lookup_session_by_app(
1514 struct ltt_ust_session *usess, struct ust_app *app)
1515 {
1516 struct lttng_ht_iter iter;
1517 struct lttng_ht_node_ulong *node;
1518
1519 __lookup_session_by_app(usess, app, &iter);
1520 node = lttng_ht_iter_get_node_ulong(&iter);
1521 if (node == NULL) {
1522 goto error;
1523 }
1524
1525 return caa_container_of(node, struct ust_app_session, node);
1526
1527 error:
1528 return NULL;
1529 }
1530
1531 /*
1532 * Setup buffer registry per PID for the given session and application. If none
1533 * is found, a new one is created, added to the global registry and
1534 * initialized. If regp is valid, it's set with the newly created object.
1535 *
1536 * Return 0 on success or else a negative value.
1537 */
1538 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
1539 struct ust_app *app, struct buffer_reg_pid **regp)
1540 {
1541 int ret = 0;
1542 struct buffer_reg_pid *reg_pid;
1543
1544 assert(ua_sess);
1545 assert(app);
1546
1547 rcu_read_lock();
1548
1549 reg_pid = buffer_reg_pid_find(ua_sess->id);
1550 if (!reg_pid) {
1551 /*
1552 * This is the create channel path meaning that if there is NO
1553 * registry available, we have to create one for this session.
1554 */
1555 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
1556 if (ret < 0) {
1557 goto error;
1558 }
1559 buffer_reg_pid_add(reg_pid);
1560 } else {
1561 goto end;
1562 }
1563
1564 /* Initialize registry. */
1565 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
1566 app->bits_per_long, app->uint8_t_alignment,
1567 app->uint16_t_alignment, app->uint32_t_alignment,
1568 app->uint64_t_alignment, app->long_alignment,
1569 app->byte_order, app->version.major,
1570 app->version.minor);
1571 if (ret < 0) {
1572 goto error;
1573 }
1574
1575 DBG3("UST app buffer registry per PID created successfully");
1576
1577 end:
1578 if (regp) {
1579 *regp = reg_pid;
1580 }
1581 error:
1582 rcu_read_unlock();
1583 return ret;
1584 }
1585
1586 /*
1587 * Setup buffer registry per UID for the given session and application. If none
1588 * is found, a new one is created, added to the global registry and
1589 * initialized. If regp is valid, it's set with the newly created object.
1590 *
1591 * Return 0 on success or else a negative value.
1592 */
1593 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
1594 struct ust_app *app, struct buffer_reg_uid **regp)
1595 {
1596 int ret = 0;
1597 struct buffer_reg_uid *reg_uid;
1598
1599 assert(usess);
1600 assert(app);
1601
1602 rcu_read_lock();
1603
1604 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
1605 if (!reg_uid) {
1606 /*
1607 * This is the create channel path meaning that if there is NO
1608 * registry available, we have to create one for this session.
1609 */
1610 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
1611 LTTNG_DOMAIN_UST, &reg_uid);
1612 if (ret < 0) {
1613 goto error;
1614 }
1615 buffer_reg_uid_add(reg_uid);
1616 } else {
1617 goto end;
1618 }
1619
1620 /* Initialize registry. */
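	/*
	 * Unlike the per-PID case, no ust_app is associated here (NULL app
	 * argument): the per-UID registry is shared by every application with the
	 * same uid and bitness, so it is not tied to a single app instance.
	 */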
1621 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
1622 app->bits_per_long, app->uint8_t_alignment,
1623 app->uint16_t_alignment, app->uint32_t_alignment,
1624 app->uint64_t_alignment, app->long_alignment,
1625 app->byte_order, app->version.major,
1626 app->version.minor);
1627 if (ret < 0) {
1628 goto error;
1629 }
1630 /* Add node to teardown list of the session. */
1631 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
1632
1633 DBG3("UST app buffer registry per UID created successfully");
1634
1635 end:
1636 if (regp) {
1637 *regp = reg_uid;
1638 }
1639 error:
1640 rcu_read_unlock();
1641 return ret;
1642 }
1643
1644 /*
1645 * Create a session on the tracer side for the given app.
1646 *
1647 * On success, ua_sess_ptr is populated with the session pointer or else left
1648 * untouched. If the session was created, is_created is set to 1. On error,
1649 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
1650 * be NULL.
1651 *
1652 * Returns 0 on success or else a negative code, either -ENOMEM or
1653 * -ENOTCONN, the default code when ustctl_create_session() fails.
1654 */
1655 static int create_ust_app_session(struct ltt_ust_session *usess,
1656 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
1657 int *is_created)
1658 {
1659 int ret, created = 0;
1660 struct ust_app_session *ua_sess;
1661
1662 assert(usess);
1663 assert(app);
1664 assert(ua_sess_ptr);
1665
1666 health_code_update();
1667
1668 ua_sess = lookup_session_by_app(usess, app);
1669 if (ua_sess == NULL) {
1670 DBG2("UST app pid: %d session id %d not found, creating it",
1671 app->pid, usess->id);
1672 ua_sess = alloc_ust_app_session(app);
1673 if (ua_sess == NULL) {
1674 /* Only malloc can fail here so something is really wrong. */
1675 ret = -ENOMEM;
1676 goto error;
1677 }
1678 shadow_copy_session(ua_sess, usess, app);
1679 created = 1;
1680 }
1681
1682 switch (usess->buffer_type) {
1683 case LTTNG_BUFFER_PER_PID:
1684 /* Init local registry. */
1685 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
1686 if (ret < 0) {
1687 goto error;
1688 }
1689 break;
1690 case LTTNG_BUFFER_PER_UID:
1691 /* Look for a global registry. If none exists, create one. */
1692 ret = setup_buffer_reg_uid(usess, app, NULL);
1693 if (ret < 0) {
1694 goto error;
1695 }
1696 break;
1697 default:
1698 assert(0);
1699 ret = -EINVAL;
1700 goto error;
1701 }
1702
1703 health_code_update();
1704
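	/*
	 * The handle is initialized to -1 in alloc_ust_app_session(), so a session
	 * that has not yet been created on the tracer side is detected and created
	 * here.
	 */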
1705 if (ua_sess->handle == -1) {
1706 ret = ustctl_create_session(app->sock);
1707 if (ret < 0) {
1708 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1709 ERR("Creating session for app pid %d with ret %d",
1710 app->pid, ret);
1711 } else {
1712 DBG("UST app creating session failed. Application is dead");
1713 }
1714 delete_ust_app_session(-1, ua_sess, app);
1715 if (ret != -ENOMEM) {
1716 /*
1717 * Tracer is probably gone or hit an internal error so let's
1718 * behave as if it will soon unregister or is no longer usable.
1719 */
1720 ret = -ENOTCONN;
1721 }
1722 goto error;
1723 }
1724
1725 ua_sess->handle = ret;
1726
1727 /* Add ust app session to app's HT */
1728 lttng_ht_node_init_ulong(&ua_sess->node,
1729 (unsigned long) ua_sess->tracing_id);
1730 lttng_ht_add_unique_ulong(app->sessions, &ua_sess->node);
1731
1732 DBG2("UST app session created successfully with handle %d", ret);
1733 }
1734
1735 *ua_sess_ptr = ua_sess;
1736 if (is_created) {
1737 *is_created = created;
1738 }
1739
1740 /* Everything went well. */
1741 ret = 0;
1742
1743 error:
1744 health_code_update();
1745 return ret;
1746 }
1747
1748 /*
1749 * Create a context for the channel on the tracer.
1750 *
1751 * Called with UST app session lock held and a RCU read side lock.
1752 */
1753 static
1754 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
1755 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
1756 struct ust_app *app)
1757 {
1758 int ret = 0;
1759 struct lttng_ht_iter iter;
1760 struct lttng_ht_node_ulong *node;
1761 struct ust_app_ctx *ua_ctx;
1762
1763 DBG2("UST app adding context to channel %s", ua_chan->name);
1764
1765 lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
1766 node = lttng_ht_iter_get_node_ulong(&iter);
1767 if (node != NULL) {
1768 ret = -EEXIST;
1769 goto error;
1770 }
1771
1772 ua_ctx = alloc_ust_app_ctx(uctx);
1773 if (ua_ctx == NULL) {
1774 /* malloc failed */
1775 ret = -1;
1776 goto error;
1777 }
1778
1779 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
1780 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1781
1782 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
1783 if (ret < 0) {
1784 goto error;
1785 }
1786
1787 error:
1788 return ret;
1789 }
1790
1791 /*
1792 * Enable on the tracer side a ust app event for the session and channel.
1793 *
1794 * Called with UST app session lock held.
1795 */
1796 static
1797 int enable_ust_app_event(struct ust_app_session *ua_sess,
1798 struct ust_app_event *ua_event, struct ust_app *app)
1799 {
1800 int ret;
1801
1802 ret = enable_ust_event(app, ua_sess, ua_event);
1803 if (ret < 0) {
1804 goto error;
1805 }
1806
1807 ua_event->enabled = 1;
1808
1809 error:
1810 return ret;
1811 }
1812
1813 /*
1814 * Disable on the tracer side a ust app event for the session and channel.
1815 */
1816 static int disable_ust_app_event(struct ust_app_session *ua_sess,
1817 struct ust_app_event *ua_event, struct ust_app *app)
1818 {
1819 int ret;
1820
1821 ret = disable_ust_event(app, ua_sess, ua_event);
1822 if (ret < 0) {
1823 goto error;
1824 }
1825
1826 ua_event->enabled = 0;
1827
1828 error:
1829 return ret;
1830 }
1831
1832 /*
1833 * Lookup ust app channel for session and disable it on the tracer side.
1834 */
1835 static
1836 int disable_ust_app_channel(struct ust_app_session *ua_sess,
1837 struct ust_app_channel *ua_chan, struct ust_app *app)
1838 {
1839 int ret;
1840
1841 ret = disable_ust_channel(app, ua_sess, ua_chan);
1842 if (ret < 0) {
1843 goto error;
1844 }
1845
1846 ua_chan->enabled = 0;
1847
1848 error:
1849 return ret;
1850 }
1851
1852 /*
1853 * Lookup ust app channel for session and enable it on the tracer side. This
1854 * MUST be called with a RCU read side lock acquired.
1855 */
1856 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
1857 struct ltt_ust_channel *uchan, struct ust_app *app)
1858 {
1859 int ret = 0;
1860 struct lttng_ht_iter iter;
1861 struct lttng_ht_node_str *ua_chan_node;
1862 struct ust_app_channel *ua_chan;
1863
1864 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
1865 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
1866 if (ua_chan_node == NULL) {
1867 DBG2("Unable to find channel %s in ust session id %u",
1868 uchan->name, ua_sess->tracing_id);
1869 goto error;
1870 }
1871
1872 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
1873
1874 ret = enable_ust_channel(app, ua_sess, ua_chan);
1875 if (ret < 0) {
1876 goto error;
1877 }
1878
1879 error:
1880 return ret;
1881 }
1882
1883 /*
1884 * Ask the consumer to create a channel and get it if successful.
1885 *
1886 * Return 0 on success or else a negative value.
1887 */
1888 static int do_consumer_create_channel(struct ltt_ust_session *usess,
1889 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
1890 int bitness, struct ust_registry_session *registry)
1891 {
1892 int ret;
1893 unsigned int nb_fd = 0;
1894 struct consumer_socket *socket;
1895
1896 assert(usess);
1897 assert(ua_sess);
1898 assert(ua_chan);
1899 assert(registry);
1900
1901 rcu_read_lock();
1902 health_code_update();
1903
1904 /* Get the right consumer socket for the application. */
1905 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
1906 if (!socket) {
1907 ret = -EINVAL;
1908 goto error;
1909 }
1910
1911 health_code_update();
1912
1913 /* Need one fd for the channel. */
1914 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
1915 if (ret < 0) {
1916 ERR("Exhausted number of available FD upon create channel");
1917 goto error;
1918 }
1919
1920 /*
1921 * Ask consumer to create channel. The consumer will return the number of
1922 * streams we have to expect.
1923 */
1924 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
1925 registry);
1926 if (ret < 0) {
1927 goto error_ask;
1928 }
1929
1930 /*
1931 * Compute the number of fds needed before receiving them. It must be 2 per
1932 * stream (2 being the default value here).
1933 */
1934 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
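	/*
	 * For example, a per-CPU channel that the consumer announces with 4
	 * expected streams reserves 4 * 2 = 8 stream FDs here, on top of the
	 * single channel FD reserved above. (The stream count itself depends on
	 * the consumer; only expected_stream_count is authoritative.)
	 */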
1935
1936 /* Reserve the amount of file descriptor we need. */
1937 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
1938 if (ret < 0) {
1939 ERR("Exhausted number of available FD upon create channel");
1940 goto error_fd_get_stream;
1941 }
1942
1943 health_code_update();
1944
1945 /*
1946 * Now get the channel from the consumer. This call will populate the stream
1947 * list of that channel and set the ust objects.
1948 */
1949 ret = ust_consumer_get_channel(socket, ua_chan);
1950 if (ret < 0) {
1951 goto error_destroy;
1952 }
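	/*
	 * From this point, the channel FD and the nb_fd stream FDs reserved above
	 * are backed by objects received from the consumer; they are released in
	 * delete_ust_app_channel() and release_ust_app_stream() respectively.
	 */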
1953
1954 rcu_read_unlock();
1955 return 0;
1956
1957 error_destroy:
1958 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
1959 error_fd_get_stream:
1960 /*
1961 * Initiate a destroy channel on the consumer since we had an error
1962 * handling it on our side. The return value is of no importance since we
1963 * already have a ret value set by the previous error that we need to
1964 * return.
1965 */
1966 (void) ust_consumer_destroy_channel(socket, ua_chan);
1967 error_ask:
1968 lttng_fd_put(LTTNG_FD_APPS, 1);
1969 error:
1970 health_code_update();
1971 rcu_read_unlock();
1972 return ret;
1973 }
1974
1975 /*
1976 * Duplicate the ust data object of the ust app stream and save it in the
1977 * buffer registry stream.
1978 *
1979 * Return 0 on success or else a negative value.
1980 */
1981 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
1982 struct ust_app_stream *stream)
1983 {
1984 int ret;
1985
1986 assert(reg_stream);
1987 assert(stream);
1988
1989 /* Reserve the amount of file descriptor we need. */
1990 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
1991 if (ret < 0) {
1992 ERR("Exhausted number of available FD upon duplicate stream");
1993 goto error;
1994 }
1995
1996 /* Duplicate object for stream once the original is in the registry. */
1997 ret = ustctl_duplicate_ust_object_data(&stream->obj,
1998 reg_stream->obj.ust);
1999 if (ret < 0) {
2000 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2001 reg_stream->obj.ust, stream->obj, ret);
2002 lttng_fd_put(LTTNG_FD_APPS, 2);
2003 goto error;
2004 }
2005 stream->handle = stream->obj->handle;
2006
2007 error:
2008 return ret;
2009 }
2010
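/*
 * Usage sketch (illustrative only; it mirrors what send_channel_uid_to_ust()
 * does further below): the caller owns the duplicated object and must
 * release it, whether or not the send succeeds:
 *
 *	struct ust_app_stream stream;
 *
 *	ret = duplicate_stream_object(reg_stream, &stream);
 *	if (!ret) {
 *		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
 *		(void) release_ust_app_stream(-1, &stream);
 *	}
 */
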
2011 /*
2012	 * Duplicate the ust data object of the ust app channel and save it in the
2013 * buffer registry channel.
2014 *
2015 * Return 0 on success or else a negative value.
2016 */
2017 static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2018 struct ust_app_channel *ua_chan)
2019 {
2020 int ret;
2021
2022 assert(reg_chan);
2023 assert(ua_chan);
2024
2025	/* Need one fd for the channel. */
2026 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2027 if (ret < 0) {
2028 ERR("Exhausted number of available FD upon duplicate channel");
2029 goto error_fd_get;
2030 }
2031
2032	/* Duplicate object for the channel once the original is in the registry. */
2033 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2034 if (ret < 0) {
2035 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2036 reg_chan->obj.ust, ua_chan->obj, ret);
2037 goto error;
2038 }
2039 ua_chan->handle = ua_chan->obj->handle;
2040
2041 return 0;
2042
2043 error:
2044 lttng_fd_put(LTTNG_FD_APPS, 1);
2045 error_fd_get:
2046 return ret;
2047 }
2048
2049 /*
2050 * For a given channel buffer registry, setup all streams of the given ust
2051 * application channel.
2052 *
2053 * Return 0 on success or else a negative value.
2054 */
2055 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2056 struct ust_app_channel *ua_chan)
2057 {
2058 int ret = 0;
2059 struct ust_app_stream *stream, *stmp;
2060
2061 assert(reg_chan);
2062 assert(ua_chan);
2063
2064 DBG2("UST app setup buffer registry stream");
2065
2066	/* Transfer all streams from the app channel to the buffer registry. */
2067 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2068 struct buffer_reg_stream *reg_stream;
2069
2070 ret = buffer_reg_stream_create(&reg_stream);
2071 if (ret < 0) {
2072 goto error;
2073 }
2074
2075 /*
2076 * Keep original pointer and nullify it in the stream so the delete
2077 * stream call does not release the object.
2078 */
2079 reg_stream->obj.ust = stream->obj;
2080 stream->obj = NULL;
2081 buffer_reg_stream_add(reg_stream, reg_chan);
2082
2083 /* We don't need the streams anymore. */
2084 cds_list_del(&stream->list);
2085 delete_ust_app_stream(-1, stream);
2086 }
2087
2088 error:
2089 return ret;
2090 }
2091
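/*
 * Editor's note (illustrative): the loop above transfers ownership of each
 * ust object from the app stream to the registry stream by keeping the
 * pointer and nullifying it in the source, so delete_ust_app_stream() does
 * not release it. The same hand-off pattern is reused for the channel object
 * in setup_buffer_reg_channel():
 *
 *	reg_chan->obj.ust = ua_chan->obj;
 *	ua_chan->obj = NULL;
 */
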
2092 /*
2093 * Create a buffer registry channel for the given session registry and
2094 * application channel object. If regp pointer is valid, it's set with the
2095	 * created object. Important: the created object is NOT added to the session
2096 * registry hash table.
2097 *
2098 * Return 0 on success else a negative value.
2099 */
2100 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2101 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2102 {
2103 int ret;
2104 struct buffer_reg_channel *reg_chan = NULL;
2105
2106 assert(reg_sess);
2107 assert(ua_chan);
2108
2109 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2110
2111 /* Create buffer registry channel. */
2112 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2113 if (ret < 0) {
2114 goto error_create;
2115 }
2116 assert(reg_chan);
2117 reg_chan->consumer_key = ua_chan->key;
2118
2119 /* Create and add a channel registry to session. */
2120 ret = ust_registry_channel_add(reg_sess->reg.ust,
2121 ua_chan->tracing_channel_id);
2122 if (ret < 0) {
2123 goto error;
2124 }
2125 buffer_reg_channel_add(reg_sess, reg_chan);
2126
2127 if (regp) {
2128 *regp = reg_chan;
2129 }
2130
2131 return 0;
2132
2133 error:
2134 /* Safe because the registry channel object was not added to any HT. */
2135 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2136 error_create:
2137 return ret;
2138 }
2139
2140 /*
2141 * Setup buffer registry channel for the given session registry and application
2142 * channel object. If regp pointer is valid, it's set with the created object.
2143 *
2144 * Return 0 on success else a negative value.
2145 */
2146 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2147 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2148 {
2149 int ret;
2150
2151 assert(reg_sess);
2152 assert(reg_chan);
2153 assert(ua_chan);
2154 assert(ua_chan->obj);
2155
2156 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2157
2158 /* Setup all streams for the registry. */
2159 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2160 if (ret < 0) {
2161 goto error;
2162 }
2163
2164 reg_chan->obj.ust = ua_chan->obj;
2165 ua_chan->obj = NULL;
2166
2167 return 0;
2168
2169 error:
2170 buffer_reg_channel_remove(reg_sess, reg_chan);
2171 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2172 return ret;
2173 }
2174
2175 /*
2176 * Send buffer registry channel to the application.
2177 *
2178 * Return 0 on success else a negative value.
2179 */
2180 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2181 struct ust_app *app, struct ust_app_session *ua_sess,
2182 struct ust_app_channel *ua_chan)
2183 {
2184 int ret;
2185 struct buffer_reg_stream *reg_stream;
2186
2187 assert(reg_chan);
2188 assert(app);
2189 assert(ua_sess);
2190 assert(ua_chan);
2191
2192 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2193
2194 ret = duplicate_channel_object(reg_chan, ua_chan);
2195 if (ret < 0) {
2196 goto error;
2197 }
2198
2199 /* Send channel to the application. */
2200 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
2201 if (ret < 0) {
2202 goto error;
2203 }
2204
2205 health_code_update();
2206
2207 /* Send all streams to application. */
2208 pthread_mutex_lock(&reg_chan->stream_list_lock);
2209 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2210 struct ust_app_stream stream;
2211
2212 ret = duplicate_stream_object(reg_stream, &stream);
2213 if (ret < 0) {
2214 goto error_stream_unlock;
2215 }
2216
2217 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2218 if (ret < 0) {
2219 (void) release_ust_app_stream(-1, &stream);
2220 goto error_stream_unlock;
2221 }
2222
2223 /*
2224 * The return value is not important here. This function will output an
2225 * error if needed.
2226 */
2227 (void) release_ust_app_stream(-1, &stream);
2228 }
2229 ua_chan->is_sent = 1;
2230
2231 error_stream_unlock:
2232 pthread_mutex_unlock(&reg_chan->stream_list_lock);
2233 error:
2234 return ret;
2235 }
2236
2237 /*
2238	 * Create the channel with per UID buffers and send it to the application.
2239 *
2240 * Return 0 on success else a negative value.
2241 */
2242 static int create_channel_per_uid(struct ust_app *app,
2243 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2244 struct ust_app_channel *ua_chan)
2245 {
2246 int ret;
2247 struct buffer_reg_uid *reg_uid;
2248 struct buffer_reg_channel *reg_chan;
2249
2250 assert(app);
2251 assert(usess);
2252 assert(ua_sess);
2253 assert(ua_chan);
2254
2255 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
2256
2257 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2258 /*
2259 * The session creation handles the creation of this global registry
2260	 * object. If none can be found, there is a code flow problem or a
2261 * teardown race.
2262 */
2263 assert(reg_uid);
2264
2265 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
2266 reg_uid);
2267 if (!reg_chan) {
2268 /* Create the buffer registry channel object. */
2269 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
2270 if (ret < 0) {
2271 goto error;
2272 }
2273 assert(reg_chan);
2274
2275 /*
2276 * Create the buffers on the consumer side. This call populates the
2277 * ust app channel object with all streams and data object.
2278 */
2279 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2280 app->bits_per_long, reg_uid->registry->reg.ust);
2281 if (ret < 0) {
2282 /*
2283 * Let's remove the previously created buffer registry channel so
2284 * it's not visible anymore in the session registry.
2285 */
2286 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
2287 ua_chan->tracing_channel_id);
2288 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
2289 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2290 goto error;
2291 }
2292
2293 /*
2294	 * Set up the streams and add them to the session registry.
2295 */
2296 ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
2297 if (ret < 0) {
2298 goto error;
2299 }
2300
2301 }
2302
2303 /* Send buffers to the application. */
2304 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
2305 if (ret < 0) {
2306 goto error;
2307 }
2308
2309 error:
2310 return ret;
2311 }
2312
2313 /*
2314	 * Create the channel with per PID buffers and send it to the application.
2315 *
2316 * Return 0 on success else a negative value.
2317 */
2318 static int create_channel_per_pid(struct ust_app *app,
2319 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2320 struct ust_app_channel *ua_chan)
2321 {
2322 int ret;
2323 struct ust_registry_session *registry;
2324
2325 assert(app);
2326 assert(usess);
2327 assert(ua_sess);
2328 assert(ua_chan);
2329
2330 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2331
2332 rcu_read_lock();
2333
2334 registry = get_session_registry(ua_sess);
2335 assert(registry);
2336
2337 /* Create and add a new channel registry to session. */
2338 ret = ust_registry_channel_add(registry, ua_chan->key);
2339 if (ret < 0) {
2340 goto error;
2341 }
2342
2343 /* Create and get channel on the consumer side. */
2344 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2345 app->bits_per_long, registry);
2346 if (ret < 0) {
2347 goto error;
2348 }
2349
2350 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2351 if (ret < 0) {
2352 goto error;
2353 }
2354
2355 error:
2356 rcu_read_unlock();
2357 return ret;
2358 }
2359
2360 /*
2361	 * From an already allocated ust app channel, create the channel buffers if
2362	 * needed and send them to the application. This MUST be called with an RCU
2363	 * read side lock acquired.
2364 *
2365 * Return 0 on success or else a negative value.
2366 */
2367 static int do_create_channel(struct ust_app *app,
2368 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2369 struct ust_app_channel *ua_chan)
2370 {
2371 int ret;
2372
2373 assert(app);
2374 assert(usess);
2375 assert(ua_sess);
2376 assert(ua_chan);
2377
2378 /* Handle buffer type before sending the channel to the application. */
2379 switch (usess->buffer_type) {
2380 case LTTNG_BUFFER_PER_UID:
2381 {
2382 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2383 if (ret < 0) {
2384 goto error;
2385 }
2386 break;
2387 }
2388 case LTTNG_BUFFER_PER_PID:
2389 {
2390 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2391 if (ret < 0) {
2392 goto error;
2393 }
2394 break;
2395 }
2396 default:
2397 assert(0);
2398 ret = -EINVAL;
2399 goto error;
2400 }
2401
2402 /* Initialize ust objd object using the received handle and add it. */
2403 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2404 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2405
2406 /* If channel is not enabled, disable it on the tracer */
2407 if (!ua_chan->enabled) {
2408 ret = disable_ust_channel(app, ua_sess, ua_chan);
2409 if (ret < 0) {
2410 goto error;
2411 }
2412 }
2413
2414 error:
2415 return ret;
2416 }
2417
2418 /*
2419	 * Create a UST app channel and create it on the tracer. If ua_chanp is not
2420	 * NULL, set it to the newly created channel.
2421 *
2422 * Called with UST app session lock and RCU read-side lock held.
2423 *
2424 * Return 0 on success or else a negative value.
2425 */
2426 static int create_ust_app_channel(struct ust_app_session *ua_sess,
2427 struct ltt_ust_channel *uchan, struct ust_app *app,
2428 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
2429 struct ust_app_channel **ua_chanp)
2430 {
2431 int ret = 0;
2432 struct lttng_ht_iter iter;
2433 struct lttng_ht_node_str *ua_chan_node;
2434 struct ust_app_channel *ua_chan;
2435
2436 /* Lookup channel in the ust app session */
2437 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2438 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2439 if (ua_chan_node != NULL) {
2440 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2441 goto end;
2442 }
2443
2444 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
2445 if (ua_chan == NULL) {
2446 /* Only malloc can fail here */
2447 ret = -ENOMEM;
2448 goto error_alloc;
2449 }
2450 shadow_copy_channel(ua_chan, uchan);
2451
2452 /* Set channel type. */
2453 ua_chan->attr.type = type;
2454
2455 ret = do_create_channel(app, usess, ua_sess, ua_chan);
2456 if (ret < 0) {
2457 goto error;
2458 }
2459
2460 DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
2461 app->pid);
2462
2463 /* Only add the channel if successful on the tracer side. */
2464 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
2465
2466 end:
2467 if (ua_chanp) {
2468 *ua_chanp = ua_chan;
2469 }
2470
2471 /* Everything went well. */
2472 return 0;
2473
2474 error:
2475 delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
2476 error_alloc:
2477 return ret;
2478 }
2479
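/*
 * Usage sketch (illustrative): ust_app_create_channel_glb() below calls this
 * for non-metadata channels with the per-CPU type and without keeping the
 * returned channel pointer:
 *
 *	ret = create_ust_app_channel(ua_sess, uchan, app,
 *			LTTNG_UST_CHAN_PER_CPU, usess, NULL);
 */
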
2480 /*
2481 * Create UST app event and create it on the tracer side.
2482 *
2483 * Called with ust app session mutex held.
2484 */
2485 static
2486 int create_ust_app_event(struct ust_app_session *ua_sess,
2487 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
2488 struct ust_app *app)
2489 {
2490 int ret = 0;
2491 struct ust_app_event *ua_event;
2492
2493 /* Get event node */
2494 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
2495 uevent->filter, uevent->attr.loglevel);
2496 if (ua_event != NULL) {
2497 ret = -EEXIST;
2498 goto end;
2499 }
2500
2501 /* Does not exist so create one */
2502 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
2503 if (ua_event == NULL) {
2504	/* Only malloc can fail here so something is really wrong. */
2505 ret = -ENOMEM;
2506 goto end;
2507 }
2508 shadow_copy_event(ua_event, uevent);
2509
2510 /* Create it on the tracer side */
2511 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
2512 if (ret < 0) {
2513 /* Not found previously means that it does not exist on the tracer */
2514 assert(ret != -LTTNG_UST_ERR_EXIST);
2515 goto error;
2516 }
2517
2518 add_unique_ust_app_event(ua_chan, ua_event);
2519
2520 DBG2("UST app create event %s for PID %d completed", ua_event->name,
2521 app->pid);
2522
2523 end:
2524 return ret;
2525
2526 error:
2527	/* Safe to call: the caller already holds the RCU read side lock. */
2528 delete_ust_app_event(-1, ua_event);
2529 return ret;
2530 }
2531
2532 /*
2533 * Create UST metadata and open it on the tracer side.
2534 *
2535	 * Called with the UST app session lock and the RCU read side lock held.
2536 */
2537 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
2538 struct ust_app *app, struct consumer_output *consumer,
2539 struct ustctl_consumer_channel_attr *attr)
2540 {
2541 int ret = 0;
2542 struct ust_app_channel *metadata;
2543 struct consumer_socket *socket;
2544 struct ust_registry_session *registry;
2545
2546 assert(ua_sess);
2547 assert(app);
2548 assert(consumer);
2549
2550 registry = get_session_registry(ua_sess);
2551 assert(registry);
2552
2553 /* Metadata already exists for this registry or it was closed previously */
2554 if (registry->metadata_key || registry->metadata_closed) {
2555 ret = 0;
2556 goto error;
2557 }
2558
2559 /* Allocate UST metadata */
2560 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
2561 if (!metadata) {
2562 /* malloc() failed */
2563 ret = -ENOMEM;
2564 goto error;
2565 }
2566
2567 if (!attr) {
2568 /* Set default attributes for metadata. */
2569 metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
2570 metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
2571 metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
2572 metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
2573 metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
2574 metadata->attr.output = LTTNG_UST_MMAP;
2575 metadata->attr.type = LTTNG_UST_CHAN_METADATA;
2576 } else {
2577 memcpy(&metadata->attr, attr, sizeof(metadata->attr));
2578 metadata->attr.output = LTTNG_UST_MMAP;
2579 metadata->attr.type = LTTNG_UST_CHAN_METADATA;
2580 }
2581
2582 /* Need one fd for the channel. */
2583 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2584 if (ret < 0) {
2585 ERR("Exhausted number of available FD upon create metadata");
2586 goto error;
2587 }
2588
2589 /* Get the right consumer socket for the application. */
2590 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
2591 if (!socket) {
2592 ret = -EINVAL;
2593 goto error_consumer;
2594 }
2595
2596 /*
2597 * Keep metadata key so we can identify it on the consumer side. Assign it
2598	 * to the registry *before* we ask the consumer so we avoid the race where
2599	 * the consumer requests the metadata before the ask_channel call on our
2600	 * side has returned.
2601 */
2602 registry->metadata_key = metadata->key;
2603
2604 /*
2605	 * Ask the consumer to create the metadata channel. The metadata object
2606	 * will be created by the consumer and kept there. However, the stream is
2607	 * never added or monitored until we do a first metadata push to the
2608	 * consumer.
2609 */
2610 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
2611 registry);
2612 if (ret < 0) {
2613 /* Nullify the metadata key so we don't try to close it later on. */
2614 registry->metadata_key = 0;
2615 goto error_consumer;
2616 }
2617
2618 /*
2619	 * The setup command will make the metadata stream be sent to the relayd,
2620	 * if applicable, and to the thread managing the metadata. This is
2621	 * important because after this point, if an error occurs, the only way
2622	 * the stream can be deleted is through the consumer monitoring it.
2623 */
2624 ret = consumer_setup_metadata(socket, metadata->key);
2625 if (ret < 0) {
2626 /* Nullify the metadata key so we don't try to close it later on. */
2627 registry->metadata_key = 0;
2628 goto error_consumer;
2629 }
2630
2631 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
2632 metadata->key, app->pid);
2633
2634 error_consumer:
2635 lttng_fd_put(LTTNG_FD_APPS, 1);
2636 delete_ust_app_channel(-1, metadata, app);
2637 error:
2638 return ret;
2639 }
2640
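/*
 * Usage sketch (illustrative): passing a NULL attr requests the default
 * metadata attributes, as done in ust_app_start_trace() below, while
 * ust_app_global_update() instead passes the attributes of the shadow-copied
 * metadata channel:
 *
 *	ret = create_ust_app_metadata(ua_sess, app, usess->consumer, NULL);
 */
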
2641 /*
2642	 * Return a pointer to the traceable apps hash table.
2643 */
2644 struct lttng_ht *ust_app_get_ht(void)
2645 {
2646 return ust_app_ht;
2647 }
2648
2649 /*
2650 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2651 * acquired before calling this function.
2652 */
2653 struct ust_app *ust_app_find_by_pid(pid_t pid)
2654 {
2655 struct ust_app *app = NULL;
2656 struct lttng_ht_node_ulong *node;
2657 struct lttng_ht_iter iter;
2658
2659 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2660 node = lttng_ht_iter_get_node_ulong(&iter);
2661 if (node == NULL) {
2662 DBG2("UST app no found with pid %d", pid);
2663 goto error;
2664 }
2665
2666 DBG2("Found UST app by pid %d", pid);
2667
2668 app = caa_container_of(node, struct ust_app, pid_n);
2669
2670 error:
2671 return app;
2672 }
2673
2674 /*
2675 * Allocate and init an UST app object using the registration information and
2676 * the command socket. This is called when the command socket connects to the
2677 * session daemon.
2678 *
2679 * The object is returned on success or else NULL.
2680 */
2681 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
2682 {
2683 struct ust_app *lta = NULL;
2684
2685 assert(msg);
2686 assert(sock >= 0);
2687
2688 DBG3("UST app creating application for socket %d", sock);
2689
2690 if ((msg->bits_per_long == 64 &&
2691 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
2692 || (msg->bits_per_long == 32 &&
2693 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
2694 ERR("Registration failed: application \"%s\" (pid: %d) has "
2695 "%d-bit long, but no consumerd for this size is available.\n",
2696 msg->name, msg->pid, msg->bits_per_long);
2697 goto error;
2698 }
2699
2700 lta = zmalloc(sizeof(struct ust_app));
2701 if (lta == NULL) {
2702 PERROR("malloc");
2703 goto error;
2704 }
2705
2706 lta->ppid = msg->ppid;
2707 lta->uid = msg->uid;
2708 lta->gid = msg->gid;
2709
2710 lta->bits_per_long = msg->bits_per_long;
2711 lta->uint8_t_alignment = msg->uint8_t_alignment;
2712 lta->uint16_t_alignment = msg->uint16_t_alignment;
2713 lta->uint32_t_alignment = msg->uint32_t_alignment;
2714 lta->uint64_t_alignment = msg->uint64_t_alignment;
2715 lta->long_alignment = msg->long_alignment;
2716 lta->byte_order = msg->byte_order;
2717
2718 lta->v_major = msg->major;
2719 lta->v_minor = msg->minor;
2720 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2721 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2722 lta->notify_sock = -1;
2723
2724 /* Copy name and make sure it's NULL terminated. */
2725 strncpy(lta->name, msg->name, sizeof(lta->name));
2726 lta->name[UST_APP_PROCNAME_LEN] = '\0';
2727
2728 /*
2729 * Before this can be called, when receiving the registration information,
2730 * the application compatibility is checked. So, at this point, the
2731 * application can work with this session daemon.
2732 */
2733 lta->compatible = 1;
2734
2735 lta->pid = msg->pid;
2736 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
2737 lta->sock = sock;
2738 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
2739
2740 CDS_INIT_LIST_HEAD(&lta->teardown_head);
2741
2742 error:
2743 return lta;
2744 }
2745
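/*
 * Illustrative registration sketch (hedged; the actual sequencing lives in
 * the registration and dispatch threads, which are not part of this file).
 * The notify_sock value here is a hypothetical placeholder; ust_app_add()
 * asserts that it has been set to a valid descriptor beforehand:
 *
 *	app = ust_app_create(&msg, sock);
 *	if (app) {
 *		app->notify_sock = notify_sock;
 *		(void) ust_app_version(app);
 *		ust_app_add(app);
 *	}
 */
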
2746 /*
2747 * For a given application object, add it to every hash table.
2748 */
2749 void ust_app_add(struct ust_app *app)
2750 {
2751 assert(app);
2752 assert(app->notify_sock >= 0);
2753
2754 rcu_read_lock();
2755
2756 /*
2757 * On a re-registration, we want to kick out the previous registration of
2758 * that pid
2759 */
2760 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
2761
2762 /*
2763	 * The socket _should_ be unique until _we_ call close. So, an add_unique
2764	 * is used for ust_app_ht_by_sock, which assert-fails if the entry is
2765	 * already in the table.
2766 */
2767 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
2768
2769 /* Add application to the notify socket hash table. */
2770 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
2771 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
2772
2773 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
2774 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
2775 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
2776 app->v_minor);
2777
2778 rcu_read_unlock();
2779 }
2780
2781 /*
2782 * Set the application version into the object.
2783 *
2784	 * Return 0 on success, else a negative value that is either an errno code
2785	 * or an LTTng-UST error code.
2786 */
2787 int ust_app_version(struct ust_app *app)
2788 {
2789 int ret;
2790
2791 assert(app);
2792
2793 ret = ustctl_tracer_version(app->sock, &app->version);
2794 if (ret < 0) {
2795 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2796 ERR("UST app %d verson failed with ret %d", app->sock, ret);
2797 } else {
2798 DBG3("UST app %d verion failed. Application is dead", app->sock);
2799 }
2800 }
2801
2802 return ret;
2803 }
2804
2805 /*
2806 * Unregister app by removing it from the global traceable app list and freeing
2807 * the data struct.
2808 *
2809	 * The socket is already closed at this point so there is no need to close it.
2810 */
2811 void ust_app_unregister(int sock)
2812 {
2813 struct ust_app *lta;
2814 struct lttng_ht_node_ulong *node;
2815 struct lttng_ht_iter iter;
2816 struct ust_app_session *ua_sess;
2817 int ret;
2818
2819 rcu_read_lock();
2820
2821 /* Get the node reference for a call_rcu */
2822 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
2823 node = lttng_ht_iter_get_node_ulong(&iter);
2824 assert(node);
2825
2826 lta = caa_container_of(node, struct ust_app, sock_n);
2827 DBG("PID %d unregistering with sock %d", lta->pid, sock);
2828
2829	/* Remove application from socket hash table */
2830 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
2831 assert(!ret);
2832
2833 /*
2834 * Remove application from notify hash table. The thread handling the
2835	 * notify socket could have deleted the node so ignore errors because
2836 * either way it's valid. The close of that socket is handled by the other
2837 * thread.
2838 */
2839 iter.iter.node = &lta->notify_sock_n.node;
2840 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
2841
2842 /*
2843 * Ignore return value since the node might have been removed before by an
2844 * add replace during app registration because the PID can be reassigned by
2845 * the OS.
2846 */
2847 iter.iter.node = &lta->pid_n.node;
2848 ret = lttng_ht_del(ust_app_ht, &iter);
2849 if (ret) {
2850 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
2851 lta->pid);
2852 }
2853
2854	/* Remove sessions so they are not visible during deletion. */
2855 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
2856 node.node) {
2857 struct ust_registry_session *registry;
2858
2859 ret = lttng_ht_del(lta->sessions, &iter);
2860 if (ret) {
2861	/* The session was already removed so it is scheduled for teardown. */
2862 continue;
2863 }
2864
2865 /*
2866 * Add session to list for teardown. This is safe since at this point we
2867 * are the only one using this list.
2868 */
2869 pthread_mutex_lock(&ua_sess->lock);
2870
2871 /*
2872 * Normally, this is done in the delete session process which is
2873	 * executed in the call rcu below. However, upon unregistration we can't
2874	 * afford to wait for the grace period before pushing data or else the
2875	 * data pending feature can race between the unregistration and stop
2876	 * command where the data pending command is sent *before* the grace
2877	 * period ends.
2878 *
2879 * The close metadata below nullifies the metadata pointer in the
2880 * session so the delete session will NOT push/close a second time.
2881 */
2882 registry = get_session_registry(ua_sess);
2883 if (registry && !registry->metadata_closed) {
2884 /* Push metadata for application before freeing the application. */
2885 (void) push_metadata(registry, ua_sess->consumer);
2886
2887 /*
2888 * Don't ask to close metadata for global per UID buffers. Close
2889	 * metadata only on trace session destroy in this case. Also, the
2890	 * previous metadata push could have flagged the metadata registry to
2891	 * close, so don't send a close command if it is already closed.
2892 */
2893 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
2894 !registry->metadata_closed) {
2895 /* And ask to close it for this session registry. */
2896 (void) close_metadata(registry, ua_sess->consumer);
2897 }
2898 }
2899
2900 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
2901 pthread_mutex_unlock(&ua_sess->lock);
2902 }
2903
2904 /* Free memory */
2905 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
2906
2907 rcu_read_unlock();
2908 return;
2909 }
2910
2911 /*
2912	 * Return the number of traceable applications.
2913 */
2914 unsigned long ust_app_list_count(void)
2915 {
2916 unsigned long count;
2917
2918 rcu_read_lock();
2919 count = lttng_ht_get_count(ust_app_ht);
2920 rcu_read_unlock();
2921
2922 return count;
2923 }
2924
2925 /*
2926	 * Fill the events array with the names of all events of all registered apps.
2927 */
2928 int ust_app_list_events(struct lttng_event **events)
2929 {
2930 int ret, handle;
2931 size_t nbmem, count = 0;
2932 struct lttng_ht_iter iter;
2933 struct ust_app *app;
2934 struct lttng_event *tmp_event;
2935
2936 nbmem = UST_APP_EVENT_LIST_SIZE;
2937 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
2938 if (tmp_event == NULL) {
2939 PERROR("zmalloc ust app events");
2940 ret = -ENOMEM;
2941 goto error;
2942 }
2943
2944 rcu_read_lock();
2945
2946 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
2947 struct lttng_ust_tracepoint_iter uiter;
2948
2949 health_code_update();
2950
2951 if (!app->compatible) {
2952 /*
2953	 * TODO: In time, we should notify the caller of this error by
2954	 * telling it that this is a version error.
2955 */
2956 continue;
2957 }
2958 handle = ustctl_tracepoint_list(app->sock);
2959 if (handle < 0) {
2960 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
2961 ERR("UST app list events getting handle failed for app pid %d",
2962 app->pid);
2963 }
2964 continue;
2965 }
2966
2967 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
2968 &uiter)) != -LTTNG_UST_ERR_NOENT) {
2969 /* Handle ustctl error. */
2970 if (ret < 0) {
2971 free(tmp_event);
2972	if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2973 ERR("UST app tp list get failed for app %d with ret %d",
2974 app->sock, ret);
2975 } else {
2976 DBG3("UST app tp list get failed. Application is dead");
2977 }
2978 goto rcu_error;
2979 }
2980
2981 health_code_update();
2982 if (count >= nbmem) {
2983 /* In case the realloc fails, we free the memory */
2984 void *ptr;
2985
2986 DBG2("Reallocating event list from %zu to %zu entries", nbmem,
2987 2 * nbmem);
2988 nbmem *= 2;
2989 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event));
2990 if (ptr == NULL) {
2991 PERROR("realloc ust app events");
2992 free(tmp_event);
2993 ret = -ENOMEM;
2994 goto rcu_error;
2995 }
2996 tmp_event = ptr;
2997 }
2998 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
2999 tmp_event[count].loglevel = uiter.loglevel;
3000 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3001 tmp_event[count].pid = app->pid;
3002 tmp_event[count].enabled = -1;
3003 count++;
3004 }
3005 }
3006
3007 ret = count;
3008 *events = tmp_event;
3009
3010 DBG2("UST app list events done (%zu events)", count);
3011
3012 rcu_error:
3013 rcu_read_unlock();
3014 error:
3015 health_code_update();
3016 return ret;
3017 }
3018
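/*
 * Caller sketch (illustrative): on success the returned value is the number
 * of entries and the array is heap allocated, so the caller is expected to
 * free it:
 *
 *	struct lttng_event *events;
 *	int nb_events = ust_app_list_events(&events);
 *
 *	if (nb_events >= 0) {
 *		... use events[0 .. nb_events - 1] ...
 *		free(events);
 *	}
 */
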
3019 /*
3020	 * Fill the fields array with all event fields of all registered apps.
3021 */
3022 int ust_app_list_event_fields(struct lttng_event_field **fields)
3023 {
3024 int ret, handle;
3025 size_t nbmem, count = 0;
3026 struct lttng_ht_iter iter;
3027 struct ust_app *app;
3028 struct lttng_event_field *tmp_event;
3029
3030 nbmem = UST_APP_EVENT_LIST_SIZE;
3031 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3032 if (tmp_event == NULL) {
3033 PERROR("zmalloc ust app event fields");
3034 ret = -ENOMEM;
3035 goto error;
3036 }
3037
3038 rcu_read_lock();
3039
3040 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3041 struct lttng_ust_field_iter uiter;
3042
3043 health_code_update();
3044
3045 if (!app->compatible) {
3046 /*
3047	 * TODO: In time, we should notify the caller of this error by
3048	 * telling it that this is a version error.
3049 */
3050 continue;
3051 }
3052 handle = ustctl_tracepoint_field_list(app->sock);
3053 if (handle < 0) {
3054 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3055 ERR("UST app list field getting handle failed for app pid %d",
3056 app->pid);
3057 }
3058 continue;
3059 }
3060
3061 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3062 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3063 /* Handle ustctl error. */
3064 if (ret < 0) {
3065 free(tmp_event);
3066	if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3067 ERR("UST app tp list field failed for app %d with ret %d",
3068 app->sock, ret);
3069 } else {
3070 DBG3("UST app tp list field failed. Application is dead");
3071 }
3072 goto rcu_error;
3073 }
3074
3075 health_code_update();
3076 if (count >= nbmem) {
3077 /* In case the realloc fails, we free the memory */
3078 void *ptr;
3079
3080 DBG2("Reallocating event field list from %zu to %zu entries", nbmem,
3081 2 * nbmem);
3082 nbmem *= 2;
3083 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event_field));
3084 if (ptr == NULL) {
3085 PERROR("realloc ust app event fields");
3086 free(tmp_event);
3087 ret = -ENOMEM;
3088 goto rcu_error;
3089 }
3090 tmp_event = ptr;
3091 }
3092
3093 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3094 tmp_event[count].type = uiter.type;
3095 tmp_event[count].nowrite = uiter.nowrite;
3096
3097 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3098 tmp_event[count].event.loglevel = uiter.loglevel;
3099 tmp_event[count].event.type = LTTNG_UST_TRACEPOINT;
3100 tmp_event[count].event.pid = app->pid;
3101 tmp_event[count].event.enabled = -1;
3102 count++;
3103 }
3104 }
3105
3106 ret = count;
3107 *fields = tmp_event;
3108
3109 DBG2("UST app list event fields done (%zu events)", count);
3110
3111 rcu_error:
3112 rcu_read_unlock();
3113 error:
3114 health_code_update();
3115 return ret;
3116 }
3117
3118 /*
3119 * Free and clean all traceable apps of the global list.
3120 *
3121 * Should _NOT_ be called with RCU read-side lock held.
3122 */
3123 void ust_app_clean_list(void)
3124 {
3125 int ret;
3126 struct ust_app *app;
3127 struct lttng_ht_iter iter;
3128
3129 DBG2("UST app cleaning registered apps hash table");
3130
3131 rcu_read_lock();
3132
3133 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3134 ret = lttng_ht_del(ust_app_ht, &iter);
3135 assert(!ret);
3136 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3137 }
3138
3139 /* Cleanup socket hash table */
3140 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3141 sock_n.node) {
3142 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3143 assert(!ret);
3144 }
3145
3146 /* Cleanup notify socket hash table */
3147 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3148 notify_sock_n.node) {
3149 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3150 assert(!ret);
3151 }
3152 rcu_read_unlock();
3153
3154 /* Destroy is done only when the ht is empty */
3155 ht_cleanup_push(ust_app_ht);
3156 ht_cleanup_push(ust_app_ht_by_sock);
3157 ht_cleanup_push(ust_app_ht_by_notify_sock);
3158 }
3159
3160 /*
3161	 * Init UST app hash tables.
3162 */
3163 void ust_app_ht_alloc(void)
3164 {
3165 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3166 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3167 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3168 }
3169
3170 /*
3171 * For a specific UST session, disable the channel for all registered apps.
3172 */
3173 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3174 struct ltt_ust_channel *uchan)
3175 {
3176 int ret = 0;
3177 struct lttng_ht_iter iter;
3178 struct lttng_ht_node_str *ua_chan_node;
3179 struct ust_app *app;
3180 struct ust_app_session *ua_sess;
3181 struct ust_app_channel *ua_chan;
3182
3183 if (usess == NULL || uchan == NULL) {
3184 ERR("Disabling UST global channel with NULL values");
3185 ret = -1;
3186 goto error;
3187 }
3188
3189 DBG2("UST app disabling channel %s from global domain for session id %d",
3190 uchan->name, usess->id);
3191
3192 rcu_read_lock();
3193
3194	/* For every registered application */
3195 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3196 struct lttng_ht_iter uiter;
3197 if (!app->compatible) {
3198 /*
3199	 * TODO: In time, we should notify the caller of this error by
3200	 * telling it that this is a version error.
3201 */
3202 continue;
3203 }
3204 ua_sess = lookup_session_by_app(usess, app);
3205 if (ua_sess == NULL) {
3206 continue;
3207 }
3208
3209 /* Get channel */
3210 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3211 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3212	/* If the session is found for the app, the channel must be there */
3213 assert(ua_chan_node);
3214
3215 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3216 /* The channel must not be already disabled */
3217 assert(ua_chan->enabled == 1);
3218
3219 /* Disable channel onto application */
3220 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3221 if (ret < 0) {
3222 /* XXX: We might want to report this error at some point... */
3223 continue;
3224 }
3225 }
3226
3227 rcu_read_unlock();
3228
3229 error:
3230 return ret;
3231 }
3232
3233 /*
3234 * For a specific UST session, enable the channel for all registered apps.
3235 */
3236 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3237 struct ltt_ust_channel *uchan)
3238 {
3239 int ret = 0;
3240 struct lttng_ht_iter iter;
3241 struct ust_app *app;
3242 struct ust_app_session *ua_sess;
3243
3244 if (usess == NULL || uchan == NULL) {
3245 ERR("Adding UST global channel to NULL values");
3246 ret = -1;
3247 goto error;
3248 }
3249
3250 DBG2("UST app enabling channel %s to global domain for session id %d",
3251 uchan->name, usess->id);
3252
3253 rcu_read_lock();
3254
3255	/* For every registered application */
3256 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3257 if (!app->compatible) {
3258 /*
3259	 * TODO: In time, we should notify the caller of this error by
3260	 * telling it that this is a version error.
3261 */
3262 continue;
3263 }
3264 ua_sess = lookup_session_by_app(usess, app);
3265 if (ua_sess == NULL) {
3266 continue;
3267 }
3268
3269 /* Enable channel onto application */
3270 ret = enable_ust_app_channel(ua_sess, uchan, app);
3271 if (ret < 0) {
3272 /* XXX: We might want to report this error at some point... */
3273 continue;
3274 }
3275 }
3276
3277 rcu_read_unlock();
3278
3279 error:
3280 return ret;
3281 }
3282
3283 /*
3284 * Disable an event in a channel and for a specific session.
3285 */
3286 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3287 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3288 {
3289 int ret = 0;
3290 struct lttng_ht_iter iter, uiter;
3291 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
3292 struct ust_app *app;
3293 struct ust_app_session *ua_sess;
3294 struct ust_app_channel *ua_chan;
3295 struct ust_app_event *ua_event;
3296
3297 DBG("UST app disabling event %s for all apps in channel "
3298 "%s for session id %d", uevent->attr.name, uchan->name, usess->id);
3299
3300 rcu_read_lock();
3301
3302 /* For all registered applications */
3303 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3304 if (!app->compatible) {
3305 /*
3306	 * TODO: In time, we should notify the caller of this error by
3307	 * telling it that this is a version error.
3308 */
3309 continue;
3310 }
3311 ua_sess = lookup_session_by_app(usess, app);
3312 if (ua_sess == NULL) {
3313 /* Next app */
3314 continue;
3315 }
3316
3317 /* Lookup channel in the ust app session */
3318 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3319 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3320 if (ua_chan_node == NULL) {
3321 DBG2("Channel %s not found in session id %d for app pid %d."
3322 "Skipping", uchan->name, usess->id, app->pid);
3323 continue;
3324 }
3325 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3326
3327 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
3328 ua_event_node = lttng_ht_iter_get_node_str(&uiter);
3329 if (ua_event_node == NULL) {
3330 DBG2("Event %s not found in channel %s for app pid %d."
3331 "Skipping", uevent->attr.name, uchan->name, app->pid);
3332 continue;
3333 }
3334 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
3335
3336 ret = disable_ust_app_event(ua_sess, ua_event, app);
3337 if (ret < 0) {
3338 /* XXX: Report error someday... */
3339 continue;
3340 }
3341 }
3342
3343 rcu_read_unlock();
3344
3345 return ret;
3346 }
3347
3348 /*
3349	 * For a specific UST session and UST channel, disable all events for all
3350	 * registered apps.
3351 */
3352 int ust_app_disable_all_event_glb(struct ltt_ust_session *usess,
3353 struct ltt_ust_channel *uchan)
3354 {
3355 int ret = 0;
3356 struct lttng_ht_iter iter, uiter;
3357 struct lttng_ht_node_str *ua_chan_node;
3358 struct ust_app *app;
3359 struct ust_app_session *ua_sess;
3360 struct ust_app_channel *ua_chan;
3361 struct ust_app_event *ua_event;
3362
3363 DBG("UST app disabling all event for all apps in channel "
3364 "%s for session id %d", uchan->name, usess->id);
3365
3366 rcu_read_lock();
3367
3368 /* For all registered applications */
3369 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3370 if (!app->compatible) {
3371 /*
3372	 * TODO: In time, we should notify the caller of this error by
3373	 * telling it that this is a version error.
3374 */
3375 continue;
3376 }
3377 ua_sess = lookup_session_by_app(usess, app);
3378 if (!ua_sess) {
3379	/* The application has a problem or is probably dead. */
3380 continue;
3381 }
3382
3383 /* Lookup channel in the ust app session */
3384 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3385 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3386 /* If the channel is not found, there is a code flow error */
3387 assert(ua_chan_node);
3388
3389 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3390
3391	/* Disable each event of the channel */
3392 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
3393 node.node) {
3394 ret = disable_ust_app_event(ua_sess, ua_event, app);
3395 if (ret < 0) {
3396 /* XXX: Report error someday... */
3397 continue;
3398 }
3399 }
3400 }
3401
3402 rcu_read_unlock();
3403
3404 return ret;
3405 }
3406
3407 /*
3408 * For a specific UST session, create the channel for all registered apps.
3409 */
3410 int ust_app_create_channel_glb(struct ltt_ust_session *usess,
3411 struct ltt_ust_channel *uchan)
3412 {
3413 int ret = 0, created;
3414 struct lttng_ht_iter iter;
3415 struct ust_app *app;
3416 struct ust_app_session *ua_sess = NULL;
3417
3418 /* Very wrong code flow */
3419 assert(usess);
3420 assert(uchan);
3421
3422 DBG2("UST app adding channel %s to UST domain for session id %d",
3423 uchan->name, usess->id);
3424
3425 rcu_read_lock();
3426
3427	/* For every registered application */
3428 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3429 if (!app->compatible) {
3430 /*
3431	 * TODO: In time, we should notify the caller of this error by
3432	 * telling it that this is a version error.
3433 */
3434 continue;
3435 }
3436 /*
3437 * Create session on the tracer side and add it to app session HT. Note
3438	 * that if the session already exists, it will simply return a pointer to
3439	 * the ust app session.
3440 */
3441 ret = create_ust_app_session(usess, app, &ua_sess, &created);
3442 if (ret < 0) {
3443 switch (ret) {
3444 case -ENOTCONN:
3445 /*
3446 * The application's socket is not valid. Either a bad socket
3447 * or a timeout on it. We can't inform the caller that for a
3448	 * specific app, the session failed so let's continue here.
3449 */
3450 continue;
3451 case -ENOMEM:
3452 default:
3453 goto error_rcu_unlock;
3454 }
3455 }
3456 assert(ua_sess);
3457
3458 pthread_mutex_lock(&ua_sess->lock);
3459 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
3460 sizeof(uchan->name))) {
3461 struct ustctl_consumer_channel_attr attr;
3462 copy_channel_attr_to_ustctl(&attr, &uchan->attr);
3463 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
3464 &attr);
3465 } else {
3466 /* Create channel onto application. We don't need the chan ref. */
3467 ret = create_ust_app_channel(ua_sess, uchan, app,
3468 LTTNG_UST_CHAN_PER_CPU, usess, NULL);
3469 }
3470 pthread_mutex_unlock(&ua_sess->lock);
3471 if (ret < 0) {
3472 if (ret == -ENOMEM) {
3473 /* No more memory is a fatal error. Stop right now. */
3474 goto error_rcu_unlock;
3475 }
3476	/* Clean up the created session if applicable. */
3477 if (created) {
3478 destroy_app_session(app, ua_sess);
3479 }
3480 }
3481 }
3482
3483 error_rcu_unlock:
3484 rcu_read_unlock();
3485 return ret;
3486 }
3487
3488 /*
3489 * Enable event for a specific session and channel on the tracer.
3490 */
3491 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
3492 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3493 {
3494 int ret = 0;
3495 struct lttng_ht_iter iter, uiter;
3496 struct lttng_ht_node_str *ua_chan_node;
3497 struct ust_app *app;
3498 struct ust_app_session *ua_sess;
3499 struct ust_app_channel *ua_chan;
3500 struct ust_app_event *ua_event;
3501
3502 DBG("UST app enabling event %s for all apps for session id %d",
3503 uevent->attr.name, usess->id);
3504
3505 /*
3506 * NOTE: At this point, this function is called only if the session and
3507	 * channel passed are already created for all apps and also enabled on
3508	 * the tracer.
3509 */
3510
3511 rcu_read_lock();
3512
3513 /* For all registered applications */
3514 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3515 if (!app->compatible) {
3516 /*
3517	 * TODO: In time, we should notify the caller of this error by
3518	 * telling it that this is a version error.
3519 */
3520 continue;
3521 }
3522 ua_sess = lookup_session_by_app(usess, app);
3523 if (!ua_sess) {
3524	/* The application has a problem or is probably dead. */
3525 continue;
3526 }
3527
3528 pthread_mutex_lock(&ua_sess->lock);
3529
3530 /* Lookup channel in the ust app session */
3531 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3532 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3533 /* If the channel is not found, there is a code flow error */
3534 assert(ua_chan_node);
3535
3536 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3537
3538 /* Get event node */
3539 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
3540 uevent->filter, uevent->attr.loglevel);
3541 if (ua_event == NULL) {
3542 DBG3("UST app enable event %s not found for app PID %d."
3543 "Skipping app", uevent->attr.name, app->pid);
3544 goto next_app;
3545 }
3546
3547 ret = enable_ust_app_event(ua_sess, ua_event, app);
3548 if (ret < 0) {
3549 pthread_mutex_unlock(&ua_sess->lock);
3550 goto error;
3551 }
3552 next_app:
3553 pthread_mutex_unlock(&ua_sess->lock);
3554 }
3555
3556 error:
3557 rcu_read_unlock();
3558 return ret;
3559 }
3560
3561 /*
3562	 * For a specific existing UST session and UST channel, create the event for
3563 * all registered apps.
3564 */
3565 int ust_app_create_event_glb(struct ltt_ust_session *usess,
3566 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3567 {
3568 int ret = 0;
3569 struct lttng_ht_iter iter, uiter;
3570 struct lttng_ht_node_str *ua_chan_node;
3571 struct ust_app *app;
3572 struct ust_app_session *ua_sess;
3573 struct ust_app_channel *ua_chan;
3574
3575 DBG("UST app creating event %s for all apps for session id %d",
3576 uevent->attr.name, usess->id);
3577
3578 rcu_read_lock();
3579
3580 /* For all registered applications */
3581 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3582 if (!app->compatible) {
3583 /*
3584	 * TODO: In time, we should notify the caller of this error by
3585	 * telling it that this is a version error.
3586 */
3587 continue;
3588 }
3589 ua_sess = lookup_session_by_app(usess, app);
3590 if (!ua_sess) {
3591	/* The application has a problem or is probably dead. */
3592 continue;
3593 }
3594
3595 pthread_mutex_lock(&ua_sess->lock);
3596 /* Lookup channel in the ust app session */
3597 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3598 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3599 /* If the channel is not found, there is a code flow error */
3600 assert(ua_chan_node);
3601
3602 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3603
3604 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
3605 pthread_mutex_unlock(&ua_sess->lock);
3606 if (ret < 0) {
3607 if (ret != -LTTNG_UST_ERR_EXIST) {
3608 /* Possible value at this point: -ENOMEM. If so, we stop! */
3609 break;
3610 }
3611 DBG2("UST app event %s already exist on app PID %d",
3612 uevent->attr.name, app->pid);
3613 continue;
3614 }
3615 }
3616
3617 rcu_read_unlock();
3618
3619 return ret;
3620 }
3621
3622 /*
3623 * Start tracing for a specific UST session and app.
3624 */
3625 static
3626 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
3627 {
3628 int ret = 0;
3629 struct ust_app_session *ua_sess;
3630
3631 DBG("Starting tracing for ust app pid %d", app->pid);
3632
3633 rcu_read_lock();
3634
3635 if (!app->compatible) {
3636 goto end;
3637 }
3638
3639 ua_sess = lookup_session_by_app(usess, app);
3640 if (ua_sess == NULL) {
3641	/* The session is being torn down. Ignore and continue. */
3642 goto end;
3643 }
3644
3645 pthread_mutex_lock(&ua_sess->lock);
3646
3647 /* Upon restart, we skip the setup, already done */
3648 if (ua_sess->started) {
3649 goto skip_setup;
3650 }
3651
3652 /* Create directories if consumer is LOCAL and has a path defined. */
3653 if (usess->consumer->type == CONSUMER_DST_LOCAL &&
3654 strlen(usess->consumer->dst.trace_path) > 0) {
3655 ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
3656 S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
3657 if (ret < 0) {
3658 if (ret != -EEXIST) {
3659 ERR("Trace directory creation error");
3660 goto error_unlock;
3661 }
3662 }
3663 }
3664
3665 /*
3666 * Create the metadata for the application. This returns gracefully if a
3667 * metadata was already set for the session.
3668 */
3669 ret = create_ust_app_metadata(ua_sess, app, usess->consumer, NULL);
3670 if (ret < 0) {
3671 goto error_unlock;
3672 }
3673
3674 health_code_update();
3675
3676 skip_setup:
3677	/* This starts the UST tracing */
3678 ret = ustctl_start_session(app->sock, ua_sess->handle);
3679 if (ret < 0) {
3680 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3681 ERR("Error starting tracing for app pid: %d (ret: %d)",
3682 app->pid, ret);
3683 } else {
3684 DBG("UST app start session failed. Application is dead.");
3685 }
3686 goto error_unlock;
3687 }
3688
3689 /* Indicate that the session has been started once */
3690 ua_sess->started = 1;
3691
3692 pthread_mutex_unlock(&ua_sess->lock);
3693
3694 health_code_update();
3695
3696 /* Quiescent wait after starting trace */
3697 ret = ustctl_wait_quiescent(app->sock);
3698 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3699 ERR("UST app wait quiescent failed for app pid %d ret %d",
3700 app->pid, ret);
3701 }
3702
3703 end:
3704 rcu_read_unlock();
3705 health_code_update();
3706 return 0;
3707
3708 error_unlock:
3709 pthread_mutex_unlock(&ua_sess->lock);
3710 rcu_read_unlock();
3711 health_code_update();
3712 return -1;
3713 }
3714
3715 /*
3716 * Stop tracing for a specific UST session and app.
3717 */
3718 static
3719 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
3720 {
3721 int ret = 0;
3722 struct ust_app_session *ua_sess;
3723 struct ust_registry_session *registry;
3724
3725 DBG("Stopping tracing for ust app pid %d", app->pid);
3726
3727 rcu_read_lock();
3728
3729 if (!app->compatible) {
3730 goto end_no_session;
3731 }
3732
3733 ua_sess = lookup_session_by_app(usess, app);
3734 if (ua_sess == NULL) {
3735 goto end_no_session;
3736 }
3737
3738 pthread_mutex_lock(&ua_sess->lock);
3739
3740 /*
3741 * If started = 0, it means that stop trace has been called for a session
3742	 * that was never started. It's possible since we can have a failed start
3743	 * from either the application manager thread or the command thread. Simply
3744 * indicate that this is a stop error.
3745 */
3746 if (!ua_sess->started) {
3747 goto error_rcu_unlock;
3748 }
3749
3750 health_code_update();
3751
3752 /* This inhibits UST tracing */
3753 ret = ustctl_stop_session(app->sock, ua_sess->handle);
3754 if (ret < 0) {
3755 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3756 ERR("Error stopping tracing for app pid: %d (ret: %d)",
3757 app->pid, ret);
3758 } else {
3759 DBG("UST app stop session failed. Application is dead.");
3760 }
3761 goto error_rcu_unlock;
3762 }
3763
3764 health_code_update();
3765
3766 /* Quiescent wait after stopping trace */
3767 ret = ustctl_wait_quiescent(app->sock);
3768 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3769 ERR("UST app wait quiescent failed for app pid %d ret %d",
3770 app->pid, ret);
3771 }
3772
3773 health_code_update();
3774
3775 registry = get_session_registry(ua_sess);
3776 assert(registry);
3777
3778 if (!registry->metadata_closed) {
3779 /* Push metadata for application before freeing the application. */
3780 (void) push_metadata(registry, ua_sess->consumer);
3781 }
3782
3783 pthread_mutex_unlock(&ua_sess->lock);
3784 end_no_session:
3785 rcu_read_unlock();
3786 health_code_update();
3787 return 0;
3788
3789 error_rcu_unlock:
3790 pthread_mutex_unlock(&ua_sess->lock);
3791 rcu_read_unlock();
3792 health_code_update();
3793 return -1;
3794 }
3795
3796 /*
3797 * Flush buffers for a specific UST session and app.
3798 */
3799 static
3800 int ust_app_flush_trace(struct ltt_ust_session *usess, struct ust_app *app)
3801 {
3802 int ret = 0;
3803 struct lttng_ht_iter iter;
3804 struct ust_app_session *ua_sess;
3805 struct ust_app_channel *ua_chan;
3806
3807 DBG("Flushing buffers for ust app pid %d", app->pid);
3808
3809 rcu_read_lock();
3810
3811 if (!app->compatible) {
3812 goto end_no_session;
3813 }
3814
3815 ua_sess = lookup_session_by_app(usess, app);
3816 if (ua_sess == NULL) {
3817 goto end_no_session;
3818 }
3819
3820 pthread_mutex_lock(&ua_sess->lock);
3821
3822 health_code_update();
3823
3824 /* Flushing buffers */
3825 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
3826 node.node) {
3827 health_code_update();
3828 assert(ua_chan->is_sent);
3829 ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
3830 if (ret < 0) {
3831 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3832 ERR("UST app PID %d channel %s flush failed with ret %d",
3833 app->pid, ua_chan->name, ret);
3834 } else {
3835 DBG3("UST app failed to flush %s. Application is dead.",
3836 ua_chan->name);
3837 /* No need to continue. */
3838 break;
3839 }
3840	/* Continue flushing the remaining buffers */
3841 continue;
3842 }
3843 }
3844
3845 health_code_update();
3846
3847 pthread_mutex_unlock(&ua_sess->lock);
3848 end_no_session:
3849 rcu_read_unlock();
3850 health_code_update();
3851 return 0;
3852 }
3853
3854 /*
3855 * Destroy a specific UST session in apps.
3856 */
3857 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
3858 {
3859 int ret;
3860 struct ust_app_session *ua_sess;
3861 struct lttng_ht_iter iter;
3862 struct lttng_ht_node_ulong *node;
3863
3864 DBG("Destroy tracing for ust app pid %d", app->pid);
3865
3866 rcu_read_lock();
3867
3868 if (!app->compatible) {
3869 goto end;
3870 }
3871
3872 __lookup_session_by_app(usess, app, &iter);
3873 node = lttng_ht_iter_get_node_ulong(&iter);
3874 if (node == NULL) {
3875	/* Session is being deleted or has already been deleted. */
3876 goto end;
3877 }
3878 ua_sess = caa_container_of(node, struct ust_app_session, node);
3879
3880 health_code_update();
3881 destroy_app_session(app, ua_sess);
3882
3883 health_code_update();
3884
3885 /* Quiescent wait after stopping trace */
3886 ret = ustctl_wait_quiescent(app->sock);
3887 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3888 ERR("UST app wait quiescent failed for app pid %d ret %d",
3889 app->pid, ret);
3890 }
3891 end:
3892 rcu_read_unlock();
3893 health_code_update();
3894 return 0;
3895 }
3896
3897 /*
3898 * Start tracing for the UST session.
3899 */
3900 int ust_app_start_trace_all(struct ltt_ust_session *usess)
3901 {
3902 int ret = 0;
3903 struct lttng_ht_iter iter;
3904 struct ust_app *app;
3905
3906 DBG("Starting all UST traces");
3907
3908 rcu_read_lock();
3909
3910 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3911 ret = ust_app_start_trace(usess, app);
3912 if (ret < 0) {
3913	/* Continue to the next app even on error */
3914 continue;
3915 }
3916 }
3917
3918 rcu_read_unlock();
3919
3920 return 0;
3921 }
3922
3923 /*
3924	 * Stop tracing for the UST session.
3925 */
3926 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
3927 {
3928 int ret = 0;
3929 struct lttng_ht_iter iter;
3930 struct ust_app *app;
3931
3932 DBG("Stopping all UST traces");
3933
3934 rcu_read_lock();
3935
3936 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3937 ret = ust_app_stop_trace(usess, app);
3938 if (ret < 0) {
3939	/* Continue to the next app even on error */
3940 continue;
3941 }
3942 }
3943
3944 /* Flush buffers */
3945 switch (usess->buffer_type) {
3946 case LTTNG_BUFFER_PER_UID:
3947 {
3948 struct buffer_reg_uid *reg;
3949
3950	/* Flush all per UID buffers associated with that session. */
3951 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
3952 struct buffer_reg_channel *reg_chan;
3953 struct consumer_socket *socket;
3954
3955 /* Get the consumer socket to use for flushing the channels. */
3956 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
3957 usess->consumer);
3958 if (!socket) {
3959 /* Ignore request if no consumer is found for the session. */
3960 continue;
3961 }
3962
3963 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
3964 reg_chan, node.node) {
3965 /*
3966 * The following call prints its own error messages, so its return
3967 * code is of little importance; whatever happens, we have to try
3968 * to flush them all.
3969 */
3970 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
3971 }
3972 }
3973 break;
3974 }
3975 case LTTNG_BUFFER_PER_PID:
3976 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3977 ret = ust_app_flush_trace(usess, app);
3978 if (ret < 0) {
3979 /* Continue to the next app even on error. */
3980 continue;
3981 }
3982 }
3983 break;
3984 default:
3985 assert(0);
3986 break;
3987 }
3988
3989 rcu_read_unlock();
3990
3991 return 0;
3992 }
3993
3994 /*
3995 * Destroy the app UST session of every registered application.
3996 */
3997 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
3998 {
3999 int ret = 0;
4000 struct lttng_ht_iter iter;
4001 struct ust_app *app;
4002
4003 DBG("Destroy all UST traces");
4004
4005 rcu_read_lock();
4006
4007 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4008 ret = destroy_trace(usess, app);
4009 if (ret < 0) {
4010 /* Continue to the next app even on error. */
4011 continue;
4012 }
4013 }
4014
4015 rcu_read_unlock();
4016
4017 return 0;
4018 }
4019
4020 /*
4021 * Add channels/events from the UST global domain to the app registered at sock.
4022 */
4023 void ust_app_global_update(struct ltt_ust_session *usess, int sock)
4024 {
4025 int ret = 0;
4026 struct lttng_ht_iter iter, uiter, iter_ctx;
4027 struct ust_app *app;
4028 struct ust_app_session *ua_sess = NULL;
4029 struct ust_app_channel *ua_chan;
4030 struct ust_app_event *ua_event;
4031 struct ust_app_ctx *ua_ctx;
4032
4033 assert(usess);
4034 assert(sock >= 0);
4035
4036 DBG2("UST app global update for app sock %d for session id %d", sock,
4037 usess->id);
4038
4039 rcu_read_lock();
4040
4041 app = find_app_by_sock(sock);
4042 if (app == NULL) {
4043 /*
4044 * The application may have unregistered already, so this can happen;
4045 * simply stop the update.
4046 */
4047 DBG3("UST app update failed to find app sock %d", sock);
4048 goto error;
4049 }
4050
4051 if (!app->compatible) {
4052 goto error;
4053 }
4054
4055 ret = create_ust_app_session(usess, app, &ua_sess, NULL);
4056 if (ret < 0) {
4057 /* Tracer is probably gone or ENOMEM. */
4058 goto error;
4059 }
4060 assert(ua_sess);
4061
4062 pthread_mutex_lock(&ua_sess->lock);
4063
4064 /*
4065 * We can safely iterate here over the UST app session channels since
4066 * create_ust_app_session() above made a shadow copy of the UST global
4067 * domain from the ltt ust session.
4068 */
4069 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4070 node.node) {
4071 /*
4072 * For a metadata channel, handle it differently.
4073 */
4074 if (!strncmp(ua_chan->name, DEFAULT_METADATA_NAME,
4075 sizeof(ua_chan->name))) {
4076 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
4077 &ua_chan->attr);
4078 if (ret < 0) {
4079 goto error_unlock;
4080 }
4081 /* Remove it from the hash table and continue! */
4082 ret = lttng_ht_del(ua_sess->channels, &iter);
4083 assert(!ret);
4084 delete_ust_app_channel(-1, ua_chan, app);
4085 continue;
4086 } else {
4087 ret = do_create_channel(app, usess, ua_sess, ua_chan);
4088 if (ret < 0) {
4089 /*
4090 * Stop everything. On error, either the application failed, no more
4091 * file descriptors are available, or we hit ENOMEM, so stopping here
4092 * is the only thing we can do for now.
4093 */
4094 goto error_unlock;
4095 }
4096 }
4097
4098 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter_ctx.iter, ua_ctx,
4099 node.node) {
4100 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
4101 if (ret < 0) {
4102 goto error_unlock;
4103 }
4104 }
4105
4106
4107 /* For each event. */
4108 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
4109 node.node) {
4110 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
4111 if (ret < 0) {
4112 goto error_unlock;
4113 }
4114 }
4115 }
4116
4117 pthread_mutex_unlock(&ua_sess->lock);
4118
4119 if (usess->start_trace) {
4120 ret = ust_app_start_trace(usess, app);
4121 if (ret < 0) {
4122 goto error;
4123 }
4124
4125 DBG2("UST trace started for app pid %d", app->pid);
4126 }
4127
4128 /* Everything went well at this point. */
4129 rcu_read_unlock();
4130 return;
4131
4132 error_unlock:
4133 pthread_mutex_unlock(&ua_sess->lock);
4134 error:
4135 if (ua_sess) {
4136 destroy_app_session(app, ua_sess);
4137 }
4138 rcu_read_unlock();
4139 return;
4140 }
4141
4142 /*
4143 * Add context to a specific channel for global UST domain.
4144 */
4145 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
4146 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
4147 {
4148 int ret = 0;
4149 struct lttng_ht_node_str *ua_chan_node;
4150 struct lttng_ht_iter iter, uiter;
4151 struct ust_app_channel *ua_chan = NULL;
4152 struct ust_app_session *ua_sess;
4153 struct ust_app *app;
4154
4155 rcu_read_lock();
4156
4157 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4158 if (!app->compatible) {
4159 /*
4160 * TODO: In time, we should notify the caller of this error by
4161 * telling it that this is a version error.
4162 */
4163 continue;
4164 }
4165 ua_sess = lookup_session_by_app(usess, app);
4166 if (ua_sess == NULL) {
4167 continue;
4168 }
4169
4170 pthread_mutex_lock(&ua_sess->lock);
4171 /* Lookup channel in the ust app session */
4172 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4173 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4174 if (ua_chan_node == NULL) {
4175 goto next_app;
4176 }
4177 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
4178 node);
4179 ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
4180 if (ret < 0) {
4181 goto next_app;
4182 }
4183 next_app:
4184 pthread_mutex_unlock(&ua_sess->lock);
4185 }
4186
4187 rcu_read_unlock();
4188 return ret;
4189 }
4190
4191 /*
4192 * Enable event for a channel from a UST session for a specific PID.
4193 */
4194 int ust_app_enable_event_pid(struct ltt_ust_session *usess,
4195 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4196 {
4197 int ret = 0;
4198 struct lttng_ht_iter iter;
4199 struct lttng_ht_node_str *ua_chan_node;
4200 struct ust_app *app;
4201 struct ust_app_session *ua_sess;
4202 struct ust_app_channel *ua_chan;
4203 struct ust_app_event *ua_event;
4204
4205 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
4206
4207 rcu_read_lock();
4208
4209 app = ust_app_find_by_pid(pid);
4210 if (app == NULL) {
4211 ERR("UST app enable event per PID %d not found", pid);
4212 ret = -1;
4213 goto end;
4214 }
4215
4216 if (!app->compatible) {
4217 ret = 0;
4218 goto end;
4219 }
4220
4221 ua_sess = lookup_session_by_app(usess, app);
4222 if (!ua_sess) {
4223 /* The application has a problem or is probably dead. */
4224 ret = 0;
4225 goto end;
4226 }
4227
4228 pthread_mutex_lock(&ua_sess->lock);
4229 /* Lookup channel in the ust app session */
4230 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4231 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4232 /* If the channel is not found, there is a code flow error */
4233 assert(ua_chan_node);
4234
4235 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4236
4237 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4238 uevent->filter, uevent->attr.loglevel);
4239 if (ua_event == NULL) {
4240 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4241 if (ret < 0) {
4242 goto end_unlock;
4243 }
4244 } else {
4245 ret = enable_ust_app_event(ua_sess, ua_event, app);
4246 if (ret < 0) {
4247 goto end_unlock;
4248 }
4249 }
4250
4251 end_unlock:
4252 pthread_mutex_unlock(&ua_sess->lock);
4253 end:
4254 rcu_read_unlock();
4255 return ret;
4256 }
4257
4258 /*
4259 * Disable event for a channel from a UST session for a specific PID.
4260 */
4261 int ust_app_disable_event_pid(struct ltt_ust_session *usess,
4262 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4263 {
4264 int ret = 0;
4265 struct lttng_ht_iter iter;
4266 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
4267 struct ust_app *app;
4268 struct ust_app_session *ua_sess;
4269 struct ust_app_channel *ua_chan;
4270 struct ust_app_event *ua_event;
4271
4272 DBG("UST app disabling event %s for PID %d", uevent->attr.name, pid);
4273
4274 rcu_read_lock();
4275
4276 app = ust_app_find_by_pid(pid);
4277 if (app == NULL) {
4278 ERR("UST app disable event per PID %d not found", pid);
4279 ret = -1;
4280 goto error;
4281 }
4282
4283 if (!app->compatible) {
4284 ret = 0;
4285 goto error;
4286 }
4287
4288 ua_sess = lookup_session_by_app(usess, app);
4289 if (!ua_sess) {
4290 /* The application has a problem or is probably dead. */
4291 goto error;
4292 }
4293
4294 /* Lookup channel in the ust app session */
4295 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4296 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4297 if (ua_chan_node == NULL) {
4298 /* Channel does not exist, skip disabling */
4299 goto error;
4300 }
4301 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4302
4303 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &iter);
4304 ua_event_node = lttng_ht_iter_get_node_str(&iter);
4305 if (ua_event_node == NULL) {
4306 /* Event does not exist, skip disabling */
4307 goto error;
4308 }
4309 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
4310
4311 ret = disable_ust_app_event(ua_sess, ua_event, app);
4312 if (ret < 0) {
4313 goto error;
4314 }
4315
4316 error:
4317 rcu_read_unlock();
4318 return ret;
4319 }
4320
4321 /*
4322 * Calibrate registered applications.
4323 */
4324 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4325 {
4326 int ret = 0;
4327 struct lttng_ht_iter iter;
4328 struct ust_app *app;
4329
4330 rcu_read_lock();
4331
4332 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4333 if (!app->compatible) {
4334 /*
4335 * TODO: In time, we should notify the caller of this error by
4336 * telling it that this is a version error.
4337 */
4338 continue;
4339 }
4340
4341 health_code_update();
4342
4343 ret = ustctl_calibrate(app->sock, calibrate);
4344 if (ret < 0) {
4345 switch (ret) {
4346 case -ENOSYS:
4347 /* Means that it's not implemented on the tracer side. */
4348 ret = 0;
4349 break;
4350 default:
4351 DBG2("Calibrate app PID %d returned with error %d",
4352 app->pid, ret);
4353 break;
4354 }
4355 }
4356 }
4357
4358 DBG("UST app global domain calibration finished");
4359
4360 rcu_read_unlock();
4361
4362 health_code_update();
4363
4364 return ret;
4365 }
4366
4367 /*
4368 * Receive registration and populate the given msg structure.
4369 *
4370 * On success, return 0; otherwise, a negative value returned by the ustctl call.
4371 */
4372 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4373 {
4374 int ret;
4375 uint32_t pid, ppid, uid, gid;
4376
4377 assert(msg);
4378
4379 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4380 &pid, &ppid, &uid, &gid,
4381 &msg->bits_per_long,
4382 &msg->uint8_t_alignment,
4383 &msg->uint16_t_alignment,
4384 &msg->uint32_t_alignment,
4385 &msg->uint64_t_alignment,
4386 &msg->long_alignment,
4387 &msg->byte_order,
4388 msg->name);
4389 if (ret < 0) {
4390 switch (-ret) {
4391 case EPIPE:
4392 case ECONNRESET:
4393 case LTTNG_UST_ERR_EXITING:
4394 DBG3("UST app recv reg message failed. Application died");
4395 break;
4396 case LTTNG_UST_ERR_UNSUP_MAJOR:
4397 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
4398 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
4399 LTTNG_UST_ABI_MINOR_VERSION);
4400 break;
4401 default:
4402 ERR("UST app recv reg message failed with ret %d", ret);
4403 break;
4404 }
4405 goto error;
4406 }
4407 msg->pid = (pid_t) pid;
4408 msg->ppid = (pid_t) ppid;
4409 msg->uid = (uid_t) uid;
4410 msg->gid = (gid_t) gid;
4411
4412 error:
4413 return ret;
4414 }
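
/*
 * Compatibility is typically decided later from msg->major/minor. A
 * hypothetical sketch of such a check, assuming an exact ABI major match is
 * required (the helper name and the exact policy are assumptions, not part
 * of ust-app.c):
 */
static inline int example_abi_is_compatible(const struct ust_register_msg *msg)
{
	/* Assumed policy: only the ABI major version has to match. */
	return msg->major == LTTNG_UST_ABI_MAJOR_VERSION;
}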
4415
4416 /*
4417 * Return a ust app channel object using the application object and the
4418 * channel object descriptor as a key. If not found, NULL is returned. An
4419 * RCU read-side lock MUST be acquired before calling this function.
4420 */
4421 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4422 int objd)
4423 {
4424 struct lttng_ht_node_ulong *node;
4425 struct lttng_ht_iter iter;
4426 struct ust_app_channel *ua_chan = NULL;
4427
4428 assert(app);
4429
4430 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4431 node = lttng_ht_iter_get_node_ulong(&iter);
4432 if (node == NULL) {
4433 DBG2("UST app channel find by objd %d not found", objd);
4434 goto error;
4435 }
4436
4437 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4438
4439 error:
4440 return ua_chan;
4441 }
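
/*
 * Usage sketch (the caller name is hypothetical): the RCU read-side lock must
 * cover both the lookup and every dereference of the returned channel, since
 * the object may be reclaimed after a grace period once the lock is released.
 */
static void example_use_channel_by_objd(struct ust_app *app, int objd)
{
	struct ust_app_channel *ua_chan;

	rcu_read_lock();
	ua_chan = find_channel_by_objd(app, objd);
	if (ua_chan) {
		DBG3("Channel %s found for objd %d", ua_chan->name, objd);
	}
	rcu_read_unlock();
}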
4442
4443 /*
4444 * Reply to a register channel notification from an application on the notify
4445 * socket. The channel metadata is also created.
4446 *
4447 * The session UST registry lock is acquired in this function.
4448 *
4449 * On success, 0 is returned; otherwise, a negative value.
4450 */
4451 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4452 size_t nr_fields, struct ustctl_field *fields)
4453 {
4454 int ret, ret_code = 0;
4455 uint32_t chan_id, reg_count;
4456 uint64_t chan_reg_key;
4457 enum ustctl_channel_header type;
4458 struct ust_app *app;
4459 struct ust_app_channel *ua_chan;
4460 struct ust_app_session *ua_sess;
4461 struct ust_registry_session *registry;
4462 struct ust_registry_channel *chan_reg;
4463
4464 rcu_read_lock();
4465
4466 /* Lookup application. If not found, there is a code flow error. */
4467 app = find_app_by_notify_sock(sock);
4468 if (!app) {
4469 DBG("Application socket %d is being teardown. Abort event notify",
4470 sock);
4471 ret = 0;
4472 free(fields);
4473 goto error_rcu_unlock;
4474 }
4475
4476 /* Lookup channel by UST object descriptor. */
4477 ua_chan = find_channel_by_objd(app, cobjd);
4478 if (!ua_chan) {
4479 DBG("Application channel is being teardown. Abort event notify");
4480 ret = 0;
4481 free(fields);
4482 goto error_rcu_unlock;
4483 }
4484
4485 assert(ua_chan->session);
4486 ua_sess = ua_chan->session;
4487
4488 /* Get right session registry depending on the session buffer type. */
4489 registry = get_session_registry(ua_sess);
4490 assert(registry);
4491
4492 /* Depending on the buffer type, a different channel key is used. */
4493 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4494 chan_reg_key = ua_chan->tracing_channel_id;
4495 } else {
4496 chan_reg_key = ua_chan->key;
4497 }
4498
4499 pthread_mutex_lock(&registry->lock);
4500
4501 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4502 assert(chan_reg);
4503
4504 if (!chan_reg->register_done) {
4505 reg_count = ust_registry_get_event_count(chan_reg);
4506 if (reg_count < 31) {
4507 type = USTCTL_CHANNEL_HEADER_COMPACT;
4508 } else {
4509 type = USTCTL_CHANNEL_HEADER_LARGE;
4510 }
4511
4512 chan_reg->nr_ctx_fields = nr_fields;
4513 chan_reg->ctx_fields = fields;
4514 chan_reg->header_type = type;
4515 } else {
4516 /* Get current already assigned values. */
4517 type = chan_reg->header_type;
4518 free(fields);
4519 /* Set to NULL so the error path does not do a double free. */
4520 fields = NULL;
4521 }
4522 /* Channel id is set during the object creation. */
4523 chan_id = chan_reg->chan_id;
4524
4525 /* Append to metadata */
4526 if (!chan_reg->metadata_dumped) {
4527 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4528 if (ret_code) {
4529 ERR("Error appending channel metadata (errno = %d)", ret_code);
4530 goto reply;
4531 }
4532 }
4533
4534 reply:
4535 DBG3("UST app replying to register channel key %" PRIu64
4536 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4537 ret_code);
4538
4539 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4540 if (ret < 0) {
4541 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4542 ERR("UST app reply channel failed with ret %d", ret);
4543 } else {
4544 DBG3("UST app reply channel failed. Application died");
4545 }
4546 goto error;
4547 }
4548
4549 /* This channel registry registration is completed. */
4550 chan_reg->register_done = 1;
4551
4552 error:
4553 pthread_mutex_unlock(&registry->lock);
4554 error_rcu_unlock:
4555 rcu_read_unlock();
4556 if (ret) {
4557 free(fields);
4558 }
4559 return ret;
4560 }
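
/*
 * The header type decision above can be read as a small predicate: the
 * compact event header presumably only has room for a limited event id
 * range, so once a channel registry holds about 31 events or more the large
 * header is selected. A sketch of that choice in isolation (helper name is
 * illustrative):
 */
static inline enum ustctl_channel_header example_pick_header_type(
		uint32_t reg_count)
{
	return reg_count < 31 ? USTCTL_CHANNEL_HEADER_COMPACT :
			USTCTL_CHANNEL_HEADER_LARGE;
}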
4561
4562 /*
4563 * Add event to the UST channel registry. When the event is added to the
4564 * registry, the metadata is also created. Once done, this replies to the
4565 * application with the appropriate error code.
4566 *
4567 * The session UST registry lock is acquired in the function.
4568 *
4569 * On success, 0 is returned; otherwise, a negative value.
4570 */
4571 static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
4572 char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
4573 char *model_emf_uri)
4574 {
4575 int ret, ret_code;
4576 uint32_t event_id = 0;
4577 uint64_t chan_reg_key;
4578 struct ust_app *app;
4579 struct ust_app_channel *ua_chan;
4580 struct ust_app_session *ua_sess;
4581 struct ust_registry_session *registry;
4582
4583 rcu_read_lock();
4584
4585 /* Lookup application. If not found, there is a code flow error. */
4586 app = find_app_by_notify_sock(sock);
4587 if (!app) {
4588 DBG("Application socket %d is being teardown. Abort event notify",
4589 sock);
4590 ret = 0;
4591 free(sig);
4592 free(fields);
4593 free(model_emf_uri);
4594 goto error_rcu_unlock;
4595 }
4596
4597 /* Lookup channel by UST object descriptor. */
4598 ua_chan = find_channel_by_objd(app, cobjd);
4599 if (!ua_chan) {
4600 DBG("Application channel is being teardown. Abort event notify");
4601 ret = 0;
4602 free(sig);
4603 free(fields);
4604 free(model_emf_uri);
4605 goto error_rcu_unlock;
4606 }
4607
4608 assert(ua_chan->session);
4609 ua_sess = ua_chan->session;
4610
4611 registry = get_session_registry(ua_sess);
4612 assert(registry);
4613
4614 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4615 chan_reg_key = ua_chan->tracing_channel_id;
4616 } else {
4617 chan_reg_key = ua_chan->key;
4618 }
4619
4620 pthread_mutex_lock(&registry->lock);
4621
4622 /*
4623 * From this point on, this call acquires ownership of sig, fields and
4624 * model_emf_uri, meaning any needed free is done inside it. These three
4625 * variables MUST NOT be read or written after this.
4626 */
4627 ret_code = ust_registry_create_event(registry, chan_reg_key,
4628 sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
4629 model_emf_uri, ua_sess->buffer_type, &event_id);
4630
4631 /*
4632 * The return value is sent back to the application via ustctl so that it
4633 * can be notified in case of an error. It is important not to return a
4634 * negative error here or else the application will get closed.
4635 */
4636 ret = ustctl_reply_register_event(sock, event_id, ret_code);
4637 if (ret < 0) {
4638 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4639 ERR("UST app reply event failed with ret %d", ret);
4640 } else {
4641 DBG3("UST app reply event failed. Application died");
4642 }
4643 /*
4644 * No need to wipe the created event since the application socket will
4645 * get closed on error, which cleans up everything by itself.
4646 */
4647 goto error;
4648 }
4649
4650 DBG3("UST registry event %s with id %" PRId32 " added successfully",
4651 name, event_id);
4652
4653 error:
4654 pthread_mutex_unlock(&registry->lock);
4655 error_rcu_unlock:
4656 rcu_read_unlock();
4657 return ret;
4658 }
4659
4660 /*
4661 * Handle application notification through the given notify socket.
4662 *
4663 * Return 0 on success or else a negative value.
4664 */
4665 int ust_app_recv_notify(int sock)
4666 {
4667 int ret;
4668 enum ustctl_notify_cmd cmd;
4669
4670 DBG3("UST app receiving notify from sock %d", sock);
4671
4672 ret = ustctl_recv_notify(sock, &cmd);
4673 if (ret < 0) {
4674 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4675 ERR("UST app recv notify failed with ret %d", ret);
4676 } else {
4677 DBG3("UST app recv notify failed. Application died");
4678 }
4679 goto error;
4680 }
4681
4682 switch (cmd) {
4683 case USTCTL_NOTIFY_CMD_EVENT:
4684 {
4685 int sobjd, cobjd, loglevel;
4686 char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
4687 size_t nr_fields;
4688 struct ustctl_field *fields;
4689
4690 DBG2("UST app ustctl register event received");
4691
4692 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
4693 &sig, &nr_fields, &fields, &model_emf_uri);
4694 if (ret < 0) {
4695 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4696 ERR("UST app recv event failed with ret %d", ret);
4697 } else {
4698 DBG3("UST app recv event failed. Application died");
4699 }
4700 goto error;
4701 }
4702
4703 /*
4704 * Add the event coming from the notify socket to the UST registry. This
4705 * call will free sig, fields and model_emf_uri if needed. This code path
4706 * loses the ownership of these variables and transfers it to
4707 * add_event_ust_registry().
4708 */
4709 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
4710 fields, loglevel, model_emf_uri);
4711 if (ret < 0) {
4712 goto error;
4713 }
4714
4715 break;
4716 }
4717 case USTCTL_NOTIFY_CMD_CHANNEL:
4718 {
4719 int sobjd, cobjd;
4720 size_t nr_fields;
4721 struct ustctl_field *fields;
4722
4723 DBG2("UST app ustctl register channel received");
4724
4725 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
4726 &fields);
4727 if (ret < 0) {
4728 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4729 ERR("UST app recv channel failed with ret %d", ret);
4730 } else {
4731 DBG3("UST app recv channel failed. Application died");
4732 }
4733 goto error;
4734 }
4735
4736 /*
4737 * Ownership of fields is transferred to this function call, meaning that
4738 * it will be freed if needed. After this, it is invalid to access fields
4739 * or clean it up.
4740 */
4741 ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
4742 fields);
4743 if (ret < 0) {
4744 goto error;
4745 }
4746
4747 break;
4748 }
4749 default:
4750 /* Should NEVER happen. */
4751 assert(0);
4752 }
4753
4754 error:
4755 return ret;
4756 }
4757
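/*
 * Sketch of a dispatch hook (hypothetical; the sessiond drives this from its
 * own notify thread): when a poll loop reports the notify socket readable,
 * one command is consumed and a negative return means the socket should be
 * unregistered and closed.
 */
static void example_on_notify_sock_readable(int sock)
{
	if (ust_app_recv_notify(sock) < 0) {
		/* Socket is unusable or the application died; tear it down. */
		ust_app_notify_sock_unregister(sock);
	}
}
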
4758 /*
4759 * Once the notify socket hangs up, this is called. First, it tries to find the
4760 * corresponding application. On failure, the call_rcu to close the socket is
4761 * executed. If an application is found, it tries to delete it from the notify
4762 * socket hash table. Whatever the result, it proceeds to the call_rcu.
4763 *
4764 * Note that an object needs to be allocated here so on ENOMEM failure, the
4765 * call RCU is not done but the rest of the cleanup is.
4766 */
4767 void ust_app_notify_sock_unregister(int sock)
4768 {
4769 int err_enomem = 0;
4770 struct lttng_ht_iter iter;
4771 struct ust_app *app;
4772 struct ust_app_notify_sock_obj *obj;
4773
4774 assert(sock >= 0);
4775
4776 rcu_read_lock();
4777
4778 obj = zmalloc(sizeof(*obj));
4779 if (!obj) {
4780 /*
4781 * An ENOMEM here is unfortunate. If this strikes, we continue the
4782 * procedure but the call_rcu will not be called. In this case, we
4783 * accept the fd leak rather than possibly creating an unsynchronized
4784 * state between threads.
4785 *
4786 * TODO: The notify object should be created once the notify socket is
4787 * registered and stored independently from the ust app object. The
4788 * tricky part is to synchronize the teardown of the application and
4789 * this notify object. Let's keep that in mind so we can avoid this
4790 * kind of shenanigans with ENOMEM in the teardown path.
4791 */
4792 err_enomem = 1;
4793 } else {
4794 obj->fd = sock;
4795 }
4796
4797 DBG("UST app notify socket unregister %d", sock);
4798
4799 /*
4800 * Lookup application by notify socket. If this fails, this means that the
4801 * hash table delete has already been done by the application
4802 * unregistration process so we can safely close the notify socket in a
4803 * call RCU.
4804 */
4805 app = find_app_by_notify_sock(sock);
4806 if (!app) {
4807 goto close_socket;
4808 }
4809
4810 iter.iter.node = &app->notify_sock_n.node;
4811
4812 /*
4813 * Whatever happens here, whether we fail or succeed, in both cases we
4814 * have to close the socket after a grace period, so proceed to the
4815 * call_rcu either way. If the deletion is successful, the application is
4816 * no longer visible to other threads, and if it fails it means that it
4817 * was already deleted from the hash table, so either way we just have to
4818 * close the socket.
4819 */
4820 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4821
4822 close_socket:
4823 rcu_read_unlock();
4824
4825 /*
4826 * Close the socket after a grace period so it is not reused before the
4827 * application object is freed, which would create a potential race
4828 * between threads trying to add a unique entry in the global hash table.
4829 */
4830 if (!err_enomem) {
4831 call_rcu(&obj->head, close_notify_sock_rcu);
4832 }
4833 }
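
/*
 * A minimal sketch of what such an RCU callback typically looks like,
 * assuming the obj layout used above (an fd plus an rcu_head). The real
 * close_notify_sock_rcu() used in the call_rcu above is defined elsewhere in
 * this file; this example only illustrates the pattern.
 */
static void example_close_notify_sock_rcu(struct rcu_head *head)
{
	struct ust_app_notify_sock_obj *obj =
			caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Once the grace period has elapsed, no reader can still use this fd. */
	if (close(obj->fd)) {
		PERROR("close notify sock");
	}
	free(obj);
}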
4834
4835 /*
4836 * Destroy a ust app data structure and free its memory.
4837 */
4838 void ust_app_destroy(struct ust_app *app)
4839 {
4840 if (!app) {
4841 return;
4842 }
4843
4844 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4845 }