Copy exclusion data in shadow_copy_event
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health-sessiond.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return the next unique channel key. The counter is pre-incremented under
 * next_channel_key_lock, so the value 0 is never handed out.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t key;

	pthread_mutex_lock(&next_channel_key_lock);
	key = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);

	return key;
}
63
64 /*
65 * Return the atomically incremented value of next_session_id.
66 */
67 static uint64_t get_next_session_id(void)
68 {
69 uint64_t ret;
70
71 pthread_mutex_lock(&next_session_id_lock);
72 ret = ++_next_session_id;
73 pthread_mutex_unlock(&next_session_id_lock);
74 return ret;
75 }
76
77 static void copy_channel_attr_to_ustctl(
78 struct ustctl_consumer_channel_attr *attr,
79 struct lttng_ust_channel_attr *uattr)
80 {
81 /* Copy event attributes since the layout is different. */
82 attr->subbuf_size = uattr->subbuf_size;
83 attr->num_subbuf = uattr->num_subbuf;
84 attr->overwrite = uattr->overwrite;
85 attr->switch_timer_interval = uattr->switch_timer_interval;
86 attr->read_timer_interval = uattr->read_timer_interval;
87 attr->output = uattr->output;
88 }
89
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes which are the event
 * name, the filter bytecode, the loglevel and the exclusion list. Returns 1
 * on a full match, 0 otherwise (cds_lfht match-function convention).
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (event->attr.loglevel != key->loglevel) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel == 0 && event->attr.loglevel == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exists, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exists, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}


	/* Match. */
	return 1;

no_match:
	return 0;
}
164
165 /*
166 * Unique add of an ust app event in the given ht. This uses the custom
167 * ht_match_ust_app_event match function and the event name as hash.
168 */
169 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
170 struct ust_app_event *event)
171 {
172 struct cds_lfht_node *node_ptr;
173 struct ust_app_ht_key key;
174 struct lttng_ht *ht;
175
176 assert(ua_chan);
177 assert(ua_chan->events);
178 assert(event);
179
180 ht = ua_chan->events;
181 key.name = event->attr.name;
182 key.filter = event->filter;
183 key.loglevel = event->attr.loglevel;
184 key.exclusion = event->exclusion;
185
186 node_ptr = cds_lfht_add_unique(ht->ht,
187 ht->hash_fct(event->node.key, lttng_ht_seed),
188 ht_match_ust_app_event, &key, &event->node.node);
189 assert(node_ptr == &event->node.node);
190 }
191
192 /*
193 * Close the notify socket from the given RCU head object. This MUST be called
194 * through a call_rcu().
195 */
196 static void close_notify_sock_rcu(struct rcu_head *head)
197 {
198 int ret;
199 struct ust_app_notify_sock_obj *obj =
200 caa_container_of(head, struct ust_app_notify_sock_obj, head);
201
202 /* Must have a valid fd here. */
203 assert(obj->fd >= 0);
204
205 ret = close(obj->fd);
206 if (ret) {
207 ERR("close notify sock %d RCU", obj->fd);
208 }
209 lttng_fd_put(LTTNG_FD_APPS, 1);
210
211 free(obj);
212 }
213
214 /*
215 * Return the session registry according to the buffer type of the given
216 * session.
217 *
218 * A registry per UID object MUST exists before calling this function or else
219 * it assert() if not found. RCU read side lock must be acquired.
220 */
221 static struct ust_registry_session *get_session_registry(
222 struct ust_app_session *ua_sess)
223 {
224 struct ust_registry_session *registry = NULL;
225
226 assert(ua_sess);
227
228 switch (ua_sess->buffer_type) {
229 case LTTNG_BUFFER_PER_PID:
230 {
231 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
232 if (!reg_pid) {
233 goto error;
234 }
235 registry = reg_pid->registry->reg.ust;
236 break;
237 }
238 case LTTNG_BUFFER_PER_UID:
239 {
240 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
241 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
242 if (!reg_uid) {
243 goto error;
244 }
245 registry = reg_uid->registry->reg.ust;
246 break;
247 }
248 default:
249 assert(0);
250 };
251
252 error:
253 return registry;
254 }
255
256 /*
257 * Delete ust context safely. RCU read lock must be held before calling
258 * this function.
259 */
260 static
261 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
262 {
263 int ret;
264
265 assert(ua_ctx);
266
267 if (ua_ctx->obj) {
268 ret = ustctl_release_object(sock, ua_ctx->obj);
269 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
270 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
271 sock, ua_ctx->obj->handle, ret);
272 }
273 free(ua_ctx->obj);
274 }
275 free(ua_ctx);
276 }
277
278 /*
279 * Delete ust app event safely. RCU read lock must be held before calling
280 * this function.
281 */
282 static
283 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
284 {
285 int ret;
286
287 assert(ua_event);
288
289 free(ua_event->filter);
290 if (ua_event->exclusion != NULL)
291 free(ua_event->exclusion);
292 if (ua_event->obj != NULL) {
293 ret = ustctl_release_object(sock, ua_event->obj);
294 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
295 ERR("UST app sock %d release event obj failed with ret %d",
296 sock, ret);
297 }
298 free(ua_event->obj);
299 }
300 free(ua_event);
301 }
302
303 /*
304 * Release ust data object of the given stream.
305 *
306 * Return 0 on success or else a negative value.
307 */
308 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
309 {
310 int ret = 0;
311
312 assert(stream);
313
314 if (stream->obj) {
315 ret = ustctl_release_object(sock, stream->obj);
316 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
317 ERR("UST app sock %d release stream obj failed with ret %d",
318 sock, ret);
319 }
320 lttng_fd_put(LTTNG_FD_APPS, 2);
321 free(stream->obj);
322 }
323
324 return ret;
325 }
326
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	assert(stream);

	/* Best-effort release; the stream is freed regardless of the result. */
	(void) release_ust_app_stream(sock, stream);
	free(stream);
}
339
340 /*
341 * We need to execute ht_destroy outside of RCU read-side critical
342 * section and outside of call_rcu thread, so we postpone its execution
343 * using ht_cleanup_push. It is simpler than to change the semantic of
344 * the many callers of delete_ust_app_session().
345 */
346 static
347 void delete_ust_app_channel_rcu(struct rcu_head *head)
348 {
349 struct ust_app_channel *ua_chan =
350 caa_container_of(head, struct ust_app_channel, rcu_head);
351
352 ht_cleanup_push(ua_chan->ctx);
353 ht_cleanup_push(ua_chan->events);
354 free(ua_chan);
355 }
356
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * Tears down, in order: the streams, the contexts, the events, the per-PID
 * registry channel entry (if applicable), and finally the tracer-side
 * channel object. The channel structure itself is freed through call_rcu().
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		/* The context is on both a list and a hash table; unlink both. */
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		lttng_ht_del(app->ust_objd, &iter);
		ret = ustctl_release_object(sock, ua_chan->obj);
		/* EPIPE / EXITING mean the application died; not an error. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		/* Drop one count from the apps fd counter. */
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	/* ctx/events hash tables are destroyed later, outside RCU read-side. */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
420
/*
 * Push metadata to consumer socket.
 *
 * The socket lock MUST be acquired.
 * The ust app session lock MUST be acquired.
 *
 * On success, return the len of metadata pushed or else a negative value.
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset;
	ssize_t ret_val;

	assert(registry);
	assert(socket);

	/*
	 * On a push metadata error either the consumer is dead or the metadata
	 * channel has been destroyed because its endpoint might have died (e.g:
	 * relayd). If so, the metadata closed flag is set to 1 so we deny pushing
	 * metadata again which is not valid anymore on the consumer side.
	 *
	 * The ust app session mutex locked allows us to make this check without
	 * the registry lock.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	pthread_mutex_lock(&registry->lock);

	/* Sample the unsent window [metadata_len_sent, metadata_len). */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			/* Still emit a zero-length push to the consumer. */
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't send out. */
	memcpy(metadata_str, registry->metadata + offset, len);
	/* Mark the window as sent before dropping the registry lock. */
	registry->metadata_len_sent += len;

push_data:
	/*
	 * The registry lock is released before the (potentially blocking)
	 * consumer call; the snapshot of len/offset above stays valid.
	 */
	pthread_mutex_unlock(&registry->lock);
	ret = consumer_push_metadata(socket, registry->metadata_key,
			metadata_str, len, offset);
	if (ret < 0) {
		ret_val = ret;
		goto error_push;
	}

	free(metadata_str);
	return len;

end:
error:
	pthread_mutex_unlock(&registry->lock);
error_push:
	/* metadata_str may be NULL here; free(NULL) is a no-op. */
	free(metadata_str);
	return ret_val;
}
498
/*
 * For a given application and session, push metadata to consumer. The session
 * lock MUST be acquired here before calling this.
 * Either sock or consumer is required : if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 *
 * Return 0 on success else a negative error. On error, the registry is
 * flagged metadata_closed so no further push is attempted.
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/*
	 * Means that no metadata was assigned to the session. This can happens if
	 * no start has been done previously.
	 */
	if (!registry->metadata_key) {
		ret_val = 0;
		goto end_rcu_unlock;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error_rcu_unlock;
	}

	/*
	 * TODO: Currently, we hold the socket lock around sampling of the next
	 * metadata segment to ensure we send metadata over the consumer socket in
	 * the correct order. This makes the registry lock nest inside the socket
	 * lock.
	 *
	 * Please note that this is a temporary measure: we should move this lock
	 * back into ust_consumer_push_metadata() when the consumer gets the
	 * ability to reorder the metadata it receives.
	 */
	pthread_mutex_lock(socket->lock);
	ret = ust_app_push_metadata(registry, socket, 0);
	pthread_mutex_unlock(socket->lock);
	if (ret < 0) {
		ret_val = ret;
		goto error_rcu_unlock;
	}

	rcu_read_unlock();
	return 0;

error_rcu_unlock:
	/*
	 * On error, flag the registry that the metadata is closed. We were unable
	 * to push anything and this means that either the consumer is not
	 * responding or the metadata cache has been destroyed on the consumer.
	 */
	registry->metadata_closed = 1;
end_rcu_unlock:
	rcu_read_unlock();
	return ret_val;
}
569
570 /*
571 * Send to the consumer a close metadata command for the given session. Once
572 * done, the metadata channel is deleted and the session metadata pointer is
573 * nullified. The session lock MUST be acquired here unless the application is
574 * in the destroy path.
575 *
576 * Return 0 on success else a negative value.
577 */
578 static int close_metadata(struct ust_registry_session *registry,
579 struct consumer_output *consumer)
580 {
581 int ret;
582 struct consumer_socket *socket;
583
584 assert(registry);
585 assert(consumer);
586
587 rcu_read_lock();
588
589 if (!registry->metadata_key || registry->metadata_closed) {
590 ret = 0;
591 goto end;
592 }
593
594 /* Get consumer socket to use to push the metadata.*/
595 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
596 consumer);
597 if (!socket) {
598 ret = -1;
599 goto error;
600 }
601
602 ret = consumer_close_metadata(socket, registry->metadata_key);
603 if (ret < 0) {
604 goto error;
605 }
606
607 error:
608 /*
609 * Metadata closed. Even on error this means that the consumer is not
610 * responding or not found so either way a second close should NOT be emit
611 * for this registry.
612 */
613 registry->metadata_closed = 1;
614 end:
615 rcu_read_unlock();
616 return ret;
617 }
618
619 /*
620 * We need to execute ht_destroy outside of RCU read-side critical
621 * section and outside of call_rcu thread, so we postpone its execution
622 * using ht_cleanup_push. It is simpler than to change the semantic of
623 * the many callers of delete_ust_app_session().
624 */
625 static
626 void delete_ust_app_session_rcu(struct rcu_head *head)
627 {
628 struct ust_app_session *ua_sess =
629 caa_container_of(head, struct ust_app_session, rcu_head);
630
631 ht_cleanup_push(ua_sess->channels);
632 free(ua_sess);
633 }
634
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * Under the session lock: pushes any pending metadata, optionally closes the
 * metadata channel, wipes all channels, removes the per-PID buffer registry
 * and releases the tracer-side session handle. The session structure itself
 * is freed through call_rcu().
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	registry = get_session_registry(ua_sess);
	if (registry && !registry->metadata_closed) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
				!registry->metadata_closed) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	/* A handle of -1 means it was never created on the tracer side. */
	if (ua_sess->handle != -1) {
		ret = ustctl_release_handle(sock, ua_sess->handle);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
	}
	pthread_mutex_unlock(&ua_sess->lock);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
697
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	sock = app->sock;
	/* Mark the socket as taken so no other path uses it. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Defer destruction of the sessions/objd hash tables. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}
748
749 /*
750 * URCU intermediate call to delete an UST app.
751 */
752 static
753 void delete_ust_app_rcu(struct rcu_head *head)
754 {
755 struct lttng_ht_node_ulong *node =
756 caa_container_of(head, struct lttng_ht_node_ulong, head);
757 struct ust_app *app =
758 caa_container_of(node, struct ust_app, pid_n);
759
760 DBG3("Call RCU deleting app PID %d", app->pid);
761 delete_ust_app(app);
762 }
763
764 /*
765 * Delete the session from the application ht and delete the data structure by
766 * freeing every object inside and releasing them.
767 */
768 static void destroy_app_session(struct ust_app *app,
769 struct ust_app_session *ua_sess)
770 {
771 int ret;
772 struct lttng_ht_iter iter;
773
774 assert(app);
775 assert(ua_sess);
776
777 iter.iter.node = &ua_sess->node.node;
778 ret = lttng_ht_del(app->sessions, &iter);
779 if (ret) {
780 /* Already scheduled for teardown. */
781 goto end;
782 }
783
784 /* Once deleted, free the data structure. */
785 delete_ust_app_session(app->sock, ua_sess, app);
786
787 end:
788 return;
789 }
790
791 /*
792 * Alloc new UST app session.
793 */
794 static
795 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
796 {
797 struct ust_app_session *ua_sess;
798
799 /* Init most of the default value by allocating and zeroing */
800 ua_sess = zmalloc(sizeof(struct ust_app_session));
801 if (ua_sess == NULL) {
802 PERROR("malloc");
803 goto error_free;
804 }
805
806 ua_sess->handle = -1;
807 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
808 pthread_mutex_init(&ua_sess->lock, NULL);
809
810 return ua_sess;
811
812 error_free:
813 return NULL;
814 }
815
816 /*
817 * Alloc new UST app channel.
818 */
819 static
820 struct ust_app_channel *alloc_ust_app_channel(char *name,
821 struct ust_app_session *ua_sess,
822 struct lttng_ust_channel_attr *attr)
823 {
824 struct ust_app_channel *ua_chan;
825
826 /* Init most of the default value by allocating and zeroing */
827 ua_chan = zmalloc(sizeof(struct ust_app_channel));
828 if (ua_chan == NULL) {
829 PERROR("malloc");
830 goto error;
831 }
832
833 /* Setup channel name */
834 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
835 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
836
837 ua_chan->enabled = 1;
838 ua_chan->handle = -1;
839 ua_chan->session = ua_sess;
840 ua_chan->key = get_next_channel_key();
841 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
842 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
843 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
844
845 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
846 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
847
848 /* Copy attributes */
849 if (attr) {
850 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
851 ua_chan->attr.subbuf_size = attr->subbuf_size;
852 ua_chan->attr.num_subbuf = attr->num_subbuf;
853 ua_chan->attr.overwrite = attr->overwrite;
854 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
855 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
856 ua_chan->attr.output = attr->output;
857 }
858 /* By default, the channel is a per cpu channel. */
859 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
860
861 DBG3("UST app channel %s allocated", ua_chan->name);
862
863 return ua_chan;
864
865 error:
866 return NULL;
867 }
868
869 /*
870 * Allocate and initialize a UST app stream.
871 *
872 * Return newly allocated stream pointer or NULL on error.
873 */
874 struct ust_app_stream *ust_app_alloc_stream(void)
875 {
876 struct ust_app_stream *stream = NULL;
877
878 stream = zmalloc(sizeof(*stream));
879 if (stream == NULL) {
880 PERROR("zmalloc ust app stream");
881 goto error;
882 }
883
884 /* Zero could be a valid value for a handle so flag it to -1. */
885 stream->handle = -1;
886
887 error:
888 return stream;
889 }
890
891 /*
892 * Alloc new UST app event.
893 */
894 static
895 struct ust_app_event *alloc_ust_app_event(char *name,
896 struct lttng_ust_event *attr)
897 {
898 struct ust_app_event *ua_event;
899
900 /* Init most of the default value by allocating and zeroing */
901 ua_event = zmalloc(sizeof(struct ust_app_event));
902 if (ua_event == NULL) {
903 PERROR("malloc");
904 goto error;
905 }
906
907 ua_event->enabled = 1;
908 strncpy(ua_event->name, name, sizeof(ua_event->name));
909 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
910 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
911
912 /* Copy attributes */
913 if (attr) {
914 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
915 }
916
917 DBG3("UST app event %s allocated", ua_event->name);
918
919 return ua_event;
920
921 error:
922 return NULL;
923 }
924
925 /*
926 * Alloc new UST app context.
927 */
928 static
929 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
930 {
931 struct ust_app_ctx *ua_ctx;
932
933 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
934 if (ua_ctx == NULL) {
935 goto error;
936 }
937
938 CDS_INIT_LIST_HEAD(&ua_ctx->list);
939
940 if (uctx) {
941 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
942 }
943
944 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
945
946 error:
947 return ua_ctx;
948 }
949
950 /*
951 * Allocate a filter and copy the given original filter.
952 *
953 * Return allocated filter or NULL on error.
954 */
955 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
956 struct lttng_ust_filter_bytecode *orig_f)
957 {
958 struct lttng_ust_filter_bytecode *filter = NULL;
959
960 /* Copy filter bytecode */
961 filter = zmalloc(sizeof(*filter) + orig_f->len);
962 if (!filter) {
963 PERROR("zmalloc alloc ust app filter");
964 goto error;
965 }
966
967 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
968
969 error:
970 return filter;
971 }
972
973 /*
974 * Find an ust_app using the sock and return it. RCU read side lock must be
975 * held before calling this helper function.
976 */
977 struct ust_app *ust_app_find_by_sock(int sock)
978 {
979 struct lttng_ht_node_ulong *node;
980 struct lttng_ht_iter iter;
981
982 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
983 node = lttng_ht_iter_get_node_ulong(&iter);
984 if (node == NULL) {
985 DBG2("UST app find by sock %d not found", sock);
986 goto error;
987 }
988
989 return caa_container_of(node, struct ust_app, sock_n);
990
991 error:
992 return NULL;
993 }
994
995 /*
996 * Find an ust_app using the notify sock and return it. RCU read side lock must
997 * be held before calling this helper function.
998 */
999 static struct ust_app *find_app_by_notify_sock(int sock)
1000 {
1001 struct lttng_ht_node_ulong *node;
1002 struct lttng_ht_iter iter;
1003
1004 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1005 &iter);
1006 node = lttng_ht_iter_get_node_ulong(&iter);
1007 if (node == NULL) {
1008 DBG2("UST app find by notify sock %d not found", sock);
1009 goto error;
1010 }
1011
1012 return caa_container_of(node, struct ust_app, notify_sock_n);
1013
1014 error:
1015 return NULL;
1016 }
1017
1018 /*
1019 * Lookup for an ust app event based on event name, filter bytecode and the
1020 * event loglevel.
1021 *
1022 * Return an ust_app_event object or NULL on error.
1023 */
1024 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1025 char *name, struct lttng_ust_filter_bytecode *filter, int loglevel,
1026 const struct lttng_event_exclusion *exclusion)
1027 {
1028 struct lttng_ht_iter iter;
1029 struct lttng_ht_node_str *node;
1030 struct ust_app_event *event = NULL;
1031 struct ust_app_ht_key key;
1032
1033 assert(name);
1034 assert(ht);
1035
1036 /* Setup key for event lookup. */
1037 key.name = name;
1038 key.filter = filter;
1039 key.loglevel = loglevel;
1040 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1041 key.exclusion = (struct lttng_ust_event_exclusion *)exclusion;
1042
1043 /* Lookup using the event name as hash and a custom match fct. */
1044 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1045 ht_match_ust_app_event, &key, &iter.iter);
1046 node = lttng_ht_iter_get_node_str(&iter);
1047 if (node == NULL) {
1048 goto end;
1049 }
1050
1051 event = caa_container_of(node, struct ust_app_event, node);
1052
1053 end:
1054 return event;
1055 }
1056
1057 /*
1058 * Create the channel context on the tracer.
1059 *
1060 * Called with UST app session lock held.
1061 */
1062 static
1063 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1064 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1065 {
1066 int ret;
1067
1068 health_code_update();
1069
1070 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1071 ua_chan->obj, &ua_ctx->obj);
1072 if (ret < 0) {
1073 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1074 ERR("UST app create channel context failed for app (pid: %d) "
1075 "with ret %d", app->pid, ret);
1076 } else {
1077 /*
1078 * This is normal behavior, an application can die during the
1079 * creation process. Don't report an error so the execution can
1080 * continue normally.
1081 */
1082 ret = 0;
1083 DBG3("UST app disable event failed. Application is dead.");
1084 }
1085 goto error;
1086 }
1087
1088 ua_ctx->handle = ua_ctx->obj->handle;
1089
1090 DBG2("UST app context handle %d created successfully for channel %s",
1091 ua_ctx->handle, ua_chan->name);
1092
1093 error:
1094 health_code_update();
1095 return ret;
1096 }
1097
1098 /*
1099 * Set the filter on the tracer.
1100 */
1101 static
1102 int set_ust_event_filter(struct ust_app_event *ua_event,
1103 struct ust_app *app)
1104 {
1105 int ret;
1106
1107 health_code_update();
1108
1109 if (!ua_event->filter) {
1110 ret = 0;
1111 goto error;
1112 }
1113
1114 ret = ustctl_set_filter(app->sock, ua_event->filter,
1115 ua_event->obj);
1116 if (ret < 0) {
1117 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1118 ERR("UST app event %s filter failed for app (pid: %d) "
1119 "with ret %d", ua_event->attr.name, app->pid, ret);
1120 } else {
1121 /*
1122 * This is normal behavior, an application can die during the
1123 * creation process. Don't report an error so the execution can
1124 * continue normally.
1125 */
1126 ret = 0;
1127 DBG3("UST app filter event failed. Application is dead.");
1128 }
1129 goto error;
1130 }
1131
1132 DBG2("UST filter set successfully for event %s", ua_event->name);
1133
1134 error:
1135 health_code_update();
1136 return ret;
1137 }
1138
1139 /*
1140 * Disable the specified event on to UST tracer for the UST session.
1141 */
1142 static int disable_ust_event(struct ust_app *app,
1143 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1144 {
1145 int ret;
1146
1147 health_code_update();
1148
1149 ret = ustctl_disable(app->sock, ua_event->obj);
1150 if (ret < 0) {
1151 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1152 ERR("UST app event %s disable failed for app (pid: %d) "
1153 "and session handle %d with ret %d",
1154 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1155 } else {
1156 /*
1157 * This is normal behavior, an application can die during the
1158 * creation process. Don't report an error so the execution can
1159 * continue normally.
1160 */
1161 ret = 0;
1162 DBG3("UST app disable event failed. Application is dead.");
1163 }
1164 goto error;
1165 }
1166
1167 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1168 ua_event->attr.name, app->pid);
1169
1170 error:
1171 health_code_update();
1172 return ret;
1173 }
1174
1175 /*
1176 * Disable the specified channel on to UST tracer for the UST session.
1177 */
1178 static int disable_ust_channel(struct ust_app *app,
1179 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1180 {
1181 int ret;
1182
1183 health_code_update();
1184
1185 ret = ustctl_disable(app->sock, ua_chan->obj);
1186 if (ret < 0) {
1187 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1188 ERR("UST app channel %s disable failed for app (pid: %d) "
1189 "and session handle %d with ret %d",
1190 ua_chan->name, app->pid, ua_sess->handle, ret);
1191 } else {
1192 /*
1193 * This is normal behavior, an application can die during the
1194 * creation process. Don't report an error so the execution can
1195 * continue normally.
1196 */
1197 ret = 0;
1198 DBG3("UST app disable channel failed. Application is dead.");
1199 }
1200 goto error;
1201 }
1202
1203 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1204 ua_chan->name, app->pid);
1205
1206 error:
1207 health_code_update();
1208 return ret;
1209 }
1210
1211 /*
1212 * Enable the specified channel on to UST tracer for the UST session.
1213 */
1214 static int enable_ust_channel(struct ust_app *app,
1215 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1216 {
1217 int ret;
1218
1219 health_code_update();
1220
1221 ret = ustctl_enable(app->sock, ua_chan->obj);
1222 if (ret < 0) {
1223 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1224 ERR("UST app channel %s enable failed for app (pid: %d) "
1225 "and session handle %d with ret %d",
1226 ua_chan->name, app->pid, ua_sess->handle, ret);
1227 } else {
1228 /*
1229 * This is normal behavior, an application can die during the
1230 * creation process. Don't report an error so the execution can
1231 * continue normally.
1232 */
1233 ret = 0;
1234 DBG3("UST app enable channel failed. Application is dead.");
1235 }
1236 goto error;
1237 }
1238
1239 ua_chan->enabled = 1;
1240
1241 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1242 ua_chan->name, app->pid);
1243
1244 error:
1245 health_code_update();
1246 return ret;
1247 }
1248
1249 /*
1250 * Enable the specified event on to UST tracer for the UST session.
1251 */
1252 static int enable_ust_event(struct ust_app *app,
1253 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1254 {
1255 int ret;
1256
1257 health_code_update();
1258
1259 ret = ustctl_enable(app->sock, ua_event->obj);
1260 if (ret < 0) {
1261 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1262 ERR("UST app event %s enable failed for app (pid: %d) "
1263 "and session handle %d with ret %d",
1264 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1265 } else {
1266 /*
1267 * This is normal behavior, an application can die during the
1268 * creation process. Don't report an error so the execution can
1269 * continue normally.
1270 */
1271 ret = 0;
1272 DBG3("UST app enable event failed. Application is dead.");
1273 }
1274 goto error;
1275 }
1276
1277 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1278 ua_event->attr.name, app->pid);
1279
1280 error:
1281 health_code_update();
1282 return ret;
1283 }
1284
1285 /*
1286 * Send channel and stream buffer to application.
1287 *
1288 * Return 0 on success. On error, a negative value is returned.
1289 */
1290 static int send_channel_pid_to_ust(struct ust_app *app,
1291 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1292 {
1293 int ret;
1294 struct ust_app_stream *stream, *stmp;
1295
1296 assert(app);
1297 assert(ua_sess);
1298 assert(ua_chan);
1299
1300 health_code_update();
1301
1302 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1303 app->sock);
1304
1305 /* Send channel to the application. */
1306 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1307 if (ret < 0) {
1308 goto error;
1309 }
1310
1311 health_code_update();
1312
1313 /* Send all streams to application. */
1314 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1315 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1316 if (ret < 0) {
1317 goto error;
1318 }
1319 /* We don't need the stream anymore once sent to the tracer. */
1320 cds_list_del(&stream->list);
1321 delete_ust_app_stream(-1, stream);
1322 }
1323 /* Flag the channel that it is sent to the application. */
1324 ua_chan->is_sent = 1;
1325
1326 error:
1327 health_code_update();
1328 return ret;
1329 }
1330
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 *
 * Returns 0 on success (including when the application died mid-command);
 * a negative value on failure.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	/* Keep the tracer-side handle for later commands on this event. */
	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* If event not enabled, disable it on the tracer */
	if (ua_event->enabled == 0) {
		ret = disable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our disable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
				/*
				 * NOTE(review): no break here — with NDEBUG the assert is a
				 * no-op and this falls through to the EXIST case (ret = 0).
				 * Confirm this fall-through is intentional.
				 */
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
1406
/*
 * Copy data between an UST app event and a LTT event.
 *
 * Copies name, enabled state, attributes, filter bytecode and exclusion
 * data from the tracing-registry event into the per-app event. Allocation
 * failures for filter/exclusion are silent: the field simply stays NULL.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	/* strncpy does not guarantee NUL-termination; force it. */
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data: header plus one symbol-name slot per entry. */
	if (uevent->exclusion) {
		exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
				LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion) {
			memcpy(ua_event->exclusion, uevent->exclusion, exclusion_alloc_size);
		}
		/* Like the filter above, exclusion stays NULL on ENOMEM. */
	}
}
1439
/*
 * Copy data between an UST app channel and a LTT channel.
 *
 * Copies the channel name, trace-file settings, buffer attributes, enabled
 * state and id, then clones every context and every event of the registry
 * channel into the per-app channel (skipping events that already exist).
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	/* strncpy does not guarantee NUL-termination; force it. */
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/* Clone each context; allocation failure skips that context silently. */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
		if (ua_ctx == NULL) {
			continue;
		}
		/* Contexts are indexed by their context type value. */
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		/* Match on name, filter, loglevel and exclusion together. */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				/* ENOMEM: skip this event, keep copying the rest. */
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1504
1505 /*
1506 * Copy data between a UST app session and a regular LTT session.
1507 */
1508 static void shadow_copy_session(struct ust_app_session *ua_sess,
1509 struct ltt_ust_session *usess, struct ust_app *app)
1510 {
1511 struct lttng_ht_node_str *ua_chan_node;
1512 struct lttng_ht_iter iter;
1513 struct ltt_ust_channel *uchan;
1514 struct ust_app_channel *ua_chan;
1515 time_t rawtime;
1516 struct tm *timeinfo;
1517 char datetime[16];
1518 int ret;
1519
1520 /* Get date and time for unique app path */
1521 time(&rawtime);
1522 timeinfo = localtime(&rawtime);
1523 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1524
1525 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1526
1527 ua_sess->tracing_id = usess->id;
1528 ua_sess->id = get_next_session_id();
1529 ua_sess->uid = app->uid;
1530 ua_sess->gid = app->gid;
1531 ua_sess->euid = usess->uid;
1532 ua_sess->egid = usess->gid;
1533 ua_sess->buffer_type = usess->buffer_type;
1534 ua_sess->bits_per_long = app->bits_per_long;
1535 /* There is only one consumer object per session possible. */
1536 ua_sess->consumer = usess->consumer;
1537 ua_sess->output_traces = usess->output_traces;
1538 ua_sess->live_timer_interval = usess->live_timer_interval;
1539
1540 switch (ua_sess->buffer_type) {
1541 case LTTNG_BUFFER_PER_PID:
1542 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1543 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1544 datetime);
1545 break;
1546 case LTTNG_BUFFER_PER_UID:
1547 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1548 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1549 break;
1550 default:
1551 assert(0);
1552 goto error;
1553 }
1554 if (ret < 0) {
1555 PERROR("asprintf UST shadow copy session");
1556 assert(0);
1557 goto error;
1558 }
1559
1560 /* Iterate over all channels in global domain. */
1561 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1562 uchan, node.node) {
1563 struct lttng_ht_iter uiter;
1564
1565 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1566 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1567 if (ua_chan_node != NULL) {
1568 /* Session exist. Contiuing. */
1569 continue;
1570 }
1571
1572 DBG2("Channel %s not found on shadow session copy, creating it",
1573 uchan->name);
1574 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1575 if (ua_chan == NULL) {
1576 /* malloc failed FIXME: Might want to do handle ENOMEM .. */
1577 continue;
1578 }
1579 shadow_copy_channel(ua_chan, uchan);
1580 /*
1581 * The concept of metadata channel does not exist on the tracing
1582 * registry side of the session daemon so this can only be a per CPU
1583 * channel and not metadata.
1584 */
1585 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1586
1587 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1588 }
1589
1590 error:
1591 return;
1592 }
1593
1594 /*
1595 * Lookup sesison wrapper.
1596 */
1597 static
1598 void __lookup_session_by_app(struct ltt_ust_session *usess,
1599 struct ust_app *app, struct lttng_ht_iter *iter)
1600 {
1601 /* Get right UST app session from app */
1602 lttng_ht_lookup(app->sessions, &usess->id, iter);
1603 }
1604
1605 /*
1606 * Return ust app session from the app session hashtable using the UST session
1607 * id.
1608 */
1609 static struct ust_app_session *lookup_session_by_app(
1610 struct ltt_ust_session *usess, struct ust_app *app)
1611 {
1612 struct lttng_ht_iter iter;
1613 struct lttng_ht_node_u64 *node;
1614
1615 __lookup_session_by_app(usess, app, &iter);
1616 node = lttng_ht_iter_get_node_u64(&iter);
1617 if (node == NULL) {
1618 goto error;
1619 }
1620
1621 return caa_container_of(node, struct ust_app_session, node);
1622
1623 error:
1624 return NULL;
1625 }
1626
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_pid_add(reg_pid);
	} else {
		/* Registry already exists and is initialized; return it as-is. */
		goto end;
	}

	/* Initialize registry with the application's ABI description. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		/* Reached on both the created and the already-found paths. */
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1681
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	/* Per-UID registries are also keyed on bitness (32/64-bit apps differ). */
	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_uid_add(reg_uid);
	} else {
		/* Registry already exists and is initialized; return it as-is. */
		goto end;
	}

	/*
	 * Initialize registry with the first app's ABI description. The app
	 * pointer itself is NULL since the registry is shared across apps.
	 */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	DBG3("UST app buffer registry per UID created successfully");

end:
	if (regp) {
		/* Reached on both the created and the already-found paths. */
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1739
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse an existing app session for this tracing session if any. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Make sure a buffer registry exists for this buffer scheme. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* A handle of -1 means no tracer-side session exists yet. */
	if (ua_sess->handle == -1) {
		ret = ustctl_create_session(app->sock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* On success, ustctl_create_session returns the session handle. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
1850
1851 /*
1852 * Create a context for the channel on the tracer.
1853 *
1854 * Called with UST app session lock held and a RCU read side lock.
1855 */
1856 static
1857 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
1858 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
1859 struct ust_app *app)
1860 {
1861 int ret = 0;
1862 struct lttng_ht_iter iter;
1863 struct lttng_ht_node_ulong *node;
1864 struct ust_app_ctx *ua_ctx;
1865
1866 DBG2("UST app adding context to channel %s", ua_chan->name);
1867
1868 lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
1869 node = lttng_ht_iter_get_node_ulong(&iter);
1870 if (node != NULL) {
1871 ret = -EEXIST;
1872 goto error;
1873 }
1874
1875 ua_ctx = alloc_ust_app_ctx(uctx);
1876 if (ua_ctx == NULL) {
1877 /* malloc failed */
1878 ret = -1;
1879 goto error;
1880 }
1881
1882 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
1883 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1884 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
1885
1886 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
1887 if (ret < 0) {
1888 goto error;
1889 }
1890
1891 error:
1892 return ret;
1893 }
1894
1895 /*
1896 * Enable on the tracer side a ust app event for the session and channel.
1897 *
1898 * Called with UST app session lock held.
1899 */
1900 static
1901 int enable_ust_app_event(struct ust_app_session *ua_sess,
1902 struct ust_app_event *ua_event, struct ust_app *app)
1903 {
1904 int ret;
1905
1906 ret = enable_ust_event(app, ua_sess, ua_event);
1907 if (ret < 0) {
1908 goto error;
1909 }
1910
1911 ua_event->enabled = 1;
1912
1913 error:
1914 return ret;
1915 }
1916
1917 /*
1918 * Disable on the tracer side a ust app event for the session and channel.
1919 */
1920 static int disable_ust_app_event(struct ust_app_session *ua_sess,
1921 struct ust_app_event *ua_event, struct ust_app *app)
1922 {
1923 int ret;
1924
1925 ret = disable_ust_event(app, ua_sess, ua_event);
1926 if (ret < 0) {
1927 goto error;
1928 }
1929
1930 ua_event->enabled = 0;
1931
1932 error:
1933 return ret;
1934 }
1935
1936 /*
1937 * Lookup ust app channel for session and disable it on the tracer side.
1938 */
1939 static
1940 int disable_ust_app_channel(struct ust_app_session *ua_sess,
1941 struct ust_app_channel *ua_chan, struct ust_app *app)
1942 {
1943 int ret;
1944
1945 ret = disable_ust_channel(app, ua_sess, ua_chan);
1946 if (ret < 0) {
1947 goto error;
1948 }
1949
1950 ua_chan->enabled = 0;
1951
1952 error:
1953 return ret;
1954 }
1955
1956 /*
1957 * Lookup ust app channel for session and enable it on the tracer side. This
1958 * MUST be called with a RCU read side lock acquired.
1959 */
1960 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
1961 struct ltt_ust_channel *uchan, struct ust_app *app)
1962 {
1963 int ret = 0;
1964 struct lttng_ht_iter iter;
1965 struct lttng_ht_node_str *ua_chan_node;
1966 struct ust_app_channel *ua_chan;
1967
1968 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
1969 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
1970 if (ua_chan_node == NULL) {
1971 DBG2("Unable to find channel %s in ust session id %" PRIu64,
1972 uchan->name, ua_sess->tracing_id);
1973 goto error;
1974 }
1975
1976 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
1977
1978 ret = enable_ust_channel(app, ua_sess, ua_chan);
1979 if (ret < 0) {
1980 goto error;
1981 }
1982
1983 error:
1984 return ret;
1985 }
1986
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * Reserves one fd for the channel and DEFAULT_UST_STREAM_FD_NUM fds per
 * expected stream; all reservations are released through the unwind labels
 * on failure.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the
	 * stream list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

	/* Unwind labels: release resources in reverse acquisition order. */
error_destroy:
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2080
2081 /*
2082 * Duplicate the ust data object of the ust app stream and save it in the
2083 * buffer registry stream.
2084 *
2085 * Return 0 on success or else a negative value.
2086 */
2087 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2088 struct ust_app_stream *stream)
2089 {
2090 int ret;
2091
2092 assert(reg_stream);
2093 assert(stream);
2094
2095 /* Reserve the amount of file descriptor we need. */
2096 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2097 if (ret < 0) {
2098 ERR("Exhausted number of available FD upon duplicate stream");
2099 goto error;
2100 }
2101
2102 /* Duplicate object for stream once the original is in the registry. */
2103 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2104 reg_stream->obj.ust);
2105 if (ret < 0) {
2106 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2107 reg_stream->obj.ust, stream->obj, ret);
2108 lttng_fd_put(LTTNG_FD_APPS, 2);
2109 goto error;
2110 }
2111 stream->handle = stream->obj->handle;
2112
2113 error:
2114 return ret;
2115 }
2116
/*
 * Duplicate the ust data object of the ust app. channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/*
	 * Reserve one fd for the channel object (streams reserve their own).
	 * NOTE(review): previous comment said "two fds" but the code takes and
	 * releases 1 — confirm which is correct.
	 */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2154
2155 /*
2156 * For a given channel buffer registry, setup all streams of the given ust
2157 * application channel.
2158 *
2159 * Return 0 on success or else a negative value.
2160 */
2161 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2162 struct ust_app_channel *ua_chan)
2163 {
2164 int ret = 0;
2165 struct ust_app_stream *stream, *stmp;
2166
2167 assert(reg_chan);
2168 assert(ua_chan);
2169
2170 DBG2("UST app setup buffer registry stream");
2171
2172 /* Send all streams to application. */
2173 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2174 struct buffer_reg_stream *reg_stream;
2175
2176 ret = buffer_reg_stream_create(&reg_stream);
2177 if (ret < 0) {
2178 goto error;
2179 }
2180
2181 /*
2182 * Keep original pointer and nullify it in the stream so the delete
2183 * stream call does not release the object.
2184 */
2185 reg_stream->obj.ust = stream->obj;
2186 stream->obj = NULL;
2187 buffer_reg_stream_add(reg_stream, reg_chan);
2188
2189 /* We don't need the streams anymore. */
2190 cds_list_del(&stream->list);
2191 delete_ust_app_stream(-1, stream);
2192 }
2193
2194 error:
2195 return ret;
2196 }
2197
/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. Important, the created object is NOT added to the session
 * registry hash table.
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel keyed on the tracing channel id. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(reg_chan);
	reg_chan->consumer_key = ua_chan->key;
	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, reg_chan);

	if (regp) {
		*regp = reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
2246
2247 /*
2248 * Setup buffer registry channel for the given session registry and application
2249 * channel object. If regp pointer is valid, it's set with the created object.
2250 *
2251 * Return 0 on success else a negative value.
2252 */
2253 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2254 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2255 {
2256 int ret;
2257
2258 assert(reg_sess);
2259 assert(reg_chan);
2260 assert(ua_chan);
2261 assert(ua_chan->obj);
2262
2263 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2264
2265 /* Setup all streams for the registry. */
2266 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2267 if (ret < 0) {
2268 goto error;
2269 }
2270
2271 reg_chan->obj.ust = ua_chan->obj;
2272 ua_chan->obj = NULL;
2273
2274 return 0;
2275
2276 error:
2277 buffer_reg_channel_remove(reg_sess, reg_chan);
2278 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2279 return ret;
2280 }
2281
/*
 * Send buffer registry channel to the application.
 *
 * The channel and each of its streams are duplicated from the registry
 * (rather than moved) before being sent, since per-UID registry buffers are
 * shared between applications of that user (see create_channel_per_uid).
 *
 * Return 0 on success else a negative value.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	/* Duplicate the registry channel object into ua_chan. */
	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		struct ust_app_stream stream;

		/* Stack-local duplicate, released after each send attempt. */
		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream);
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream);
	}
	/* Only flag the channel as sent once every stream made it across. */
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
2343
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * The buffer registry channel keyed by {session id, bitness, uid} is created
 * on first use; later applications of the same UID reuse it, so consumer-side
 * buffers are only allocated once per UID.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be find, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
		if (ret < 0) {
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
2419
2420 /*
2421 * Create and send to the application the created buffers with per PID buffers.
2422 *
2423 * Return 0 on success else a negative value.
2424 */
2425 static int create_channel_per_pid(struct ust_app *app,
2426 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2427 struct ust_app_channel *ua_chan)
2428 {
2429 int ret;
2430 struct ust_registry_session *registry;
2431
2432 assert(app);
2433 assert(usess);
2434 assert(ua_sess);
2435 assert(ua_chan);
2436
2437 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2438
2439 rcu_read_lock();
2440
2441 registry = get_session_registry(ua_sess);
2442 assert(registry);
2443
2444 /* Create and add a new channel registry to session. */
2445 ret = ust_registry_channel_add(registry, ua_chan->key);
2446 if (ret < 0) {
2447 goto error;
2448 }
2449
2450 /* Create and get channel on the consumer side. */
2451 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2452 app->bits_per_long, registry);
2453 if (ret < 0) {
2454 goto error;
2455 }
2456
2457 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2458 if (ret < 0) {
2459 goto error;
2460 }
2461
2462 error:
2463 rcu_read_unlock();
2464 return ret;
2465 }
2466
2467 /*
2468 * From an already allocated ust app channel, create the channel buffers if
2469 * need and send it to the application. This MUST be called with a RCU read
2470 * side lock acquired.
2471 *
2472 * Return 0 on success or else a negative value.
2473 */
2474 static int do_create_channel(struct ust_app *app,
2475 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2476 struct ust_app_channel *ua_chan)
2477 {
2478 int ret;
2479
2480 assert(app);
2481 assert(usess);
2482 assert(ua_sess);
2483 assert(ua_chan);
2484
2485 /* Handle buffer type before sending the channel to the application. */
2486 switch (usess->buffer_type) {
2487 case LTTNG_BUFFER_PER_UID:
2488 {
2489 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2490 if (ret < 0) {
2491 goto error;
2492 }
2493 break;
2494 }
2495 case LTTNG_BUFFER_PER_PID:
2496 {
2497 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2498 if (ret < 0) {
2499 goto error;
2500 }
2501 break;
2502 }
2503 default:
2504 assert(0);
2505 ret = -EINVAL;
2506 goto error;
2507 }
2508
2509 /* Initialize ust objd object using the received handle and add it. */
2510 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2511 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2512
2513 /* If channel is not enabled, disable it on the tracer */
2514 if (!ua_chan->enabled) {
2515 ret = disable_ust_channel(app, ua_sess, ua_chan);
2516 if (ret < 0) {
2517 goto error;
2518 }
2519 }
2520
2521 error:
2522 return ret;
2523 }
2524
/*
 * Create UST app channel and create it on the tracer. Set ua_chanp of the
 * newly created channel if not NULL.
 *
 * If a channel of the same name already exists in the app session, it is
 * reused as-is and 0 is returned.
 *
 * Called with UST app session lock and RCU read-side lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	/* Create the buffers and send the channel to the application. */
	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);

end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/* Only tell the app to release the channel if it was actually sent. */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
2586
2587 /*
2588 * Create UST app event and create it on the tracer side.
2589 *
2590 * Called with ust app session mutex held.
2591 */
2592 static
2593 int create_ust_app_event(struct ust_app_session *ua_sess,
2594 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
2595 struct ust_app *app)
2596 {
2597 int ret = 0;
2598 struct ust_app_event *ua_event;
2599
2600 /* Get event node */
2601 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
2602 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
2603 if (ua_event != NULL) {
2604 ret = -EEXIST;
2605 goto end;
2606 }
2607
2608 /* Does not exist so create one */
2609 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
2610 if (ua_event == NULL) {
2611 /* Only malloc can failed so something is really wrong */
2612 ret = -ENOMEM;
2613 goto end;
2614 }
2615 shadow_copy_event(ua_event, uevent);
2616
2617 /* Create it on the tracer side */
2618 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
2619 if (ret < 0) {
2620 /* Not found previously means that it does not exist on the tracer */
2621 assert(ret != -LTTNG_UST_ERR_EXIST);
2622 goto error;
2623 }
2624
2625 add_unique_ust_app_event(ua_chan, ua_event);
2626
2627 DBG2("UST app create event %s for PID %d completed", ua_event->name,
2628 app->pid);
2629
2630 end:
2631 return ret;
2632
2633 error:
2634 /* Valid. Calling here is already in a read side lock */
2635 delete_ust_app_event(-1, ua_event);
2636 return ret;
2637 }
2638
/*
 * Create UST metadata and open it on the tracer side.
 *
 * Returns 0 without doing anything if metadata already exists for this
 * registry or was previously closed. On return — success or failure — the
 * local metadata channel object is always deleted; the consumer keeps the
 * channel on its side, identified by registry->metadata_key.
 *
 * Called with UST app session lock held and RCU read side lock.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer,
		struct ustctl_consumer_channel_attr *attr)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	if (!attr) {
		/* Set default attributes for metadata. */
		metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
		metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
		metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
		metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
		metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	} else {
		/* Caller-provided attributes; output and type are still forced. */
		memcpy(&metadata->attr, attr, sizeof(metadata->attr));
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	}

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept their. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/* The local object is no longer needed; the consumer holds the channel. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	return ret;
}
2747
/*
 * Return pointer to the global hash table of traceable apps (keyed by PID).
 */
struct lttng_ht *ust_app_get_ht(void)
{
	return ust_app_ht;
}
2755
2756 /*
2757 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2758 * acquired before calling this function.
2759 */
2760 struct ust_app *ust_app_find_by_pid(pid_t pid)
2761 {
2762 struct ust_app *app = NULL;
2763 struct lttng_ht_node_ulong *node;
2764 struct lttng_ht_iter iter;
2765
2766 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2767 node = lttng_ht_iter_get_node_ulong(&iter);
2768 if (node == NULL) {
2769 DBG2("UST app no found with pid %d", pid);
2770 goto error;
2771 }
2772
2773 DBG2("Found UST app by pid %d", pid);
2774
2775 app = caa_container_of(node, struct ust_app, pid_n);
2776
2777 error:
2778 return app;
2779 }
2780
/*
 * Allocate and init an UST app object using the registration information and
 * the command socket. This is called when the command socket connects to the
 * session daemon.
 *
 * The object is returned on success or else NULL (no consumerd of matching
 * bitness, or out of memory). Ownership of the object goes to the caller.
 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	struct ust_app *lta = NULL;

	assert(msg);
	assert(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	/* Refuse registration when no consumerd of the app's bitness exists. */
	if ((msg->bits_per_long == 64 &&
				(uatomic_read(&ust_consumerd64_fd) == -EINVAL))
			|| (msg->bits_per_long == 32 &&
				(uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
				"%d-bit long, but no consumerd for this size is available.\n",
				msg->name, msg->pid, msg->bits_per_long);
		goto error;
	}

	lta = zmalloc(sizeof(struct ust_app));
	if (lta == NULL) {
		PERROR("malloc");
		goto error;
	}

	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	/* ABI description of the traced application. */
	lta->bits_per_long = msg->bits_per_long;
	lta->uint8_t_alignment = msg->uint8_t_alignment;
	lta->uint16_t_alignment = msg->uint16_t_alignment;
	lta->uint32_t_alignment = msg->uint32_t_alignment;
	lta->uint64_t_alignment = msg->uint64_t_alignment;
	lta->long_alignment = msg->long_alignment;
	lta->byte_order = msg->byte_order;

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	/* NOTE(review): lttng_ht_new() results are unchecked — confirm OOM policy. */
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->notify_sock = -1;

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	/* Assumes lta->name holds UST_APP_PROCNAME_LEN + 1 bytes — confirm. */
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);

error:
	return lta;
}
2852
/*
 * For a given application object, add it to every hash table.
 *
 * Takes the RCU read-side lock internally. The application's notify socket
 * must already have been received (asserted below) before publication.
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	assert(app->notify_sock >= 0);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
2887
2888 /*
2889 * Set the application version into the object.
2890 *
2891 * Return 0 on success else a negative value either an errno code or a
2892 * LTTng-UST error code.
2893 */
2894 int ust_app_version(struct ust_app *app)
2895 {
2896 int ret;
2897
2898 assert(app);
2899
2900 ret = ustctl_tracer_version(app->sock, &app->version);
2901 if (ret < 0) {
2902 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2903 ERR("UST app %d verson failed with ret %d", app->sock, ret);
2904 } else {
2905 DBG3("UST app %d verion failed. Application is dead", app->sock);
2906 }
2907 }
2908
2909 return ret;
2910 }
2911
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 *
 * Metadata of each app session is pushed (and possibly closed) synchronously
 * here, before the grace period, so the data-pending check cannot race with
 * the deferred teardown; see the inline comments below.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/* Remove application from the socket hash table. */
	ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Remove sessions so they are not visible during deletion.*/
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry && !registry->metadata_closed) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
					!registry->metadata_closed) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}

		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Free memory */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3017
3018 /*
3019 * Return traceable_app_count
3020 */
3021 unsigned long ust_app_list_count(void)
3022 {
3023 unsigned long count;
3024
3025 rcu_read_lock();
3026 count = lttng_ht_get_count(ust_app_ht);
3027 rcu_read_unlock();
3028
3029 return count;
3030 }
3031
3032 /*
3033 * Fill events array with all events name of all registered apps.
3034 */
3035 int ust_app_list_events(struct lttng_event **events)
3036 {
3037 int ret, handle;
3038 size_t nbmem, count = 0;
3039 struct lttng_ht_iter iter;
3040 struct ust_app *app;
3041 struct lttng_event *tmp_event;
3042
3043 nbmem = UST_APP_EVENT_LIST_SIZE;
3044 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3045 if (tmp_event == NULL) {
3046 PERROR("zmalloc ust app events");
3047 ret = -ENOMEM;
3048 goto error;
3049 }
3050
3051 rcu_read_lock();
3052
3053 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3054 struct lttng_ust_tracepoint_iter uiter;
3055
3056 health_code_update();
3057
3058 if (!app->compatible) {
3059 /*
3060 * TODO: In time, we should notice the caller of this error by
3061 * telling him that this is a version error.
3062 */
3063 continue;
3064 }
3065 handle = ustctl_tracepoint_list(app->sock);
3066 if (handle < 0) {
3067 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3068 ERR("UST app list events getting handle failed for app pid %d",
3069 app->pid);
3070 }
3071 continue;
3072 }
3073
3074 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
3075 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3076 /* Handle ustctl error. */
3077 if (ret < 0) {
3078 free(tmp_event);
3079 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3080 ERR("UST app tp list get failed for app %d with ret %d",
3081 app->sock, ret);
3082 } else {
3083 DBG3("UST app tp list get failed. Application is dead");
3084 /*
3085 * This is normal behavior, an application can die during the
3086 * creation process. Don't report an error so the execution can
3087 * continue normally. Continue normal execution.
3088 */
3089 break;
3090 }
3091 goto rcu_error;
3092 }
3093
3094 health_code_update();
3095 if (count >= nbmem) {
3096 /* In case the realloc fails, we free the memory */
3097 void *ptr;
3098
3099 DBG2("Reallocating event list from %zu to %zu entries", nbmem,
3100 2 * nbmem);
3101 nbmem *= 2;
3102 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event));
3103 if (ptr == NULL) {
3104 PERROR("realloc ust app events");
3105 free(tmp_event);
3106 ret = -ENOMEM;
3107 goto rcu_error;
3108 }
3109 tmp_event = ptr;
3110 }
3111 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3112 tmp_event[count].loglevel = uiter.loglevel;
3113 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3114 tmp_event[count].pid = app->pid;
3115 tmp_event[count].enabled = -1;
3116 count++;
3117 }
3118 }
3119
3120 ret = count;
3121 *events = tmp_event;
3122
3123 DBG2("UST app list events done (%zu events)", count);
3124
3125 rcu_error:
3126 rcu_read_unlock();
3127 error:
3128 health_code_update();
3129 return ret;
3130 }
3131
3132 /*
3133 * Fill events array with all events name of all registered apps.
3134 */
3135 int ust_app_list_event_fields(struct lttng_event_field **fields)
3136 {
3137 int ret, handle;
3138 size_t nbmem, count = 0;
3139 struct lttng_ht_iter iter;
3140 struct ust_app *app;
3141 struct lttng_event_field *tmp_event;
3142
3143 nbmem = UST_APP_EVENT_LIST_SIZE;
3144 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3145 if (tmp_event == NULL) {
3146 PERROR("zmalloc ust app event fields");
3147 ret = -ENOMEM;
3148 goto error;
3149 }
3150
3151 rcu_read_lock();
3152
3153 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3154 struct lttng_ust_field_iter uiter;
3155
3156 health_code_update();
3157
3158 if (!app->compatible) {
3159 /*
3160 * TODO: In time, we should notice the caller of this error by
3161 * telling him that this is a version error.
3162 */
3163 continue;
3164 }
3165 handle = ustctl_tracepoint_field_list(app->sock);
3166 if (handle < 0) {
3167 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3168 ERR("UST app list field getting handle failed for app pid %d",
3169 app->pid);
3170 }
3171 continue;
3172 }
3173
3174 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3175 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3176 /* Handle ustctl error. */
3177 if (ret < 0) {
3178 free(tmp_event);
3179 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3180 ERR("UST app tp list field failed for app %d with ret %d",
3181 app->sock, ret);
3182 } else {
3183 DBG3("UST app tp list field failed. Application is dead");
3184 /*
3185 * This is normal behavior, an application can die during the
3186 * creation process. Don't report an error so the execution can
3187 * continue normally.
3188 */
3189 break;
3190 }
3191 goto rcu_error;
3192 }
3193
3194 health_code_update();
3195 if (count >= nbmem) {
3196 /* In case the realloc fails, we free the memory */
3197 void *ptr;
3198
3199 DBG2("Reallocating event field list from %zu to %zu entries", nbmem,
3200 2 * nbmem);
3201 nbmem *= 2;
3202 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event_field));
3203 if (ptr == NULL) {
3204 PERROR("realloc ust app event fields");
3205 free(tmp_event);
3206 ret = -ENOMEM;
3207 goto rcu_error;
3208 }
3209 tmp_event = ptr;
3210 }
3211
3212 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3213 tmp_event[count].type = uiter.type;
3214 tmp_event[count].nowrite = uiter.nowrite;
3215
3216 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3217 tmp_event[count].event.loglevel = uiter.loglevel;
3218 tmp_event[count].event.type = LTTNG_UST_TRACEPOINT;
3219 tmp_event[count].event.pid = app->pid;
3220 tmp_event[count].event.enabled = -1;
3221 count++;
3222 }
3223 }
3224
3225 ret = count;
3226 *fields = tmp_event;
3227
3228 DBG2("UST app list event fields done (%zu events)", count);
3229
3230 rcu_error:
3231 rcu_read_unlock();
3232 error:
3233 health_code_update();
3234 return ret;
3235 }
3236
/*
 * Free and clean all traceable apps of the global list.
 *
 * Should _NOT_ be called with RCU read-side lock held.
 */
void ust_app_clean_list(void)
{
	int ret;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	DBG2("UST app cleaning registered apps hash table");

	rcu_read_lock();

	/* App objects are freed after the RCU grace period via call_rcu. */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = lttng_ht_del(ust_app_ht, &iter);
		assert(!ret);
		call_rcu(&app->pid_n.head, delete_ust_app_rcu);
	}

	/* Cleanup socket hash table */
	cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
			sock_n.node) {
		ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
		assert(!ret);
	}

	/* Cleanup notify socket hash table */
	cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
			notify_sock_n.node) {
		ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
		assert(!ret);
	}
	rcu_read_unlock();

	/* Destroy is done only when the ht is empty */
	ht_cleanup_push(ust_app_ht);
	ht_cleanup_push(ust_app_ht_by_sock);
	ht_cleanup_push(ust_app_ht_by_notify_sock);
}
3278
/*
 * Init UST app hash tables (by PID, by command socket, by notify socket).
 *
 * NOTE(review): lttng_ht_new() return values are unchecked here; a failed
 * allocation would leave a NULL table to be dereferenced later — confirm
 * the intended OOM policy.
 */
void ust_app_ht_alloc(void)
{
	ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
}
3288
/*
 * For a specific UST session, disable the channel for all registered apps.
 *
 * Per-application disable failures are skipped (see XXX below); the assert
 * below fires if the channel is already disabled in an app session.
 */
int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	if (usess == NULL || uchan == NULL) {
		ERR("Disabling UST global channel with NULL values");
		ret = -1;
		goto error;
	}

	DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/* Apps not tracing this session are simply skipped. */
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Get channel */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session if found for the app, the channel must be there */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		/* The channel must not be already disabled */
		assert(ua_chan->enabled == 1);

		/* Disable channel onto application */
		ret = disable_ust_app_channel(ua_sess, ua_chan, app);
		if (ret < 0) {
			/* XXX: We might want to report this error at some point... */
			continue;
		}
	}

	rcu_read_unlock();

error:
	return ret;
}
3351
3352 /*
3353 * For a specific UST session, enable the channel for all registered apps.
3354 */
3355 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3356 struct ltt_ust_channel *uchan)
3357 {
3358 int ret = 0;
3359 struct lttng_ht_iter iter;
3360 struct ust_app *app;
3361 struct ust_app_session *ua_sess;
3362
3363 if (usess == NULL || uchan == NULL) {
3364 ERR("Adding UST global channel to NULL values");
3365 ret = -1;
3366 goto error;
3367 }
3368
3369 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3370 uchan->name, usess->id);
3371
3372 rcu_read_lock();
3373
3374 /* For every registered applications */
3375 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3376 if (!app->compatible) {
3377 /*
3378 * TODO: In time, we should notice the caller of this error by
3379 * telling him that this is a version error.
3380 */
3381 continue;
3382 }
3383 ua_sess = lookup_session_by_app(usess, app);
3384 if (ua_sess == NULL) {
3385 continue;
3386 }
3387
3388 /* Enable channel onto application */
3389 ret = enable_ust_app_channel(ua_sess, uchan, app);
3390 if (ret < 0) {
3391 /* XXX: We might want to report this error at some point... */
3392 continue;
3393 }
3394 }
3395
3396 rcu_read_unlock();
3397
3398 error:
3399 return ret;
3400 }
3401
/*
 * Disable an event in a channel and for a specific session.
 *
 * Best effort across all registered apps: apps missing the session,
 * channel or event are simply skipped. Returns the result of the last
 * disable attempt (0 when none failed).
 */
int ust_app_disable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app disabling event %s for all apps in channel "
			"%s for session id %" PRIu64,
			uevent->attr.name, uchan->name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			/* Next app */
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
					"Skipping", uchan->name, usess->id, app->pid);
			continue;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Lookup the event by name inside the app channel. */
		lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
		ua_event_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_event_node == NULL) {
			DBG2("Event %s not found in channel %s for app pid %d."
					"Skipping", uevent->attr.name, uchan->name, app->pid);
			continue;
		}
		ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);

		ret = disable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* XXX: Report error someday... */
			continue;
		}
	}

	rcu_read_unlock();

	return ret;
}
3467
/*
 * For a specific UST session and UST channel, disable all events for all
 * registered apps.
 *
 * Returns the result of the last disable attempt (0 when none failed);
 * per-event and per-app failures do not abort the iteration.
 */
int ust_app_disable_all_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app disabling all event for all apps in channel "
			"%s for session id %" PRIu64, uchan->name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Disable each events of channel */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = disable_ust_app_event(ua_sess, ua_event, app);
			if (ret < 0) {
				/* XXX: Report error someday... */
				continue;
			}
		}
	}

	rcu_read_unlock();

	return ret;
}
3526
/*
 * For a specific UST session, create the channel for all registered apps.
 *
 * A per-app session is created on demand. The metadata channel is
 * special-cased and goes through create_ust_app_metadata(). -ENOMEM is
 * fatal and aborts the loop; a disconnected app (-ENOTCONN) is skipped.
 * Returns 0 on success or a negative error code.
 */
int ust_app_create_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0, created;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;

	/* Very wrong code flow */
	assert(usess);
	assert(uchan);

	DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/*
		 * Create session on the tracer side and add it to app session HT. Note
		 * that if session exist, it will simply return a pointer to the ust
		 * app session.
		 */
		ret = create_ust_app_session(usess, app, &ua_sess, &created);
		if (ret < 0) {
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
		assert(ua_sess);

		/* Serialize channel creation against other session operations. */
		pthread_mutex_lock(&ua_sess->lock);
		if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
					sizeof(uchan->name))) {
			struct ustctl_consumer_channel_attr attr;
			copy_channel_attr_to_ustctl(&attr, &uchan->attr);
			ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
					&attr);
		} else {
			/* Create channel onto application. We don't need the chan ref. */
			ret = create_ust_app_channel(ua_sess, uchan, app,
					LTTNG_UST_CHAN_PER_CPU, usess, NULL);
		}
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			if (ret == -ENOMEM) {
				/* No more memory is a fatal error. Stop right now. */
				goto error_rcu_unlock;
			}
			/* Cleanup the created session if it's the case. */
			if (created) {
				destroy_app_session(app, ua_sess);
			}
		}
	}

error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
3607
/*
 * Enable event for a specific session and channel on the tracer.
 *
 * The event is expected to already exist on each app (apps where it is
 * missing are skipped). Returns 0 on success, or the negative error of
 * the first failing enable (which aborts the iteration).
 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps. and enabled on the
	 * tracer also.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		/* Serialize against concurrent changes to this app session. */
		pthread_mutex_lock(&ua_sess->lock);

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d."
					"Skipping app", uevent->attr.name, app->pid);
			goto next_app;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* Error path bypasses next_app; unlock before bailing out. */
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
3680
/*
 * For a specific existing UST session and UST channel, creates the event for
 * all registered apps.
 *
 * -LTTNG_UST_ERR_EXIST on a given app is tolerated and skipped; any
 * other negative error (e.g. -ENOMEM) stops the iteration and is
 * returned.
 */
int ust_app_create_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	DBG("UST app creating event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		/* Hold the session lock across lookup and event creation. */
		pthread_mutex_lock(&ua_sess->lock);
		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			if (ret != -LTTNG_UST_ERR_EXIST) {
				/* Possible value at this point: -ENOMEM. If so, we stop! */
				break;
			}
			DBG2("UST app event %s already exist on app PID %d",
					uevent->attr.name, app->pid);
			continue;
		}
	}

	rcu_read_unlock();

	return ret;
}
3741
/*
 * Start tracing for a specific UST session and app.
 *
 * Takes and releases ua_sess->lock. Returns 0 on success — including
 * the benign cases where the app died (-EPIPE/-LTTNG_UST_ERR_EXITING)
 * or its session is being torn down — and -1 on real error.
 */
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	/* Create directories if consumer is LOCAL and has a path defined. */
	if (usess->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(usess->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
		if (ret < 0) {
			/* An already existing directory is not an error. */
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error_unlock;
			}
		}
	}

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer, NULL);
	if (ret < 0) {
		goto error_unlock;
	}

	health_code_update();

skip_setup:
	/* This start the UST tracing */
	ret = ustctl_start_session(app->sock, ua_sess->handle);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error starting tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app start session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	ret = ustctl_wait_quiescent(app->sock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
3841
/*
 * Stop tracing for a specific UST session and app.
 *
 * Takes and releases ua_sess->lock. Returns 0 on success (including a
 * dead app), -1 on error — notably when stop is requested on a session
 * that was never started. Pushes pending metadata before returning.
 */
static
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	ret = ustctl_stop_session(app->sock, ua_sess->handle);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error stopping tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app stop session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			goto end_unlock;
		}
		goto error_rcu_unlock;
	}

	health_code_update();

	/* Quiescent wait after stopping trace */
	ret = ustctl_wait_quiescent(app->sock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

	health_code_update();

	registry = get_session_registry(ua_sess);
	assert(registry);

	if (!registry->metadata_closed) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);
	}

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	/* NOTE: this label also releases the session lock, not just RCU. */
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
3929
/*
 * Flush buffers for a specific UST session and app.
 *
 * Iterates over every channel of the per-app session and asks the
 * tracer to flush its buffers. Per-channel failures are logged and
 * skipped; always returns 0.
 */
static
int ust_app_flush_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	DBG("Flushing buffers for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	health_code_update();

	/* Flushing buffers */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		health_code_update();
		/* Channels are expected to have been sent to the tracer already. */
		assert(ua_chan->is_sent);
		ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app PID %d channel %s flush failed with ret %d",
						app->pid, ua_chan->name, ret);
			} else {
				DBG3("UST app failed to flush %s. Application is dead.",
						ua_chan->name);
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally.
				 */
			}
			/* Continuing flushing all buffers */
			continue;
		}
	}

	health_code_update();

	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;
}
3990
3991 /*
3992 * Destroy a specific UST session in apps.
3993 */
3994 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
3995 {
3996 int ret;
3997 struct ust_app_session *ua_sess;
3998 struct lttng_ht_iter iter;
3999 struct lttng_ht_node_u64 *node;
4000
4001 DBG("Destroy tracing for ust app pid %d", app->pid);
4002
4003 rcu_read_lock();
4004
4005 if (!app->compatible) {
4006 goto end;
4007 }
4008
4009 __lookup_session_by_app(usess, app, &iter);
4010 node = lttng_ht_iter_get_node_u64(&iter);
4011 if (node == NULL) {
4012 /* Session is being or is deleted. */
4013 goto end;
4014 }
4015 ua_sess = caa_container_of(node, struct ust_app_session, node);
4016
4017 health_code_update();
4018 destroy_app_session(app, ua_sess);
4019
4020 health_code_update();
4021
4022 /* Quiescent wait after stopping trace */
4023 ret = ustctl_wait_quiescent(app->sock);
4024 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4025 ERR("UST app wait quiescent failed for app pid %d ret %d",
4026 app->pid, ret);
4027 }
4028 end:
4029 rcu_read_unlock();
4030 health_code_update();
4031 return 0;
4032 }
4033
4034 /*
4035 * Start tracing for the UST session.
4036 */
4037 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4038 {
4039 int ret = 0;
4040 struct lttng_ht_iter iter;
4041 struct ust_app *app;
4042
4043 DBG("Starting all UST traces");
4044
4045 rcu_read_lock();
4046
4047 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4048 ret = ust_app_start_trace(usess, app);
4049 if (ret < 0) {
4050 /* Continue to next apps even on error */
4051 continue;
4052 }
4053 }
4054
4055 rcu_read_unlock();
4056
4057 return 0;
4058 }
4059
/*
 * Stop tracing for the UST session.
 *
 * Stops the trace on every registered application, then flushes buffers
 * and pushes metadata: through the consumer for per-UID buffers, or
 * per application for per-PID buffers. Always returns 0.
 */
int ust_app_stop_trace_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Stopping all UST traces");

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = ust_app_stop_trace(usess, app);
		if (ret < 0) {
			/* Continue to next apps even on error */
			continue;
		}
	}

	/* Flush buffers and push metadata (for UID buffers). */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			if (!ust_session_reg->metadata_closed) {
				/* Push metadata. */
				(void) push_metadata(ust_session_reg, usess->consumer);
			}
		}

		break;
	}
	case LTTNG_BUFFER_PER_PID:
		/* Per-PID buffers: flush through each app individually. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ret = ust_app_flush_trace(usess, app);
			if (ret < 0) {
				/* Continue to next apps even on error */
				continue;
			}
		}
		break;
	default:
		assert(0);
		break;
	}

	rcu_read_unlock();

	return 0;
}
4138
4139 /*
4140 * Destroy app UST session.
4141 */
4142 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4143 {
4144 int ret = 0;
4145 struct lttng_ht_iter iter;
4146 struct ust_app *app;
4147
4148 DBG("Destroy all UST traces");
4149
4150 rcu_read_lock();
4151
4152 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4153 ret = destroy_trace(usess, app);
4154 if (ret < 0) {
4155 /* Continue to next apps even on error */
4156 continue;
4157 }
4158 }
4159
4160 rcu_read_unlock();
4161
4162 return 0;
4163 }
4164
/*
 * Add channels/events from UST global domain to registered apps at sock.
 *
 * Creates (or reuses) the per-app session, then replays the shadow copy
 * of the global domain onto the application: channels, their contexts
 * (in user insertion order) and events. Starts the trace if the session
 * is already running. On any error, the per-app session is destroyed.
 */
void ust_app_global_update(struct ltt_ust_session *usess, int sock)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	assert(usess);
	assert(sock >= 0);

	DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
			usess->id);

	rcu_read_lock();

	app = ust_app_find_by_sock(sock);
	if (app == NULL) {
		/*
		 * Application can be unregistered before so this is possible hence
		 * simply stopping the update.
		 */
		DBG3("UST app update failed to find app sock %d", sock);
		goto error;
	}

	if (!app->compatible) {
		goto error;
	}

	ret = create_ust_app_session(usess, app, &ua_sess, NULL);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/*
	 * We can iterate safely here over all UST app session since the create ust
	 * app session above made a shadow copy of the UST global domain from the
	 * ltt ust session.
	 */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		/*
		 * For a metadata channel, handle it differently.
		 */
		if (!strncmp(ua_chan->name, DEFAULT_METADATA_NAME,
					sizeof(ua_chan->name))) {
			ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
					&ua_chan->attr);
			if (ret < 0) {
				goto error_unlock;
			}
			/* Remove it from the hash table and continue!. */
			ret = lttng_ht_del(ua_sess->channels, &iter);
			assert(!ret);
			delete_ust_app_channel(-1, ua_chan, app);
			continue;
		} else {
			ret = do_create_channel(app, usess, ua_sess, ua_chan);
			if (ret < 0) {
				/*
				 * Stop everything. On error, the application failed, no more
				 * file descriptor are available or ENOMEM so stopping here is
				 * the only thing we can do for now.
				 */
				goto error_unlock;
			}
		}

		/*
		 * Add context using the list so they are enabled in the same order the
		 * user added them.
		 */
		cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
			ret = create_ust_channel_context(ua_chan, ua_ctx, app);
			if (ret < 0) {
				goto error_unlock;
			}
		}


		/* For each events */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
			if (ret < 0) {
				goto error_unlock;
			}
		}
	}

	pthread_mutex_unlock(&ua_sess->lock);

	/* Session already started: bring this late-comer app up to speed. */
	if (usess->start_trace) {
		ret = ust_app_start_trace(usess, app);
		if (ret < 0) {
			goto error;
		}

		DBG2("UST trace started for app pid %d", app->pid);
	}

	/* Everything went well at this point. */
	rcu_read_unlock();
	return;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
error:
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
	rcu_read_unlock();
	return;
}
4289
/*
 * Add context to a specific channel for global UST domain.
 *
 * Best effort across all registered apps: apps without the session or
 * channel are skipped. Returns the result of the last context creation
 * attempt (0 when none was attempted or the last one succeeded).
 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
{
	int ret = 0;
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = NULL;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Hold the session lock across lookup and context creation. */
		pthread_mutex_lock(&ua_sess->lock);
		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			goto next_app;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
				node);
		ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
		if (ret < 0) {
			goto next_app;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

	rcu_read_unlock();
	return ret;
}
4338
/*
 * Enable event for a channel from a UST session for a specific PID.
 *
 * Creates the event on the app if it does not exist yet, otherwise
 * enables the existing one. Returns 0 on success (including an app
 * without a session for usess), -1 if no app matches the PID, or a
 * negative error from the create/enable call.
 */
int ust_app_enable_event_pid(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);

	rcu_read_lock();

	app = ust_app_find_by_pid(pid);
	if (app == NULL) {
		ERR("UST app enable event per PID %d not found", pid);
		ret = -1;
		goto end;
	}

	if (!app->compatible) {
		ret = 0;
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (!ua_sess) {
		/* The application has problem or is probably dead. */
		ret = 0;
		goto end;
	}

	/* Hold the session lock across lookup and event creation/enabling. */
	pthread_mutex_lock(&ua_sess->lock);
	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	/* If the channel is not found, there is a code flow error */
	assert(ua_chan_node);

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	/* Match on name, filter, loglevel and exclusions. */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event == NULL) {
		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		if (ret < 0) {
			goto end_unlock;
		}
	} else {
		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			goto end_unlock;
		}
	}

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end:
	rcu_read_unlock();
	return ret;
}
4405
/*
 * Disable event for a channel from a UST session for a specific PID.
 *
 * Returns 0 on success (missing session, channel or event is treated as
 * "nothing to disable"), -1 if no app matches the PID, or a negative
 * error from disable_ust_app_event().
 */
int ust_app_disable_event_pid(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app disabling event %s for PID %d", uevent->attr.name, pid);

	rcu_read_lock();

	app = ust_app_find_by_pid(pid);
	if (app == NULL) {
		ERR("UST app disable event per PID %d not found", pid);
		ret = -1;
		goto error;
	}

	if (!app->compatible) {
		ret = 0;
		goto error;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (!ua_sess) {
		/* The application has problem or is probably dead. */
		goto error;
	}

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node == NULL) {
		/* Channel does not exist, skip disabling */
		goto error;
	}
	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	/* Lookup the event by name inside the app channel. */
	lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &iter);
	ua_event_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_event_node == NULL) {
		/* Event does not exist, skip disabling */
		goto error;
	}
	ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);

	ret = disable_ust_app_event(ua_sess, ua_event, app);
	if (ret < 0) {
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}
4468
4469 /*
4470 * Calibrate registered applications.
4471 */
4472 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4473 {
4474 int ret = 0;
4475 struct lttng_ht_iter iter;
4476 struct ust_app *app;
4477
4478 rcu_read_lock();
4479
4480 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4481 if (!app->compatible) {
4482 /*
4483 * TODO: In time, we should notice the caller of this error by
4484 * telling him that this is a version error.
4485 */
4486 continue;
4487 }
4488
4489 health_code_update();
4490
4491 ret = ustctl_calibrate(app->sock, calibrate);
4492 if (ret < 0) {
4493 switch (ret) {
4494 case -ENOSYS:
4495 /* Means that it's not implemented on the tracer side. */
4496 ret = 0;
4497 break;
4498 default:
4499 DBG2("Calibrate app PID %d returned with error %d",
4500 app->pid, ret);
4501 break;
4502 }
4503 }
4504 }
4505
4506 DBG("UST app global domain calibration finished");
4507
4508 rcu_read_unlock();
4509
4510 health_code_update();
4511
4512 return ret;
4513 }
4514
4515 /*
4516 * Receive registration and populate the given msg structure.
4517 *
4518 * On success return 0 else a negative value returned by the ustctl call.
4519 */
4520 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4521 {
4522 int ret;
4523 uint32_t pid, ppid, uid, gid;
4524
4525 assert(msg);
4526
4527 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4528 &pid, &ppid, &uid, &gid,
4529 &msg->bits_per_long,
4530 &msg->uint8_t_alignment,
4531 &msg->uint16_t_alignment,
4532 &msg->uint32_t_alignment,
4533 &msg->uint64_t_alignment,
4534 &msg->long_alignment,
4535 &msg->byte_order,
4536 msg->name);
4537 if (ret < 0) {
4538 switch (-ret) {
4539 case EPIPE:
4540 case ECONNRESET:
4541 case LTTNG_UST_ERR_EXITING:
4542 DBG3("UST app recv reg message failed. Application died");
4543 break;
4544 case LTTNG_UST_ERR_UNSUP_MAJOR:
4545 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
4546 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
4547 LTTNG_UST_ABI_MINOR_VERSION);
4548 break;
4549 default:
4550 ERR("UST app recv reg message failed with ret %d", ret);
4551 break;
4552 }
4553 goto error;
4554 }
4555 msg->pid = (pid_t) pid;
4556 msg->ppid = (pid_t) ppid;
4557 msg->uid = (uid_t) uid;
4558 msg->gid = (gid_t) gid;
4559
4560 error:
4561 return ret;
4562 }
4563
4564 /*
4565 * Return a ust app channel object using the application object and the channel
4566 * object descriptor has a key. If not found, NULL is returned. A RCU read side
4567 * lock MUST be acquired before calling this function.
4568 */
4569 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4570 int objd)
4571 {
4572 struct lttng_ht_node_ulong *node;
4573 struct lttng_ht_iter iter;
4574 struct ust_app_channel *ua_chan = NULL;
4575
4576 assert(app);
4577
4578 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4579 node = lttng_ht_iter_get_node_ulong(&iter);
4580 if (node == NULL) {
4581 DBG2("UST app channel find by objd %d not found", objd);
4582 goto error;
4583 }
4584
4585 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4586
4587 error:
4588 return ua_chan;
4589 }
4590
4591 /*
4592 * Reply to a register channel notification from an application on the notify
4593 * socket. The channel metadata is also created.
4594 *
4595 * The session UST registry lock is acquired in this function.
4596 *
4597 * On success 0 is returned else a negative value.
4598 */
4599 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4600 size_t nr_fields, struct ustctl_field *fields)
4601 {
4602 int ret, ret_code = 0;
4603 uint32_t chan_id, reg_count;
4604 uint64_t chan_reg_key;
4605 enum ustctl_channel_header type;
4606 struct ust_app *app;
4607 struct ust_app_channel *ua_chan;
4608 struct ust_app_session *ua_sess;
4609 struct ust_registry_session *registry;
4610 struct ust_registry_channel *chan_reg;
4611
4612 rcu_read_lock();
4613
4614 /* Lookup application. If not found, there is a code flow error. */
4615 app = find_app_by_notify_sock(sock);
4616 if (!app) {
4617 DBG("Application socket %d is being teardown. Abort event notify",
4618 sock);
4619 ret = 0;
4620 free(fields);
4621 goto error_rcu_unlock;
4622 }
4623
4624 /* Lookup channel by UST object descriptor. */
4625 ua_chan = find_channel_by_objd(app, cobjd);
4626 if (!ua_chan) {
4627 DBG("Application channel is being teardown. Abort event notify");
4628 ret = 0;
4629 free(fields);
4630 goto error_rcu_unlock;
4631 }
4632
4633 assert(ua_chan->session);
4634 ua_sess = ua_chan->session;
4635
4636 /* Get right session registry depending on the session buffer type. */
4637 registry = get_session_registry(ua_sess);
4638 assert(registry);
4639
4640 /* Depending on the buffer type, a different channel key is used. */
4641 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4642 chan_reg_key = ua_chan->tracing_channel_id;
4643 } else {
4644 chan_reg_key = ua_chan->key;
4645 }
4646
4647 pthread_mutex_lock(&registry->lock);
4648
4649 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4650 assert(chan_reg);
4651
4652 if (!chan_reg->register_done) {
4653 reg_count = ust_registry_get_event_count(chan_reg);
4654 if (reg_count < 31) {
4655 type = USTCTL_CHANNEL_HEADER_COMPACT;
4656 } else {
4657 type = USTCTL_CHANNEL_HEADER_LARGE;
4658 }
4659
4660 chan_reg->nr_ctx_fields = nr_fields;
4661 chan_reg->ctx_fields = fields;
4662 chan_reg->header_type = type;
4663 } else {
4664 /* Get current already assigned values. */
4665 type = chan_reg->header_type;
4666 free(fields);
4667 /* Set to NULL so the error path does not do a double free. */
4668 fields = NULL;
4669 }
4670 /* Channel id is set during the object creation. */
4671 chan_id = chan_reg->chan_id;
4672
4673 /* Append to metadata */
4674 if (!chan_reg->metadata_dumped) {
4675 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4676 if (ret_code) {
4677 ERR("Error appending channel metadata (errno = %d)", ret_code);
4678 goto reply;
4679 }
4680 }
4681
4682 reply:
4683 DBG3("UST app replying to register channel key %" PRIu64
4684 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4685 ret_code);
4686
4687 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4688 if (ret < 0) {
4689 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4690 ERR("UST app reply channel failed with ret %d", ret);
4691 } else {
4692 DBG3("UST app reply channel failed. Application died");
4693 }
4694 goto error;
4695 }
4696
4697 /* This channel registry registration is completed. */
4698 chan_reg->register_done = 1;
4699
4700 error:
4701 pthread_mutex_unlock(&registry->lock);
4702 error_rcu_unlock:
4703 rcu_read_unlock();
4704 if (ret) {
4705 free(fields);
4706 }
4707 return ret;
4708 }
4709
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * Ownership: this function receives ownership of 'sig', 'fields' and
 * 'model_emf_uri'. They are either freed here (teardown paths) or handed
 * off to ust_registry_create_event() which frees them as needed.
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
		char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being teardown. Abort event notify",
				sock);
		ret = 0;
		/* Application is gone; release the buffers we own. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being teardown. Abort event notify");
		ret = 0;
		/* Channel is gone; release the buffers we own. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Per-UID buffers key the registry on the tracing channel id. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
			model_emf_uri, ua_sess->buffer_type, &event_id,
			app);

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
4808
/*
 * Handle application notification through the given notify socket.
 *
 * Two commands are handled: event registration, which adds the event to
 * the session registry, and channel registration, which replies with the
 * channel id and header type.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		/* sig, fields and model_emf_uri are allocated by this call. */
		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
				&sig, &nr_fields, &fields, &model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownership of these variables and transfers
		 * them to the callee.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		/* fields is allocated by this call. */
		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership is transferred to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean them up.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
4906
4907 /*
4908 * Once the notify socket hangs up, this is called. First, it tries to find the
4909 * corresponding application. On failure, the call_rcu to close the socket is
4910 * executed. If an application is found, it tries to delete it from the notify
4911 * socket hash table. Whathever the result, it proceeds to the call_rcu.
4912 *
4913 * Note that an object needs to be allocated here so on ENOMEM failure, the
4914 * call RCU is not done but the rest of the cleanup is.
4915 */
4916 void ust_app_notify_sock_unregister(int sock)
4917 {
4918 int err_enomem = 0;
4919 struct lttng_ht_iter iter;
4920 struct ust_app *app;
4921 struct ust_app_notify_sock_obj *obj;
4922
4923 assert(sock >= 0);
4924
4925 rcu_read_lock();
4926
4927 obj = zmalloc(sizeof(*obj));
4928 if (!obj) {
4929 /*
4930 * An ENOMEM is kind of uncool. If this strikes we continue the
4931 * procedure but the call_rcu will not be called. In this case, we
4932 * accept the fd leak rather than possibly creating an unsynchronized
4933 * state between threads.
4934 *
4935 * TODO: The notify object should be created once the notify socket is
4936 * registered and stored independantely from the ust app object. The
4937 * tricky part is to synchronize the teardown of the application and
4938 * this notify object. Let's keep that in mind so we can avoid this
4939 * kind of shenanigans with ENOMEM in the teardown path.
4940 */
4941 err_enomem = 1;
4942 } else {
4943 obj->fd = sock;
4944 }
4945
4946 DBG("UST app notify socket unregister %d", sock);
4947
4948 /*
4949 * Lookup application by notify socket. If this fails, this means that the
4950 * hash table delete has already been done by the application
4951 * unregistration process so we can safely close the notify socket in a
4952 * call RCU.
4953 */
4954 app = find_app_by_notify_sock(sock);
4955 if (!app) {
4956 goto close_socket;
4957 }
4958
4959 iter.iter.node = &app->notify_sock_n.node;
4960
4961 /*
4962 * Whatever happens here either we fail or succeed, in both cases we have
4963 * to close the socket after a grace period to continue to the call RCU
4964 * here. If the deletion is successful, the application is not visible
4965 * anymore by other threads and is it fails it means that it was already
4966 * deleted from the hash table so either way we just have to close the
4967 * socket.
4968 */
4969 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4970
4971 close_socket:
4972 rcu_read_unlock();
4973
4974 /*
4975 * Close socket after a grace period to avoid for the socket to be reused
4976 * before the application object is freed creating potential race between
4977 * threads trying to add unique in the global hash table.
4978 */
4979 if (!err_enomem) {
4980 call_rcu(&obj->head, close_notify_sock_rcu);
4981 }
4982 }
4983
4984 /*
4985 * Destroy a ust app data structure and free its memory.
4986 */
4987 void ust_app_destroy(struct ust_app *app)
4988 {
4989 if (!app) {
4990 return;
4991 }
4992
4993 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4994 }
4995
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * Both per-UID and per-PID buffer types are handled: each data channel is
 * snapshotted first, then the metadata channel. 'nb_streams' is used to
 * split output->max_size evenly across streams; 0 disables the cap.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_snapshot_record(struct ltt_ust_session *usess,
		struct snapshot_output *output, int wait, unsigned int nb_streams)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char pathname[PATH_MAX];
	uint64_t max_stream_size = 0;

	assert(usess);
	assert(output);

	rcu_read_lock();

	/*
	 * Compute the maximum size of a single stream if a max size is asked by
	 * the caller.
	 */
	if (output->max_size > 0 && nb_streams > 0) {
		max_stream_size = output->max_size / nb_streams;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		/* One registry (and one trace path) per UID/bitness pair. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			/* Add the UST default trace dir to path. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {

				/*
				 * Make sure the maximum stream size is not lower than the
				 * subbuffer size or else it's an error since we won't be able to
				 * snapshot anything.
				 */
				if (max_stream_size &&
						reg_chan->subbuf_size > max_stream_size) {
					ret = -EINVAL;
					DBG3("UST app snapshot record maximum stream size %" PRIu64
							" is smaller than subbuffer size of %zu",
							max_stream_size, reg_chan->subbuf_size);
					goto error;
				}
				ret = consumer_snapshot_channel(socket, reg_chan->consumer_key, output, 0,
						usess->uid, usess->gid, pathname, wait,
						max_stream_size);
				if (ret < 0) {
					goto error;
				}
			}
			/* Metadata channel is snapshotted last (flag set to 1). */
			ret = consumer_snapshot_channel(socket, reg->registry->reg.ust->metadata_key, output,
					1, usess->uid, usess->gid, pathname, wait,
					max_stream_size);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		/* One session (and one trace path) per registered application. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					output->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
					ua_sess->path);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				/*
				 * Make sure the maximum stream size is not lower than the
				 * subbuffer size or else it's an error since we won't be able to
				 * snapshot anything.
				 */
				if (max_stream_size &&
						ua_chan->attr.subbuf_size > max_stream_size) {
					ret = -EINVAL;
					DBG3("UST app snapshot record maximum stream size %" PRIu64
							" is smaller than subbuffer size of %" PRIu64,
							max_stream_size, ua_chan->attr.subbuf_size);
					goto error;
				}

				ret = consumer_snapshot_channel(socket, ua_chan->key, output, 0,
						ua_sess->euid, ua_sess->egid, pathname, wait,
						max_stream_size);
				if (ret < 0) {
					goto error;
				}
			}

			/* Metadata channel is snapshotted last (flag set to 1). */
			registry = get_session_registry(ua_sess);
			assert(registry);
			ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
					1, ua_sess->euid, ua_sess->egid, pathname, wait,
					max_stream_size);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

error:
	rcu_read_unlock();
	return ret;
}
5159
5160 /*
5161 * Return the number of streams for a UST session.
5162 */
5163 unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
5164 {
5165 unsigned int ret = 0;
5166 struct ust_app *app;
5167 struct lttng_ht_iter iter;
5168
5169 assert(usess);
5170
5171 switch (usess->buffer_type) {
5172 case LTTNG_BUFFER_PER_UID:
5173 {
5174 struct buffer_reg_uid *reg;
5175
5176 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5177 struct buffer_reg_channel *reg_chan;
5178
5179 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5180 reg_chan, node.node) {
5181 ret += reg_chan->stream_count;
5182 }
5183 }
5184 break;
5185 }
5186 case LTTNG_BUFFER_PER_PID:
5187 {
5188 rcu_read_lock();
5189 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5190 struct ust_app_channel *ua_chan;
5191 struct ust_app_session *ua_sess;
5192 struct lttng_ht_iter chan_iter;
5193
5194 ua_sess = lookup_session_by_app(usess, app);
5195 if (!ua_sess) {
5196 /* Session not associated with this app. */
5197 continue;
5198 }
5199
5200 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5201 ua_chan, node.node) {
5202 ret += ua_chan->streams.count;
5203 }
5204 }
5205 rcu_read_unlock();
5206 break;
5207 }
5208 default:
5209 assert(0);
5210 break;
5211 }
5212
5213 return ret;
5214 }