Implement --shm-path option for UST sessions (per-uid channels)
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #define _LGPL_SOURCE
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <pthread.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <unistd.h>
29 #include <urcu/compiler.h>
30 #include <lttng/ust-error.h>
31 #include <signal.h>
32
33 #include <common/common.h>
34 #include <common/sessiond-comm/sessiond-comm.h>
35
36 #include "buffer-registry.h"
37 #include "fd-limit.h"
38 #include "health-sessiond.h"
39 #include "ust-app.h"
40 #include "ust-consumer.h"
41 #include "ust-ctl.h"
42 #include "utils.h"
43
/* Forward declaration; defined later in this file. */
static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
54
55 /*
56 * Return the incremented value of next_channel_key.
57 */
58 static uint64_t get_next_channel_key(void)
59 {
60 uint64_t ret;
61
62 pthread_mutex_lock(&next_channel_key_lock);
63 ret = ++_next_channel_key;
64 pthread_mutex_unlock(&next_channel_key_lock);
65 return ret;
66 }
67
68 /*
69 * Return the atomically incremented value of next_session_id.
70 */
71 static uint64_t get_next_session_id(void)
72 {
73 uint64_t ret;
74
75 pthread_mutex_lock(&next_session_id_lock);
76 ret = ++_next_session_id;
77 pthread_mutex_unlock(&next_session_id_lock);
78 return ret;
79 }
80
81 static void copy_channel_attr_to_ustctl(
82 struct ustctl_consumer_channel_attr *attr,
83 struct lttng_ust_channel_attr *uattr)
84 {
85 /* Copy event attributes since the layout is different. */
86 attr->subbuf_size = uattr->subbuf_size;
87 attr->num_subbuf = uattr->num_subbuf;
88 attr->overwrite = uattr->overwrite;
89 attr->switch_timer_interval = uattr->switch_timer_interval;
90 attr->read_timer_interval = uattr->read_timer_interval;
91 attr->output = uattr->output;
92 }
93
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes which are the
 * event name, the filter bytecode, the loglevel and the exclusions.
 *
 * Return 1 on match, 0 otherwise (cds_lfht match convention).
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (event->attr.loglevel != key->loglevel) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel == 0 && event->attr.loglevel == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exists, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exists, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}
168
169 /*
170 * Unique add of an ust app event in the given ht. This uses the custom
171 * ht_match_ust_app_event match function and the event name as hash.
172 */
173 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
174 struct ust_app_event *event)
175 {
176 struct cds_lfht_node *node_ptr;
177 struct ust_app_ht_key key;
178 struct lttng_ht *ht;
179
180 assert(ua_chan);
181 assert(ua_chan->events);
182 assert(event);
183
184 ht = ua_chan->events;
185 key.name = event->attr.name;
186 key.filter = event->filter;
187 key.loglevel = event->attr.loglevel;
188 key.exclusion = event->exclusion;
189
190 node_ptr = cds_lfht_add_unique(ht->ht,
191 ht->hash_fct(event->node.key, lttng_ht_seed),
192 ht_match_ust_app_event, &key, &event->node.node);
193 assert(node_ptr == &event->node.node);
194 }
195
196 /*
197 * Close the notify socket from the given RCU head object. This MUST be called
198 * through a call_rcu().
199 */
200 static void close_notify_sock_rcu(struct rcu_head *head)
201 {
202 int ret;
203 struct ust_app_notify_sock_obj *obj =
204 caa_container_of(head, struct ust_app_notify_sock_obj, head);
205
206 /* Must have a valid fd here. */
207 assert(obj->fd >= 0);
208
209 ret = close(obj->fd);
210 if (ret) {
211 ERR("close notify sock %d RCU", obj->fd);
212 }
213 lttng_fd_put(LTTNG_FD_APPS, 1);
214
215 free(obj);
216 }
217
218 /*
219 * Return the session registry according to the buffer type of the given
220 * session.
221 *
222 * A registry per UID object MUST exists before calling this function or else
223 * it assert() if not found. RCU read side lock must be acquired.
224 */
225 static struct ust_registry_session *get_session_registry(
226 struct ust_app_session *ua_sess)
227 {
228 struct ust_registry_session *registry = NULL;
229
230 assert(ua_sess);
231
232 switch (ua_sess->buffer_type) {
233 case LTTNG_BUFFER_PER_PID:
234 {
235 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
236 if (!reg_pid) {
237 goto error;
238 }
239 registry = reg_pid->registry->reg.ust;
240 break;
241 }
242 case LTTNG_BUFFER_PER_UID:
243 {
244 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
245 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
246 if (!reg_uid) {
247 goto error;
248 }
249 registry = reg_uid->registry->reg.ust;
250 break;
251 }
252 default:
253 assert(0);
254 };
255
256 error:
257 return registry;
258 }
259
260 /*
261 * Delete ust context safely. RCU read lock must be held before calling
262 * this function.
263 */
264 static
265 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
266 {
267 int ret;
268
269 assert(ua_ctx);
270
271 if (ua_ctx->obj) {
272 ret = ustctl_release_object(sock, ua_ctx->obj);
273 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
274 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
275 sock, ua_ctx->obj->handle, ret);
276 }
277 free(ua_ctx->obj);
278 }
279 free(ua_ctx);
280 }
281
282 /*
283 * Delete ust app event safely. RCU read lock must be held before calling
284 * this function.
285 */
286 static
287 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
288 {
289 int ret;
290
291 assert(ua_event);
292
293 free(ua_event->filter);
294 if (ua_event->exclusion != NULL)
295 free(ua_event->exclusion);
296 if (ua_event->obj != NULL) {
297 ret = ustctl_release_object(sock, ua_event->obj);
298 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
299 ERR("UST app sock %d release event obj failed with ret %d",
300 sock, ret);
301 }
302 free(ua_event->obj);
303 }
304 free(ua_event);
305 }
306
307 /*
308 * Release ust data object of the given stream.
309 *
310 * Return 0 on success or else a negative value.
311 */
312 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
313 {
314 int ret = 0;
315
316 assert(stream);
317
318 if (stream->obj) {
319 ret = ustctl_release_object(sock, stream->obj);
320 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
321 ERR("UST app sock %d release stream obj failed with ret %d",
322 sock, ret);
323 }
324 lttng_fd_put(LTTNG_FD_APPS, 2);
325 free(stream->obj);
326 }
327
328 return ret;
329 }
330
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	assert(stream);

	/* Best effort: the stream memory is freed regardless of the outcome. */
	(void) release_ust_app_stream(sock, stream);
	free(stream);
}
343
344 /*
345 * We need to execute ht_destroy outside of RCU read-side critical
346 * section and outside of call_rcu thread, so we postpone its execution
347 * using ht_cleanup_push. It is simpler than to change the semantic of
348 * the many callers of delete_ust_app_session().
349 */
350 static
351 void delete_ust_app_channel_rcu(struct rcu_head *head)
352 {
353 struct ust_app_channel *ua_chan =
354 caa_container_of(head, struct ust_app_channel, rcu_head);
355
356 ht_cleanup_push(ua_chan->ctx);
357 ht_cleanup_push(ua_chan->events);
358 free(ua_chan);
359 }
360
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * Wipes the channel's streams, contexts and events, drops the per-PID
 * registry entry when applicable, releases the application-side UST
 * object and finally frees the channel through call_rcu.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		/* EPIPE/EXITING simply mean the application is gone. */
		ret = ustctl_release_object(sock, ua_chan->obj);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	/* The hash tables are destroyed by the RCU callback (ht_cleanup_push). */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
425
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existance of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset;
	ssize_t ret_val;

	assert(registry);
	assert(socket);

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happens if no start has been done previously.
	 */
	if (!registry->metadata_key) {
		return 0;
	}

	/*
	 * On a push metadata error either the consumer is dead or the
	 * metadata channel has been destroyed because its endpoint
	 * might have died (e.g: relayd). If so, the metadata closed
	 * flag is set to 1 so we deny pushing metadata again which is
	 * not valid anymore on the consumer side.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	/* Only push the part that has not been sent yet. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			/*
			 * metadata_str is still NULL here; the consumer call below is
			 * made with a zero length on purpose.
			 */
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't send out. */
	memcpy(metadata_str, registry->metadata + offset, len);
	/*
	 * Account the bytes as sent before the actual push; rolled back below
	 * if the push fails.
	 */
	registry->metadata_len_sent += len;

push_data:
	ret = consumer_push_metadata(socket, registry->metadata_key,
			metadata_str, len, offset);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		}

		/*
		 * Update back the actual metadata len sent since it
		 * failed here.
		 */
		registry->metadata_len_sent -= len;
		ret_val = ret;
		goto error_push;
	}

	free(metadata_str);
	return len;

end:
error:
	/* ret_val is 0 here only in the benign "nothing to push" case. */
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
539
540 /*
541 * For a given application and session, push metadata to consumer.
542 * Either sock or consumer is required : if sock is NULL, the default
543 * socket to send the metadata is retrieved from consumer, if sock
544 * is not NULL we use it to send the metadata.
545 * RCU read-side lock must be held while calling this function,
546 * therefore ensuring existance of registry. It also ensures existance
547 * of socket throughout this function.
548 *
549 * Return 0 on success else a negative error.
550 */
551 static int push_metadata(struct ust_registry_session *registry,
552 struct consumer_output *consumer)
553 {
554 int ret_val;
555 ssize_t ret;
556 struct consumer_socket *socket;
557
558 assert(registry);
559 assert(consumer);
560
561 pthread_mutex_lock(&registry->lock);
562 if (registry->metadata_closed) {
563 ret_val = -EPIPE;
564 goto error;
565 }
566
567 /* Get consumer socket to use to push the metadata.*/
568 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
569 consumer);
570 if (!socket) {
571 ret_val = -1;
572 goto error;
573 }
574
575 ret = ust_app_push_metadata(registry, socket, 0);
576 if (ret < 0) {
577 ret_val = ret;
578 goto error;
579 }
580 pthread_mutex_unlock(&registry->lock);
581 return 0;
582
583 error:
584 pthread_mutex_unlock(&registry->lock);
585 return ret_val;
586 }
587
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);

	/* Nothing to close if never created or already closed. */
	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

	/* NOTE: the success path deliberately falls through to "error". */
error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be emit
	 * for this registry.
	 */
	registry->metadata_closed = 1;
end:
	pthread_mutex_unlock(&registry->lock);
	rcu_read_unlock();
	return ret;
}
639
640 /*
641 * We need to execute ht_destroy outside of RCU read-side critical
642 * section and outside of call_rcu thread, so we postpone its execution
643 * using ht_cleanup_push. It is simpler than to change the semantic of
644 * the many callers of delete_ust_app_session().
645 */
646 static
647 void delete_ust_app_session_rcu(struct rcu_head *head)
648 {
649 struct ust_app_session *ua_sess =
650 caa_container_of(head, struct ust_app_session, rcu_head);
651
652 ht_cleanup_push(ua_sess->channels);
653 free(ua_sess);
654 }
655
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The remaining metadata is pushed (and, for non per-UID buffers, the
 * metadata channel is closed) before the channels are torn down and
 * the session is freed through call_rcu.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	registry = get_session_registry(ua_sess);
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	/* Tear down every channel of the session. */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		/* EPIPE/EXITING simply mean the application is gone. */
		ret = ustctl_release_handle(sock, ua_sess->handle);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
	}
	pthread_mutex_unlock(&ua_sess->lock);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
717
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 *
 * Wipes every session of the application, schedules the cleanup of its
 * hash tables, then closes the application socket.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	sock = app->sock;
	/* Invalidate the socket in the structure before closing it below. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Hash table destruction is deferred to the ht-cleanup thread. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}
768
769 /*
770 * URCU intermediate call to delete an UST app.
771 */
772 static
773 void delete_ust_app_rcu(struct rcu_head *head)
774 {
775 struct lttng_ht_node_ulong *node =
776 caa_container_of(head, struct lttng_ht_node_ulong, head);
777 struct ust_app *app =
778 caa_container_of(node, struct ust_app, pid_n);
779
780 DBG3("Call RCU deleting app PID %d", app->pid);
781 delete_ust_app(app);
782 }
783
784 /*
785 * Delete the session from the application ht and delete the data structure by
786 * freeing every object inside and releasing them.
787 */
788 static void destroy_app_session(struct ust_app *app,
789 struct ust_app_session *ua_sess)
790 {
791 int ret;
792 struct lttng_ht_iter iter;
793
794 assert(app);
795 assert(ua_sess);
796
797 iter.iter.node = &ua_sess->node.node;
798 ret = lttng_ht_del(app->sessions, &iter);
799 if (ret) {
800 /* Already scheduled for teardown. */
801 goto end;
802 }
803
804 /* Once deleted, free the data structure. */
805 delete_ust_app_session(app->sock, ua_sess, app);
806
807 end:
808 return;
809 }
810
811 /*
812 * Alloc new UST app session.
813 */
814 static
815 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
816 {
817 struct ust_app_session *ua_sess;
818
819 /* Init most of the default value by allocating and zeroing */
820 ua_sess = zmalloc(sizeof(struct ust_app_session));
821 if (ua_sess == NULL) {
822 PERROR("malloc");
823 goto error_free;
824 }
825
826 ua_sess->handle = -1;
827 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
828 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
829 pthread_mutex_init(&ua_sess->lock, NULL);
830
831 return ua_sess;
832
833 error_free:
834 return NULL;
835 }
836
837 /*
838 * Alloc new UST app channel.
839 */
840 static
841 struct ust_app_channel *alloc_ust_app_channel(char *name,
842 struct ust_app_session *ua_sess,
843 struct lttng_ust_channel_attr *attr)
844 {
845 struct ust_app_channel *ua_chan;
846
847 /* Init most of the default value by allocating and zeroing */
848 ua_chan = zmalloc(sizeof(struct ust_app_channel));
849 if (ua_chan == NULL) {
850 PERROR("malloc");
851 goto error;
852 }
853
854 /* Setup channel name */
855 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
856 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
857
858 ua_chan->enabled = 1;
859 ua_chan->handle = -1;
860 ua_chan->session = ua_sess;
861 ua_chan->key = get_next_channel_key();
862 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
863 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
864 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
865
866 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
867 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
868
869 /* Copy attributes */
870 if (attr) {
871 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
872 ua_chan->attr.subbuf_size = attr->subbuf_size;
873 ua_chan->attr.num_subbuf = attr->num_subbuf;
874 ua_chan->attr.overwrite = attr->overwrite;
875 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
876 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
877 ua_chan->attr.output = attr->output;
878 }
879 /* By default, the channel is a per cpu channel. */
880 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
881
882 DBG3("UST app channel %s allocated", ua_chan->name);
883
884 return ua_chan;
885
886 error:
887 return NULL;
888 }
889
890 /*
891 * Allocate and initialize a UST app stream.
892 *
893 * Return newly allocated stream pointer or NULL on error.
894 */
895 struct ust_app_stream *ust_app_alloc_stream(void)
896 {
897 struct ust_app_stream *stream = NULL;
898
899 stream = zmalloc(sizeof(*stream));
900 if (stream == NULL) {
901 PERROR("zmalloc ust app stream");
902 goto error;
903 }
904
905 /* Zero could be a valid value for a handle so flag it to -1. */
906 stream->handle = -1;
907
908 error:
909 return stream;
910 }
911
912 /*
913 * Alloc new UST app event.
914 */
915 static
916 struct ust_app_event *alloc_ust_app_event(char *name,
917 struct lttng_ust_event *attr)
918 {
919 struct ust_app_event *ua_event;
920
921 /* Init most of the default value by allocating and zeroing */
922 ua_event = zmalloc(sizeof(struct ust_app_event));
923 if (ua_event == NULL) {
924 PERROR("malloc");
925 goto error;
926 }
927
928 ua_event->enabled = 1;
929 strncpy(ua_event->name, name, sizeof(ua_event->name));
930 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
931 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
932
933 /* Copy attributes */
934 if (attr) {
935 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
936 }
937
938 DBG3("UST app event %s allocated", ua_event->name);
939
940 return ua_event;
941
942 error:
943 return NULL;
944 }
945
946 /*
947 * Alloc new UST app context.
948 */
949 static
950 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
951 {
952 struct ust_app_ctx *ua_ctx;
953
954 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
955 if (ua_ctx == NULL) {
956 goto error;
957 }
958
959 CDS_INIT_LIST_HEAD(&ua_ctx->list);
960
961 if (uctx) {
962 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
963 }
964
965 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
966
967 error:
968 return ua_ctx;
969 }
970
971 /*
972 * Allocate a filter and copy the given original filter.
973 *
974 * Return allocated filter or NULL on error.
975 */
976 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
977 struct lttng_ust_filter_bytecode *orig_f)
978 {
979 struct lttng_ust_filter_bytecode *filter = NULL;
980
981 /* Copy filter bytecode */
982 filter = zmalloc(sizeof(*filter) + orig_f->len);
983 if (!filter) {
984 PERROR("zmalloc alloc ust app filter");
985 goto error;
986 }
987
988 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
989
990 error:
991 return filter;
992 }
993
994 /*
995 * Find an ust_app using the sock and return it. RCU read side lock must be
996 * held before calling this helper function.
997 */
998 struct ust_app *ust_app_find_by_sock(int sock)
999 {
1000 struct lttng_ht_node_ulong *node;
1001 struct lttng_ht_iter iter;
1002
1003 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1004 node = lttng_ht_iter_get_node_ulong(&iter);
1005 if (node == NULL) {
1006 DBG2("UST app find by sock %d not found", sock);
1007 goto error;
1008 }
1009
1010 return caa_container_of(node, struct ust_app, sock_n);
1011
1012 error:
1013 return NULL;
1014 }
1015
1016 /*
1017 * Find an ust_app using the notify sock and return it. RCU read side lock must
1018 * be held before calling this helper function.
1019 */
1020 static struct ust_app *find_app_by_notify_sock(int sock)
1021 {
1022 struct lttng_ht_node_ulong *node;
1023 struct lttng_ht_iter iter;
1024
1025 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1026 &iter);
1027 node = lttng_ht_iter_get_node_ulong(&iter);
1028 if (node == NULL) {
1029 DBG2("UST app find by notify sock %d not found", sock);
1030 goto error;
1031 }
1032
1033 return caa_container_of(node, struct ust_app, notify_sock_n);
1034
1035 error:
1036 return NULL;
1037 }
1038
/*
 * Lookup for an ust app event based on event name, filter bytecode,
 * the event loglevel and the exclusions.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		char *name, struct lttng_ust_filter_bytecode *filter, int loglevel,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel = loglevel;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = (struct lttng_ust_event_exclusion *)exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}
1077
1078 /*
1079 * Create the channel context on the tracer.
1080 *
1081 * Called with UST app session lock held.
1082 */
1083 static
1084 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1085 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1086 {
1087 int ret;
1088
1089 health_code_update();
1090
1091 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1092 ua_chan->obj, &ua_ctx->obj);
1093 if (ret < 0) {
1094 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1095 ERR("UST app create channel context failed for app (pid: %d) "
1096 "with ret %d", app->pid, ret);
1097 } else {
1098 /*
1099 * This is normal behavior, an application can die during the
1100 * creation process. Don't report an error so the execution can
1101 * continue normally.
1102 */
1103 ret = 0;
1104 DBG3("UST app disable event failed. Application is dead.");
1105 }
1106 goto error;
1107 }
1108
1109 ua_ctx->handle = ua_ctx->obj->handle;
1110
1111 DBG2("UST app context handle %d created successfully for channel %s",
1112 ua_ctx->handle, ua_chan->name);
1113
1114 error:
1115 health_code_update();
1116 return ret;
1117 }
1118
1119 /*
1120 * Set the filter on the tracer.
1121 */
1122 static
1123 int set_ust_event_filter(struct ust_app_event *ua_event,
1124 struct ust_app *app)
1125 {
1126 int ret;
1127
1128 health_code_update();
1129
1130 if (!ua_event->filter) {
1131 ret = 0;
1132 goto error;
1133 }
1134
1135 ret = ustctl_set_filter(app->sock, ua_event->filter,
1136 ua_event->obj);
1137 if (ret < 0) {
1138 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1139 ERR("UST app event %s filter failed for app (pid: %d) "
1140 "with ret %d", ua_event->attr.name, app->pid, ret);
1141 } else {
1142 /*
1143 * This is normal behavior, an application can die during the
1144 * creation process. Don't report an error so the execution can
1145 * continue normally.
1146 */
1147 ret = 0;
1148 DBG3("UST app filter event failed. Application is dead.");
1149 }
1150 goto error;
1151 }
1152
1153 DBG2("UST filter set successfully for event %s", ua_event->name);
1154
1155 error:
1156 health_code_update();
1157 return ret;
1158 }
1159
1160 /*
1161 * Set event exclusions on the tracer.
1162 */
1163 static
1164 int set_ust_event_exclusion(struct ust_app_event *ua_event,
1165 struct ust_app *app)
1166 {
1167 int ret;
1168
1169 health_code_update();
1170
1171 if (!ua_event->exclusion || !ua_event->exclusion->count) {
1172 ret = 0;
1173 goto error;
1174 }
1175
1176 ret = ustctl_set_exclusion(app->sock, ua_event->exclusion,
1177 ua_event->obj);
1178 if (ret < 0) {
1179 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1180 ERR("UST app event %s exclusions failed for app (pid: %d) "
1181 "with ret %d", ua_event->attr.name, app->pid, ret);
1182 } else {
1183 /*
1184 * This is normal behavior, an application can die during the
1185 * creation process. Don't report an error so the execution can
1186 * continue normally.
1187 */
1188 ret = 0;
1189 DBG3("UST app event exclusion failed. Application is dead.");
1190 }
1191 goto error;
1192 }
1193
1194 DBG2("UST exclusion set successfully for event %s", ua_event->name);
1195
1196 error:
1197 health_code_update();
1198 return ret;
1199 }
1200
1201 /*
1202 * Disable the specified event on to UST tracer for the UST session.
1203 */
1204 static int disable_ust_event(struct ust_app *app,
1205 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1206 {
1207 int ret;
1208
1209 health_code_update();
1210
1211 ret = ustctl_disable(app->sock, ua_event->obj);
1212 if (ret < 0) {
1213 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1214 ERR("UST app event %s disable failed for app (pid: %d) "
1215 "and session handle %d with ret %d",
1216 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1217 } else {
1218 /*
1219 * This is normal behavior, an application can die during the
1220 * creation process. Don't report an error so the execution can
1221 * continue normally.
1222 */
1223 ret = 0;
1224 DBG3("UST app disable event failed. Application is dead.");
1225 }
1226 goto error;
1227 }
1228
1229 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1230 ua_event->attr.name, app->pid);
1231
1232 error:
1233 health_code_update();
1234 return ret;
1235 }
1236
1237 /*
1238 * Disable the specified channel on to UST tracer for the UST session.
1239 */
1240 static int disable_ust_channel(struct ust_app *app,
1241 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1242 {
1243 int ret;
1244
1245 health_code_update();
1246
1247 ret = ustctl_disable(app->sock, ua_chan->obj);
1248 if (ret < 0) {
1249 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1250 ERR("UST app channel %s disable failed for app (pid: %d) "
1251 "and session handle %d with ret %d",
1252 ua_chan->name, app->pid, ua_sess->handle, ret);
1253 } else {
1254 /*
1255 * This is normal behavior, an application can die during the
1256 * creation process. Don't report an error so the execution can
1257 * continue normally.
1258 */
1259 ret = 0;
1260 DBG3("UST app disable channel failed. Application is dead.");
1261 }
1262 goto error;
1263 }
1264
1265 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1266 ua_chan->name, app->pid);
1267
1268 error:
1269 health_code_update();
1270 return ret;
1271 }
1272
1273 /*
1274 * Enable the specified channel on to UST tracer for the UST session.
1275 */
1276 static int enable_ust_channel(struct ust_app *app,
1277 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1278 {
1279 int ret;
1280
1281 health_code_update();
1282
1283 ret = ustctl_enable(app->sock, ua_chan->obj);
1284 if (ret < 0) {
1285 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1286 ERR("UST app channel %s enable failed for app (pid: %d) "
1287 "and session handle %d with ret %d",
1288 ua_chan->name, app->pid, ua_sess->handle, ret);
1289 } else {
1290 /*
1291 * This is normal behavior, an application can die during the
1292 * creation process. Don't report an error so the execution can
1293 * continue normally.
1294 */
1295 ret = 0;
1296 DBG3("UST app enable channel failed. Application is dead.");
1297 }
1298 goto error;
1299 }
1300
1301 ua_chan->enabled = 1;
1302
1303 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1304 ua_chan->name, app->pid);
1305
1306 error:
1307 health_code_update();
1308 return ret;
1309 }
1310
1311 /*
1312 * Enable the specified event on to UST tracer for the UST session.
1313 */
1314 static int enable_ust_event(struct ust_app *app,
1315 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1316 {
1317 int ret;
1318
1319 health_code_update();
1320
1321 ret = ustctl_enable(app->sock, ua_event->obj);
1322 if (ret < 0) {
1323 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1324 ERR("UST app event %s enable failed for app (pid: %d) "
1325 "and session handle %d with ret %d",
1326 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1327 } else {
1328 /*
1329 * This is normal behavior, an application can die during the
1330 * creation process. Don't report an error so the execution can
1331 * continue normally.
1332 */
1333 ret = 0;
1334 DBG3("UST app enable event failed. Application is dead.");
1335 }
1336 goto error;
1337 }
1338
1339 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1340 ua_event->attr.name, app->pid);
1341
1342 error:
1343 health_code_update();
1344 return ret;
1345 }
1346
/*
 * Send channel and stream buffer to application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * Send all streams to application. The safe iteration variant is
	 * required because each stream is unlinked and freed as soon as it
	 * has been handed over to the tracer.
	 */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
1392
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 *
 * Return 0 on success (or if the app died mid-call), negative error
 * code otherwise.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	/* Keep the tracer-assigned handle for later operations. */
	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_event_exclusion(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/*
	 * Events are created disabled on the tracer side, so explicitly
	 * enable the event here when it is meant to be enabled.
	 */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
				/* Fallthrough only possible in NDEBUG builds; assert aborts otherwise. */
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
1480
1481 /*
1482 * Copy data between an UST app event and a LTT event.
1483 */
1484 static void shadow_copy_event(struct ust_app_event *ua_event,
1485 struct ltt_ust_event *uevent)
1486 {
1487 size_t exclusion_alloc_size;
1488
1489 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1490 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1491
1492 ua_event->enabled = uevent->enabled;
1493
1494 /* Copy event attributes */
1495 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1496
1497 /* Copy filter bytecode */
1498 if (uevent->filter) {
1499 ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
1500 /* Filter might be NULL here in case of ENONEM. */
1501 }
1502
1503 /* Copy exclusion data */
1504 if (uevent->exclusion) {
1505 exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1506 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
1507 ua_event->exclusion = zmalloc(exclusion_alloc_size);
1508 if (ua_event->exclusion == NULL) {
1509 PERROR("malloc");
1510 } else {
1511 memcpy(ua_event->exclusion, uevent->exclusion,
1512 exclusion_alloc_size);
1513 }
1514 }
1515 }
1516
/*
 * Copy data between an UST app channel and a LTT channel.
 *
 * Copies the name, tracefile settings, buffer attributes, contexts and
 * events of 'uchan' into 'ua_chan'. Allocation failures on individual
 * contexts/events are skipped (best effort), not reported.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/* Duplicate each context of the LTT channel into the app channel. */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
		if (ua_ctx == NULL) {
			/* Best effort: skip this context on allocation failure. */
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				/* Best effort: skip this event on allocation failure. */
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1581
1582 /*
1583 * Copy data between a UST app session and a regular LTT session.
1584 */
1585 static void shadow_copy_session(struct ust_app_session *ua_sess,
1586 struct ltt_ust_session *usess, struct ust_app *app)
1587 {
1588 struct lttng_ht_node_str *ua_chan_node;
1589 struct lttng_ht_iter iter;
1590 struct ltt_ust_channel *uchan;
1591 struct ust_app_channel *ua_chan;
1592 time_t rawtime;
1593 struct tm *timeinfo;
1594 char datetime[16];
1595 int ret;
1596 char tmp_shm_path[PATH_MAX];
1597
1598 /* Get date and time for unique app path */
1599 time(&rawtime);
1600 timeinfo = localtime(&rawtime);
1601 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1602
1603 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1604
1605 ua_sess->tracing_id = usess->id;
1606 ua_sess->id = get_next_session_id();
1607 ua_sess->uid = app->uid;
1608 ua_sess->gid = app->gid;
1609 ua_sess->euid = usess->uid;
1610 ua_sess->egid = usess->gid;
1611 ua_sess->buffer_type = usess->buffer_type;
1612 ua_sess->bits_per_long = app->bits_per_long;
1613 /* There is only one consumer object per session possible. */
1614 ua_sess->consumer = usess->consumer;
1615 ua_sess->output_traces = usess->output_traces;
1616 ua_sess->live_timer_interval = usess->live_timer_interval;
1617 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
1618 &usess->metadata_attr);
1619
1620 switch (ua_sess->buffer_type) {
1621 case LTTNG_BUFFER_PER_PID:
1622 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1623 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1624 datetime);
1625 break;
1626 case LTTNG_BUFFER_PER_UID:
1627 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1628 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1629 break;
1630 default:
1631 assert(0);
1632 goto error;
1633 }
1634 if (ret < 0) {
1635 PERROR("asprintf UST shadow copy session");
1636 assert(0);
1637 goto error;
1638 }
1639
1640 strncpy(ua_sess->shm_path, usess->shm_path,
1641 sizeof(ua_sess->shm_path));
1642 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1643 if (ua_sess->shm_path[0]) {
1644 switch (ua_sess->buffer_type) {
1645 case LTTNG_BUFFER_PER_PID:
1646 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1647 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
1648 app->name, app->pid, datetime);
1649 break;
1650 case LTTNG_BUFFER_PER_UID:
1651 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1652 DEFAULT_UST_TRACE_UID_PATH,
1653 app->uid, app->bits_per_long);
1654 break;
1655 default:
1656 assert(0);
1657 goto error;
1658 }
1659 if (ret < 0) {
1660 PERROR("sprintf UST shadow copy session");
1661 assert(0);
1662 goto error;
1663 }
1664 strncat(ua_sess->shm_path, tmp_shm_path,
1665 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
1666 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1667 }
1668
1669 /* Iterate over all channels in global domain. */
1670 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1671 uchan, node.node) {
1672 struct lttng_ht_iter uiter;
1673
1674 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1675 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1676 if (ua_chan_node != NULL) {
1677 /* Session exist. Contiuing. */
1678 continue;
1679 }
1680
1681 DBG2("Channel %s not found on shadow session copy, creating it",
1682 uchan->name);
1683 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1684 if (ua_chan == NULL) {
1685 /* malloc failed FIXME: Might want to do handle ENOMEM .. */
1686 continue;
1687 }
1688 shadow_copy_channel(ua_chan, uchan);
1689 /*
1690 * The concept of metadata channel does not exist on the tracing
1691 * registry side of the session daemon so this can only be a per CPU
1692 * channel and not metadata.
1693 */
1694 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1695
1696 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1697 }
1698
1699 error:
1700 return;
1701 }
1702
/*
 * Lookup session wrapper.
 *
 * Queries the app's session hash table by the UST session id; the
 * caller extracts the resulting node from 'iter'.
 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
1713
1714 /*
1715 * Return ust app session from the app session hashtable using the UST session
1716 * id.
1717 */
1718 static struct ust_app_session *lookup_session_by_app(
1719 struct ltt_ust_session *usess, struct ust_app *app)
1720 {
1721 struct lttng_ht_iter iter;
1722 struct lttng_ht_node_u64 *node;
1723
1724 __lookup_session_by_app(usess, app, &iter);
1725 node = lttng_ht_iter_get_node_u64(&iter);
1726 if (node == NULL) {
1727 goto error;
1728 }
1729
1730 return caa_container_of(node, struct ust_app_session, node);
1731
1732 error:
1733 return NULL;
1734 }
1735
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 * The session's shm_path (possibly empty) is handed down so
		 * buffers can honor the --shm-path option.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
			ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists and is initialized: nothing to do. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_pid->shm_path,
			ua_sess->euid, ua_sess->egid);
	if (ret < 0) {
		/*
		 * reg_pid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_pid_destroy(reg_pid);
		goto error;
	}

	buffer_reg_pid_add(reg_pid);

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		/* Hand the (new or existing) registry back to the caller. */
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1800
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	/* Per-UID registries are keyed by session id, bitness and uid. */
	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 * The session's shm_path (possibly empty) is handed down so
		 * per-uid buffers honor the --shm-path option.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists and is initialized: nothing to do. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_uid->shm_path,
			usess->uid, usess->gid);
	if (ret < 0) {
		/*
		 * reg_uid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_uid_destroy(reg_uid, NULL);
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	buffer_reg_uid_add(reg_uid);

	DBG3("UST app buffer registry per UID created successfully");
end:
	if (regp) {
		/* Hand the (new or existing) registry back to the caller. */
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1867
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Make sure the buffer registry matching the buffering scheme exists. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* A handle of -1 means no tracer-side session exists yet. */
	if (ua_sess->handle == -1) {
		ret = ustctl_create_session(app->sock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* On success, ustctl_create_session() returns the session handle. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
1980
1981 /*
1982 * Match function for a hash table lookup of ust_app_ctx.
1983 *
1984 * It matches an ust app context based on the context type and, in the case
1985 * of perf counters, their name.
1986 */
1987 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
1988 {
1989 struct ust_app_ctx *ctx;
1990 const struct lttng_ust_context *key;
1991
1992 assert(node);
1993 assert(_key);
1994
1995 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
1996 key = _key;
1997
1998 /* Context type */
1999 if (ctx->ctx.ctx != key->ctx) {
2000 goto no_match;
2001 }
2002
2003 /* Check the name in the case of perf thread counters. */
2004 if (key->ctx == LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER) {
2005 if (strncmp(key->u.perf_counter.name,
2006 ctx->ctx.u.perf_counter.name,
2007 sizeof(key->u.perf_counter.name))) {
2008 goto no_match;
2009 }
2010 }
2011
2012 /* Match. */
2013 return 1;
2014
2015 no_match:
2016 return 0;
2017 }
2018
2019 /*
2020 * Lookup for an ust app context from an lttng_ust_context.
2021 *
2022 * Must be called while holding RCU read side lock.
2023 * Return an ust_app_ctx object or NULL on error.
2024 */
2025 static
2026 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2027 struct lttng_ust_context *uctx)
2028 {
2029 struct lttng_ht_iter iter;
2030 struct lttng_ht_node_ulong *node;
2031 struct ust_app_ctx *app_ctx = NULL;
2032
2033 assert(uctx);
2034 assert(ht);
2035
2036 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2037 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2038 ht_match_ust_app_ctx, uctx, &iter.iter);
2039 node = lttng_ht_iter_get_node_ulong(&iter);
2040 if (!node) {
2041 goto end;
2042 }
2043
2044 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2045
2046 end:
2047 return app_ctx;
2048 }
2049
2050 /*
2051 * Create a context for the channel on the tracer.
2052 *
2053 * Called with UST app session lock held and a RCU read side lock.
2054 */
2055 static
2056 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
2057 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
2058 struct ust_app *app)
2059 {
2060 int ret = 0;
2061 struct ust_app_ctx *ua_ctx;
2062
2063 DBG2("UST app adding context to channel %s", ua_chan->name);
2064
2065 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2066 if (ua_ctx) {
2067 ret = -EEXIST;
2068 goto error;
2069 }
2070
2071 ua_ctx = alloc_ust_app_ctx(uctx);
2072 if (ua_ctx == NULL) {
2073 /* malloc failed */
2074 ret = -1;
2075 goto error;
2076 }
2077
2078 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2079 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2080 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2081
2082 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2083 if (ret < 0) {
2084 goto error;
2085 }
2086
2087 error:
2088 return ret;
2089 }
2090
2091 /*
2092 * Enable on the tracer side a ust app event for the session and channel.
2093 *
2094 * Called with UST app session lock held.
2095 */
2096 static
2097 int enable_ust_app_event(struct ust_app_session *ua_sess,
2098 struct ust_app_event *ua_event, struct ust_app *app)
2099 {
2100 int ret;
2101
2102 ret = enable_ust_event(app, ua_sess, ua_event);
2103 if (ret < 0) {
2104 goto error;
2105 }
2106
2107 ua_event->enabled = 1;
2108
2109 error:
2110 return ret;
2111 }
2112
2113 /*
2114 * Disable on the tracer side a ust app event for the session and channel.
2115 */
2116 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2117 struct ust_app_event *ua_event, struct ust_app *app)
2118 {
2119 int ret;
2120
2121 ret = disable_ust_event(app, ua_sess, ua_event);
2122 if (ret < 0) {
2123 goto error;
2124 }
2125
2126 ua_event->enabled = 0;
2127
2128 error:
2129 return ret;
2130 }
2131
2132 /*
2133 * Lookup ust app channel for session and disable it on the tracer side.
2134 */
2135 static
2136 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2137 struct ust_app_channel *ua_chan, struct ust_app *app)
2138 {
2139 int ret;
2140
2141 ret = disable_ust_channel(app, ua_sess, ua_chan);
2142 if (ret < 0) {
2143 goto error;
2144 }
2145
2146 ua_chan->enabled = 0;
2147
2148 error:
2149 return ret;
2150 }
2151
2152 /*
2153 * Lookup ust app channel for session and enable it on the tracer side. This
2154 * MUST be called with a RCU read side lock acquired.
2155 */
2156 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2157 struct ltt_ust_channel *uchan, struct ust_app *app)
2158 {
2159 int ret = 0;
2160 struct lttng_ht_iter iter;
2161 struct lttng_ht_node_str *ua_chan_node;
2162 struct ust_app_channel *ua_chan;
2163
2164 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2165 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2166 if (ua_chan_node == NULL) {
2167 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2168 uchan->name, ua_sess->tracing_id);
2169 goto error;
2170 }
2171
2172 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2173
2174 ret = enable_ust_channel(app, ua_sess, ua_chan);
2175 if (ret < 0) {
2176 goto error;
2177 }
2178
2179 error:
2180 return ret;
2181 }
2182
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * File descriptors are reserved up front (1 for the channel, 2 per
 * expected stream) and released on the matching error labels.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call wil populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

error_destroy:
	/* Release the per-stream fd reservation taken above. */
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	/* Release the channel fd reservation. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2276
2277 /*
2278 * Duplicate the ust data object of the ust app stream and save it in the
2279 * buffer registry stream.
2280 *
2281 * Return 0 on success or else a negative value.
2282 */
2283 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2284 struct ust_app_stream *stream)
2285 {
2286 int ret;
2287
2288 assert(reg_stream);
2289 assert(stream);
2290
2291 /* Reserve the amount of file descriptor we need. */
2292 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2293 if (ret < 0) {
2294 ERR("Exhausted number of available FD upon duplicate stream");
2295 goto error;
2296 }
2297
2298 /* Duplicate object for stream once the original is in the registry. */
2299 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2300 reg_stream->obj.ust);
2301 if (ret < 0) {
2302 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2303 reg_stream->obj.ust, stream->obj, ret);
2304 lttng_fd_put(LTTNG_FD_APPS, 2);
2305 goto error;
2306 }
2307 stream->handle = stream->obj->handle;
2308
2309 error:
2310 return ret;
2311 }
2312
/*
 * Duplicate the ust data object of the ust app channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for the channel once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	/* Release the fd reserved above on failure. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2350
2351 /*
2352 * For a given channel buffer registry, setup all streams of the given ust
2353 * application channel.
2354 *
2355 * Return 0 on success or else a negative value.
2356 */
2357 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2358 struct ust_app_channel *ua_chan)
2359 {
2360 int ret = 0;
2361 struct ust_app_stream *stream, *stmp;
2362
2363 assert(reg_chan);
2364 assert(ua_chan);
2365
2366 DBG2("UST app setup buffer registry stream");
2367
2368 /* Send all streams to application. */
2369 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2370 struct buffer_reg_stream *reg_stream;
2371
2372 ret = buffer_reg_stream_create(&reg_stream);
2373 if (ret < 0) {
2374 goto error;
2375 }
2376
2377 /*
2378 * Keep original pointer and nullify it in the stream so the delete
2379 * stream call does not release the object.
2380 */
2381 reg_stream->obj.ust = stream->obj;
2382 stream->obj = NULL;
2383 buffer_reg_stream_add(reg_stream, reg_chan);
2384
2385 /* We don't need the streams anymore. */
2386 cds_list_del(&stream->list);
2387 delete_ust_app_stream(-1, stream);
2388 }
2389
2390 error:
2391 return ret;
2392 }
2393
2394 /*
2395 * Create a buffer registry channel for the given session registry and
2396 * application channel object. If regp pointer is valid, it's set with the
2397 * created object. Important, the created object is NOT added to the session
2398 * registry hash table.
2399 *
2400 * Return 0 on success else a negative value.
2401 */
2402 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2403 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2404 {
2405 int ret;
2406 struct buffer_reg_channel *reg_chan = NULL;
2407
2408 assert(reg_sess);
2409 assert(ua_chan);
2410
2411 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2412
2413 /* Create buffer registry channel. */
2414 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2415 if (ret < 0) {
2416 goto error_create;
2417 }
2418 assert(reg_chan);
2419 reg_chan->consumer_key = ua_chan->key;
2420 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
2421 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
2422
2423 /* Create and add a channel registry to session. */
2424 ret = ust_registry_channel_add(reg_sess->reg.ust,
2425 ua_chan->tracing_channel_id);
2426 if (ret < 0) {
2427 goto error;
2428 }
2429 buffer_reg_channel_add(reg_sess, reg_chan);
2430
2431 if (regp) {
2432 *regp = reg_chan;
2433 }
2434
2435 return 0;
2436
2437 error:
2438 /* Safe because the registry channel object was not added to any HT. */
2439 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2440 error_create:
2441 return ret;
2442 }
2443
2444 /*
2445 * Setup buffer registry channel for the given session registry and application
2446 * channel object. If regp pointer is valid, it's set with the created object.
2447 *
2448 * Return 0 on success else a negative value.
2449 */
2450 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2451 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2452 {
2453 int ret;
2454
2455 assert(reg_sess);
2456 assert(reg_chan);
2457 assert(ua_chan);
2458 assert(ua_chan->obj);
2459
2460 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2461
2462 /* Setup all streams for the registry. */
2463 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2464 if (ret < 0) {
2465 goto error;
2466 }
2467
2468 reg_chan->obj.ust = ua_chan->obj;
2469 ua_chan->obj = NULL;
2470
2471 return 0;
2472
2473 error:
2474 buffer_reg_channel_remove(reg_sess, reg_chan);
2475 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2476 return ret;
2477 }
2478
/*
 * Send buffer registry channel to the application.
 *
 * The registry channel UST object is duplicated into ua_chan and sent to the
 * application, followed by a duplicate of every registry stream. The stream
 * list is walked under the registry channel's stream_list_lock.
 *
 * Return 0 on success else a negative value.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	/* Clone the registry channel object into the application channel. */
	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		/* Temporary stream holding the duplicated UST object. */
		struct ust_app_stream stream;

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			/* Release the duplicated object before bailing out. */
			(void) release_ust_app_stream(-1, &stream);
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream);
	}
	/* Channel (and streams) were fully sent to the application. */
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
2540
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * The per-UID buffer registry is shared between all applications of the same
 * uid/bitness; the consumer-side channel is only created the first time the
 * channel is seen for this registry, subsequent calls only send the existing
 * buffers to the new application.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be found, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* First application for this channel: create everything. */
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
		if (ret < 0) {
			ERR("Error setting up UST channel \"%s\"",
				ua_chan->name);
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		/*
		 * Don't report error to the console, since it may be
		 * caused by application concurrently exiting.
		 */
		goto error;
	}

error:
	return ret;
}
2627
2628 /*
2629 * Create and send to the application the created buffers with per PID buffers.
2630 *
2631 * Return 0 on success else a negative value.
2632 */
2633 static int create_channel_per_pid(struct ust_app *app,
2634 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2635 struct ust_app_channel *ua_chan)
2636 {
2637 int ret;
2638 struct ust_registry_session *registry;
2639
2640 assert(app);
2641 assert(usess);
2642 assert(ua_sess);
2643 assert(ua_chan);
2644
2645 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2646
2647 rcu_read_lock();
2648
2649 registry = get_session_registry(ua_sess);
2650 assert(registry);
2651
2652 /* Create and add a new channel registry to session. */
2653 ret = ust_registry_channel_add(registry, ua_chan->key);
2654 if (ret < 0) {
2655 ERR("Error creating the UST channel \"%s\" registry instance",
2656 ua_chan->name);
2657 goto error;
2658 }
2659
2660 /* Create and get channel on the consumer side. */
2661 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2662 app->bits_per_long, registry);
2663 if (ret < 0) {
2664 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2665 ua_chan->name);
2666 goto error;
2667 }
2668
2669 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2670 if (ret < 0) {
2671 /*
2672 * Don't report error to the console, since it may be
2673 * caused by application concurrently exiting.
2674 */
2675 goto error;
2676 }
2677
2678 error:
2679 rcu_read_unlock();
2680 return ret;
2681 }
2682
2683 /*
2684 * From an already allocated ust app channel, create the channel buffers if
2685 * need and send it to the application. This MUST be called with a RCU read
2686 * side lock acquired.
2687 *
2688 * Return 0 on success or else a negative value.
2689 */
2690 static int do_create_channel(struct ust_app *app,
2691 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2692 struct ust_app_channel *ua_chan)
2693 {
2694 int ret;
2695
2696 assert(app);
2697 assert(usess);
2698 assert(ua_sess);
2699 assert(ua_chan);
2700
2701 /* Handle buffer type before sending the channel to the application. */
2702 switch (usess->buffer_type) {
2703 case LTTNG_BUFFER_PER_UID:
2704 {
2705 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2706 if (ret < 0) {
2707 goto error;
2708 }
2709 break;
2710 }
2711 case LTTNG_BUFFER_PER_PID:
2712 {
2713 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2714 if (ret < 0) {
2715 goto error;
2716 }
2717 break;
2718 }
2719 default:
2720 assert(0);
2721 ret = -EINVAL;
2722 goto error;
2723 }
2724
2725 /* Initialize ust objd object using the received handle and add it. */
2726 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2727 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2728
2729 /* If channel is not enabled, disable it on the tracer */
2730 if (!ua_chan->enabled) {
2731 ret = disable_ust_channel(app, ua_sess, ua_chan);
2732 if (ret < 0) {
2733 goto error;
2734 }
2735 }
2736
2737 error:
2738 return ret;
2739 }
2740
2741 /*
2742 * Create UST app channel and create it on the tracer. Set ua_chanp of the
2743 * newly created channel if not NULL.
2744 *
2745 * Called with UST app session lock and RCU read-side lock held.
2746 *
2747 * Return 0 on success or else a negative value.
2748 */
2749 static int create_ust_app_channel(struct ust_app_session *ua_sess,
2750 struct ltt_ust_channel *uchan, struct ust_app *app,
2751 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
2752 struct ust_app_channel **ua_chanp)
2753 {
2754 int ret = 0;
2755 struct lttng_ht_iter iter;
2756 struct lttng_ht_node_str *ua_chan_node;
2757 struct ust_app_channel *ua_chan;
2758
2759 /* Lookup channel in the ust app session */
2760 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2761 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2762 if (ua_chan_node != NULL) {
2763 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2764 goto end;
2765 }
2766
2767 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
2768 if (ua_chan == NULL) {
2769 /* Only malloc can fail here */
2770 ret = -ENOMEM;
2771 goto error_alloc;
2772 }
2773 shadow_copy_channel(ua_chan, uchan);
2774
2775 /* Set channel type. */
2776 ua_chan->attr.type = type;
2777
2778 ret = do_create_channel(app, usess, ua_sess, ua_chan);
2779 if (ret < 0) {
2780 goto error;
2781 }
2782
2783 DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
2784 app->pid);
2785
2786 /* Only add the channel if successful on the tracer side. */
2787 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
2788
2789 end:
2790 if (ua_chanp) {
2791 *ua_chanp = ua_chan;
2792 }
2793
2794 /* Everything went well. */
2795 return 0;
2796
2797 error:
2798 delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
2799 error_alloc:
2800 return ret;
2801 }
2802
2803 /*
2804 * Create UST app event and create it on the tracer side.
2805 *
2806 * Called with ust app session mutex held.
2807 */
2808 static
2809 int create_ust_app_event(struct ust_app_session *ua_sess,
2810 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
2811 struct ust_app *app)
2812 {
2813 int ret = 0;
2814 struct ust_app_event *ua_event;
2815
2816 /* Get event node */
2817 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
2818 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
2819 if (ua_event != NULL) {
2820 ret = -EEXIST;
2821 goto end;
2822 }
2823
2824 /* Does not exist so create one */
2825 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
2826 if (ua_event == NULL) {
2827 /* Only malloc can failed so something is really wrong */
2828 ret = -ENOMEM;
2829 goto end;
2830 }
2831 shadow_copy_event(ua_event, uevent);
2832
2833 /* Create it on the tracer side */
2834 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
2835 if (ret < 0) {
2836 /* Not found previously means that it does not exist on the tracer */
2837 assert(ret != -LTTNG_UST_ERR_EXIST);
2838 goto error;
2839 }
2840
2841 add_unique_ust_app_event(ua_chan, ua_event);
2842
2843 DBG2("UST app create event %s for PID %d completed", ua_event->name,
2844 app->pid);
2845
2846 end:
2847 return ret;
2848
2849 error:
2850 /* Valid. Calling here is already in a read side lock */
2851 delete_ust_app_event(-1, ua_event);
2852 return ret;
2853 }
2854
/*
 * Create UST metadata and open it on the tracer side.
 *
 * The registry lock is held for the whole operation to serialize metadata
 * key assignment against concurrent consumer requests. The local metadata
 * channel object is always freed before returning; after a successful
 * ask_channel, the consumer side owns the actual metadata channel which is
 * identified by registry->metadata_key.
 *
 * Called with UST app session lock held and RCU read side lock.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/*
	 * Need one fd for the channel.
	 *
	 * NOTE(review): on lttng_fd_get() failure we jump to "error" without
	 * freeing the metadata object allocated above — looks like a leak on
	 * this path; confirm against delete_ust_app_channel() semantics.
	 */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept their. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/* Release the channel fd and the local metadata object (consumer owns
	 * the real channel from here on, on success). */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
	return ret;
}
2952
2953 /*
2954 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2955 * acquired before calling this function.
2956 */
2957 struct ust_app *ust_app_find_by_pid(pid_t pid)
2958 {
2959 struct ust_app *app = NULL;
2960 struct lttng_ht_node_ulong *node;
2961 struct lttng_ht_iter iter;
2962
2963 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2964 node = lttng_ht_iter_get_node_ulong(&iter);
2965 if (node == NULL) {
2966 DBG2("UST app no found with pid %d", pid);
2967 goto error;
2968 }
2969
2970 DBG2("Found UST app by pid %d", pid);
2971
2972 app = caa_container_of(node, struct ust_app, pid_n);
2973
2974 error:
2975 return app;
2976 }
2977
/*
 * Allocate and init an UST app object using the registration information and
 * the command socket. This is called when the command socket connects to the
 * session daemon.
 *
 * The object is returned on success or else NULL.
 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	struct ust_app *lta = NULL;

	assert(msg);
	assert(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	/*
	 * Reject the registration if no consumerd of the application's bitness
	 * is available (the matching consumerd fd is still -EINVAL).
	 */
	if ((msg->bits_per_long == 64 &&
				(uatomic_read(&ust_consumerd64_fd) == -EINVAL))
			|| (msg->bits_per_long == 32 &&
				(uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
				"%d-bit long, but no consumerd for this size is available.\n",
				msg->name, msg->pid, msg->bits_per_long);
		goto error;
	}

	lta = zmalloc(sizeof(struct ust_app));
	if (lta == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Copy identity and ABI information from the registration message. */
	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	lta->bits_per_long = msg->bits_per_long;
	lta->uint8_t_alignment = msg->uint8_t_alignment;
	lta->uint16_t_alignment = msg->uint16_t_alignment;
	lta->uint32_t_alignment = msg->uint32_t_alignment;
	lta->uint64_t_alignment = msg->uint64_t_alignment;
	lta->long_alignment = msg->long_alignment;
	lta->byte_order = msg->byte_order;

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	/*
	 * NOTE(review): lttng_ht_new() return values are not checked here; a
	 * NULL hash table would crash later users (e.g. ust_app_add). Confirm
	 * whether an allocation-failure path should be added.
	 */
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	/* Notify socket is set later, once the notify connection is made. */
	lta->notify_sock = -1;

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);

error:
	return lta;
}
3049
/*
 * For a given application object, add it to every hash table.
 *
 * The application must already have a valid notify socket (set after the
 * notify connection is received), hence the assert below.
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	assert(app->notify_sock >= 0);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
3084
3085 /*
3086 * Set the application version into the object.
3087 *
3088 * Return 0 on success else a negative value either an errno code or a
3089 * LTTng-UST error code.
3090 */
3091 int ust_app_version(struct ust_app *app)
3092 {
3093 int ret;
3094
3095 assert(app);
3096
3097 ret = ustctl_tracer_version(app->sock, &app->version);
3098 if (ret < 0) {
3099 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3100 ERR("UST app %d version failed with ret %d", app->sock, ret);
3101 } else {
3102 DBG3("UST app %d version failed. Application is dead", app->sock);
3103 }
3104 }
3105
3106 return ret;
3107 }
3108
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter ust_app_sock_iter;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/*
	 * For per-PID buffers, perform "push metadata" and flush all
	 * application streams before removing app from hash tables,
	 * ensuring proper behavior of data_pending check.
	 * Remove sessions so they are not visible during deletion.
	 */
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		/* Per-PID buffers die with the app: flush them now. */
		if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
			(void) ust_app_flush_app_session(lta, ua_sess);
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}
		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);

		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Remove application from PID hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Free memory */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3223
/*
 * Fill events array with all events name of all registered apps.
 *
 * On success, *events points to a heap-allocated array owned by the caller
 * and the number of entries is returned. On error a negative value is
 * returned and no array is allocated.
 */
int ust_app_list_events(struct lttng_event **events)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event *tmp_event;

	/* Initial capacity; the array grows by doubling below. */
	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app events");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_tracepoint_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/* Open a tracepoint listing handle on the application. */
		handle = ustctl_tracepoint_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list events getting handle failed for app pid %d",
						app->pid);
			}
			continue;
		}

		/* Iterate the application's tracepoints until NOENT. */
		while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
					&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list get failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list get failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Continue normal execution.
					 */
					break;
				}
				free(tmp_event);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event *new_tmp_event;
				size_t new_nbmem;

				new_nbmem = nbmem << 1;
				DBG2("Reallocating event list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
					new_nbmem * sizeof(struct lttng_event));
				if (new_tmp_event == NULL) {
					PERROR("realloc ust app events");
					free(tmp_event);
					ret = -ENOMEM;
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
					(new_nbmem - nbmem) * sizeof(struct lttng_event));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}
			memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
			tmp_event[count].loglevel = uiter.loglevel;
			tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
			tmp_event[count].pid = app->pid;
			tmp_event[count].enabled = -1;
			count++;
		}
	}

	/* Transfer ownership of the array to the caller. */
	ret = count;
	*events = tmp_event;

	DBG2("UST app list events done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
3329
/*
 * Fill the fields array with every event field of all registered apps.
 *
 * On success, *fields points to a zmalloc'd array owned by the caller (the
 * caller must free() it) and the number of entries is returned. On error a
 * negative errno-style value is returned and *fields is left untouched.
 */
int ust_app_list_event_fields(struct lttng_event_field **fields)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event_field *tmp_event;

	/* Start with a fixed-size array; it is doubled below when it fills up. */
	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app event fields");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_field_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/* Open a field listing handle on the application's socket. */
		handle = ustctl_tracepoint_field_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list field getting handle failed for app pid %d",
						app->pid);
			}
			/* A dying/dead application is not a fatal error; skip it. */
			continue;
		}

		/* Iterate until the tracer reports the end of the list. */
		while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
					&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list field failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list field failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Reset list and count for next app.
					 */
					break;
				}
				/* Any other error is fatal for the listing: give up. */
				free(tmp_event);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event_field *new_tmp_event;
				size_t new_nbmem;

				/* Grow geometrically to keep realloc calls O(log n). */
				new_nbmem = nbmem << 1;
				DBG2("Reallocating event field list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
					new_nbmem * sizeof(struct lttng_event_field));
				if (new_tmp_event == NULL) {
					PERROR("realloc ust app event fields");
					free(tmp_event);
					ret = -ENOMEM;
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
					(new_nbmem - nbmem) * sizeof(struct lttng_event_field));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}

			memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
			/* Mapping between these enums matches 1 to 1. */
			tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
			tmp_event[count].nowrite = uiter.nowrite;

			memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
			tmp_event[count].event.loglevel = uiter.loglevel;
			tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
			tmp_event[count].event.pid = app->pid;
			/* -1: enabled state is not meaningful for a listing. */
			tmp_event[count].event.enabled = -1;
			count++;
		}
	}

	ret = count;
	*fields = tmp_event;

	DBG2("UST app list event fields done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
3441
/*
 * Free and clean all traceable apps of the global list.
 *
 * Should _NOT_ be called with RCU read-side lock held: the actual freeing of
 * the ust_app objects is deferred through call_rcu(), and the hash tables
 * themselves are handed to the ht-cleanup thread once emptied.
 */
void ust_app_clean_list(void)
{
	int ret;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	DBG2("UST app cleaning registered apps hash table");

	rcu_read_lock();

	/*
	 * Main PID-keyed table: removing an app here also schedules its
	 * destruction (delete_ust_app_rcu) after a grace period.
	 */
	if (ust_app_ht) {
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ret = lttng_ht_del(ust_app_ht, &iter);
			assert(!ret);
			call_rcu(&app->pid_n.head, delete_ust_app_rcu);
		}
	}

	/* Cleanup socket hash table */
	if (ust_app_ht_by_sock) {
		cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
				sock_n.node) {
			/* Only unlink; the app object is freed via the PID table above. */
			ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
			assert(!ret);
		}
	}

	/* Cleanup notify socket hash table */
	if (ust_app_ht_by_notify_sock) {
		cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
				notify_sock_n.node) {
			/* Only unlink; the app object is freed via the PID table above. */
			ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
			assert(!ret);
		}
	}
	rcu_read_unlock();

	/* Destroy is done only when the ht is empty */
	if (ust_app_ht) {
		ht_cleanup_push(ust_app_ht);
	}
	if (ust_app_ht_by_sock) {
		ht_cleanup_push(ust_app_ht_by_sock);
	}
	if (ust_app_ht_by_notify_sock) {
		ht_cleanup_push(ust_app_ht_by_notify_sock);
	}
}
3495
3496 /*
3497 * Init UST app hash table.
3498 */
3499 int ust_app_ht_alloc(void)
3500 {
3501 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3502 if (!ust_app_ht) {
3503 return -1;
3504 }
3505 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3506 if (!ust_app_ht_by_sock) {
3507 return -1;
3508 }
3509 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3510 if (!ust_app_ht_by_notify_sock) {
3511 return -1;
3512 }
3513 return 0;
3514 }
3515
3516 /*
3517 * For a specific UST session, disable the channel for all registered apps.
3518 */
3519 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3520 struct ltt_ust_channel *uchan)
3521 {
3522 int ret = 0;
3523 struct lttng_ht_iter iter;
3524 struct lttng_ht_node_str *ua_chan_node;
3525 struct ust_app *app;
3526 struct ust_app_session *ua_sess;
3527 struct ust_app_channel *ua_chan;
3528
3529 if (usess == NULL || uchan == NULL) {
3530 ERR("Disabling UST global channel with NULL values");
3531 ret = -1;
3532 goto error;
3533 }
3534
3535 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
3536 uchan->name, usess->id);
3537
3538 rcu_read_lock();
3539
3540 /* For every registered applications */
3541 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3542 struct lttng_ht_iter uiter;
3543 if (!app->compatible) {
3544 /*
3545 * TODO: In time, we should notice the caller of this error by
3546 * telling him that this is a version error.
3547 */
3548 continue;
3549 }
3550 ua_sess = lookup_session_by_app(usess, app);
3551 if (ua_sess == NULL) {
3552 continue;
3553 }
3554
3555 /* Get channel */
3556 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3557 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3558 /* If the session if found for the app, the channel must be there */
3559 assert(ua_chan_node);
3560
3561 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3562 /* The channel must not be already disabled */
3563 assert(ua_chan->enabled == 1);
3564
3565 /* Disable channel onto application */
3566 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3567 if (ret < 0) {
3568 /* XXX: We might want to report this error at some point... */
3569 continue;
3570 }
3571 }
3572
3573 rcu_read_unlock();
3574
3575 error:
3576 return ret;
3577 }
3578
3579 /*
3580 * For a specific UST session, enable the channel for all registered apps.
3581 */
3582 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3583 struct ltt_ust_channel *uchan)
3584 {
3585 int ret = 0;
3586 struct lttng_ht_iter iter;
3587 struct ust_app *app;
3588 struct ust_app_session *ua_sess;
3589
3590 if (usess == NULL || uchan == NULL) {
3591 ERR("Adding UST global channel to NULL values");
3592 ret = -1;
3593 goto error;
3594 }
3595
3596 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3597 uchan->name, usess->id);
3598
3599 rcu_read_lock();
3600
3601 /* For every registered applications */
3602 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3603 if (!app->compatible) {
3604 /*
3605 * TODO: In time, we should notice the caller of this error by
3606 * telling him that this is a version error.
3607 */
3608 continue;
3609 }
3610 ua_sess = lookup_session_by_app(usess, app);
3611 if (ua_sess == NULL) {
3612 continue;
3613 }
3614
3615 /* Enable channel onto application */
3616 ret = enable_ust_app_channel(ua_sess, uchan, app);
3617 if (ret < 0) {
3618 /* XXX: We might want to report this error at some point... */
3619 continue;
3620 }
3621 }
3622
3623 rcu_read_unlock();
3624
3625 error:
3626 return ret;
3627 }
3628
3629 /*
3630 * Disable an event in a channel and for a specific session.
3631 */
3632 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3633 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3634 {
3635 int ret = 0;
3636 struct lttng_ht_iter iter, uiter;
3637 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
3638 struct ust_app *app;
3639 struct ust_app_session *ua_sess;
3640 struct ust_app_channel *ua_chan;
3641 struct ust_app_event *ua_event;
3642
3643 DBG("UST app disabling event %s for all apps in channel "
3644 "%s for session id %" PRIu64,
3645 uevent->attr.name, uchan->name, usess->id);
3646
3647 rcu_read_lock();
3648
3649 /* For all registered applications */
3650 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3651 if (!app->compatible) {
3652 /*
3653 * TODO: In time, we should notice the caller of this error by
3654 * telling him that this is a version error.
3655 */
3656 continue;
3657 }
3658 ua_sess = lookup_session_by_app(usess, app);
3659 if (ua_sess == NULL) {
3660 /* Next app */
3661 continue;
3662 }
3663
3664 /* Lookup channel in the ust app session */
3665 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3666 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3667 if (ua_chan_node == NULL) {
3668 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
3669 "Skipping", uchan->name, usess->id, app->pid);
3670 continue;
3671 }
3672 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3673
3674 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
3675 ua_event_node = lttng_ht_iter_get_node_str(&uiter);
3676 if (ua_event_node == NULL) {
3677 DBG2("Event %s not found in channel %s for app pid %d."
3678 "Skipping", uevent->attr.name, uchan->name, app->pid);
3679 continue;
3680 }
3681 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
3682
3683 ret = disable_ust_app_event(ua_sess, ua_event, app);
3684 if (ret < 0) {
3685 /* XXX: Report error someday... */
3686 continue;
3687 }
3688 }
3689
3690 rcu_read_unlock();
3691
3692 return ret;
3693 }
3694
/*
 * For a specific UST session, create the channel for all registered apps.
 *
 * Creates (or reuses) the per-application session, then either updates the
 * session's metadata attributes (when the channel is the metadata channel)
 * or creates the channel on the tracer side. Returns 0 on success; -ENOMEM
 * aborts the whole loop, other per-app errors only clean up that app.
 */
int ust_app_create_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0, created;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;

	/* Very wrong code flow */
	assert(usess);
	assert(uchan);

	DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/*
		 * Create session on the tracer side and add it to app session HT. Note
		 * that if session exist, it will simply return a pointer to the ust
		 * app session.
		 */
		ret = create_ust_app_session(usess, app, &ua_sess, &created);
		if (ret < 0) {
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
		assert(ua_sess);

		pthread_mutex_lock(&ua_sess->lock);
		if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
					sizeof(uchan->name))) {
			/* Metadata channel: only record the attributes for later use. */
			copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
			ret = 0;
		} else {
			/* Create channel onto application. We don't need the chan ref. */
			ret = create_ust_app_channel(ua_sess, uchan, app,
					LTTNG_UST_CHAN_PER_CPU, usess, NULL);
		}
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			if (ret == -ENOMEM) {
				/* No more memory is a fatal error. Stop right now. */
				goto error_rcu_unlock;
			}
			/* Cleanup the created session if it's the case. */
			if (created) {
				/* Only tear down what this iteration itself created. */
				destroy_app_session(app, ua_sess);
			}
		}
	}

error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
3773
/*
 * Enable event for a specific session and channel on the tracer.
 *
 * Precondition: the session and channel must already exist for all apps (the
 * channel lookup asserts this). Apps missing the event are skipped; a tracer
 * error enabling the event aborts the loop and is returned.
 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps. and enabled on the
	 * tracer also.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		/* Serialize against concurrent session modifications. */
		pthread_mutex_lock(&ua_sess->lock);

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node; matched on name, filter, loglevel and exclusion. */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d."
					"Skipping app", uevent->attr.name, app->pid);
			goto next_app;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* Fatal for the whole operation: unlock and bail out. */
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
3846
3847 /*
3848 * For a specific existing UST session and UST channel, creates the event for
3849 * all registered apps.
3850 */
3851 int ust_app_create_event_glb(struct ltt_ust_session *usess,
3852 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3853 {
3854 int ret = 0;
3855 struct lttng_ht_iter iter, uiter;
3856 struct lttng_ht_node_str *ua_chan_node;
3857 struct ust_app *app;
3858 struct ust_app_session *ua_sess;
3859 struct ust_app_channel *ua_chan;
3860
3861 DBG("UST app creating event %s for all apps for session id %" PRIu64,
3862 uevent->attr.name, usess->id);
3863
3864 rcu_read_lock();
3865
3866 /* For all registered applications */
3867 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3868 if (!app->compatible) {
3869 /*
3870 * TODO: In time, we should notice the caller of this error by
3871 * telling him that this is a version error.
3872 */
3873 continue;
3874 }
3875 ua_sess = lookup_session_by_app(usess, app);
3876 if (!ua_sess) {
3877 /* The application has problem or is probably dead. */
3878 continue;
3879 }
3880
3881 pthread_mutex_lock(&ua_sess->lock);
3882 /* Lookup channel in the ust app session */
3883 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3884 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3885 /* If the channel is not found, there is a code flow error */
3886 assert(ua_chan_node);
3887
3888 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3889
3890 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
3891 pthread_mutex_unlock(&ua_sess->lock);
3892 if (ret < 0) {
3893 if (ret != -LTTNG_UST_ERR_EXIST) {
3894 /* Possible value at this point: -ENOMEM. If so, we stop! */
3895 break;
3896 }
3897 DBG2("UST app event %s already exist on app PID %d",
3898 uevent->attr.name, app->pid);
3899 continue;
3900 }
3901 }
3902
3903 rcu_read_unlock();
3904
3905 return ret;
3906 }
3907
/*
 * Start tracing for a specific UST session and app.
 *
 * On first start, creates the local trace directory (if needed) and the
 * application metadata; on restart those steps are skipped. Returns 0 on
 * success or when the app is dead/incompatible, -1 on a real error.
 */
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	/* Create directories if consumer is LOCAL and has a path defined. */
	if (usess->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(usess->consumer->dst.trace_path) > 0) {
		/* Created with the app's credentials so it can write there. */
		ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
		if (ret < 0) {
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error_unlock;
			}
		}
	}

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
	if (ret < 0) {
		goto error_unlock;
	}

	health_code_update();

skip_setup:
	/* This start the UST tracing */
	ret = ustctl_start_session(app->sock, ua_sess->handle);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error starting tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app start session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	ret = ustctl_wait_quiescent(app->sock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4007
/*
 * Stop tracing for a specific UST session and app.
 *
 * Stops the tracer session, waits for quiescence and pushes the app's
 * metadata to the consumer. Returns 0 on success or when the app is
 * dead/incompatible/has no session; -1 on error (including stop on a
 * session that was never started).
 */
static
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	ret = ustctl_stop_session(app->sock, ua_sess->handle);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error stopping tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app stop session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			goto end_unlock;
		}
		goto error_rcu_unlock;
	}

	health_code_update();

	/* Quiescent wait after stopping trace */
	ret = ustctl_wait_quiescent(app->sock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

	health_code_update();

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Push metadata for application before freeing the application. */
	(void) push_metadata(registry, ua_sess->consumer);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4093
/*
 * Flush all buffers of one application session on the consumer.
 *
 * Only meaningful for per-PID buffers; per-UID sessions must never reach
 * this function (asserted below). Returns 0 on success, -1 if any channel
 * flush failed (all channels are still attempted).
 */
static
int ust_app_flush_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret, retval = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Flushing app session buffers for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess->lock);

	health_code_update();

	/* Flushing buffers */
	/*
	 * NOTE(review): socket is not checked for NULL before use below;
	 * this presumably relies on a consumer always existing for the
	 * session's bitness — confirm against callers.
	 */
	socket = consumer_find_socket_by_bitness(app->bits_per_long,
			ua_sess->consumer);

	/* Flush buffers and push metadata. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
				node.node) {
			health_code_update();
			assert(ua_chan->is_sent);
			ret = consumer_flush_channel(socket, ua_chan->key);
			if (ret) {
				ERR("Error flushing consumer channel");
				retval = -1;
				/* Keep flushing the remaining channels. */
				continue;
			}
		}
		break;
	case LTTNG_BUFFER_PER_UID:
	default:
		/* Per-UID flushing is handled in ust_app_flush_session(). */
		assert(0);
		break;
	}

	health_code_update();

	pthread_mutex_unlock(&ua_sess->lock);

end_not_compatible:
	rcu_read_unlock();
	health_code_update();
	return retval;
}
4149
/*
 * Flush buffers for all applications for a specific UST session.
 * Called with UST session lock held.
 *
 * Per-UID: flush every registered channel of each UID buffer registry and
 * push its metadata. Per-PID: delegate to ust_app_flush_app_session() for
 * each app that has this session. Returns 0, or -1 on an unknown buffer
 * type (asserted).
 */
static
int ust_app_flush_session(struct ltt_ust_session *usess)

{
	int ret = 0;

	DBG("Flushing session buffers for all ust apps");

	rcu_read_lock();

	/* Flush buffers and push metadata. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		struct lttng_ht_iter iter;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			/* Push metadata. */
			(void) push_metadata(ust_session_reg, usess->consumer);
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		/* Per-PID: flush each app session individually (best effort). */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			(void) ust_app_flush_app_session(app, ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
4226
4227 /*
4228 * Destroy a specific UST session in apps.
4229 */
4230 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
4231 {
4232 int ret;
4233 struct ust_app_session *ua_sess;
4234 struct lttng_ht_iter iter;
4235 struct lttng_ht_node_u64 *node;
4236
4237 DBG("Destroy tracing for ust app pid %d", app->pid);
4238
4239 rcu_read_lock();
4240
4241 if (!app->compatible) {
4242 goto end;
4243 }
4244
4245 __lookup_session_by_app(usess, app, &iter);
4246 node = lttng_ht_iter_get_node_u64(&iter);
4247 if (node == NULL) {
4248 /* Session is being or is deleted. */
4249 goto end;
4250 }
4251 ua_sess = caa_container_of(node, struct ust_app_session, node);
4252
4253 health_code_update();
4254 destroy_app_session(app, ua_sess);
4255
4256 health_code_update();
4257
4258 /* Quiescent wait after stopping trace */
4259 ret = ustctl_wait_quiescent(app->sock);
4260 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4261 ERR("UST app wait quiescent failed for app pid %d ret %d",
4262 app->pid, ret);
4263 }
4264 end:
4265 rcu_read_unlock();
4266 health_code_update();
4267 return 0;
4268 }
4269
4270 /*
4271 * Start tracing for the UST session.
4272 */
4273 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4274 {
4275 int ret = 0;
4276 struct lttng_ht_iter iter;
4277 struct ust_app *app;
4278
4279 DBG("Starting all UST traces");
4280
4281 rcu_read_lock();
4282
4283 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4284 ret = ust_app_start_trace(usess, app);
4285 if (ret < 0) {
4286 /* Continue to next apps even on error */
4287 continue;
4288 }
4289 }
4290
4291 rcu_read_unlock();
4292
4293 return 0;
4294 }
4295
4296 /*
4297 * Start tracing for the UST session.
4298 * Called with UST session lock held.
4299 */
4300 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4301 {
4302 int ret = 0;
4303 struct lttng_ht_iter iter;
4304 struct ust_app *app;
4305
4306 DBG("Stopping all UST traces");
4307
4308 rcu_read_lock();
4309
4310 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4311 ret = ust_app_stop_trace(usess, app);
4312 if (ret < 0) {
4313 /* Continue to next apps even on error */
4314 continue;
4315 }
4316 }
4317
4318 (void) ust_app_flush_session(usess);
4319
4320 rcu_read_unlock();
4321
4322 return 0;
4323 }
4324
4325 /*
4326 * Destroy app UST session.
4327 */
4328 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4329 {
4330 int ret = 0;
4331 struct lttng_ht_iter iter;
4332 struct ust_app *app;
4333
4334 DBG("Destroy all UST traces");
4335
4336 rcu_read_lock();
4337
4338 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4339 ret = destroy_trace(usess, app);
4340 if (ret < 0) {
4341 /* Continue to next apps even on error */
4342 continue;
4343 }
4344 }
4345
4346 rcu_read_unlock();
4347
4348 return 0;
4349 }
4350
/*
 * Add channels/events from UST global domain to registered apps at sock.
 *
 * Creates (or reuses) the app session, then recreates every channel, context
 * and event of that session on the tracer side, and finally starts tracing
 * if the session is active. On error the app session is destroyed.
 */
void ust_app_global_update(struct ltt_ust_session *usess, int sock)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	assert(usess);
	assert(sock >= 0);

	DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
			usess->id);

	rcu_read_lock();

	app = ust_app_find_by_sock(sock);
	if (app == NULL) {
		/*
		 * Application can be unregistered before so this is possible hence
		 * simply stopping the update.
		 */
		DBG3("UST app update failed to find app sock %d", sock);
		goto error;
	}

	if (!app->compatible) {
		goto error;
	}

	/* Get or create the shadow-copied app session for this UST session. */
	ret = create_ust_app_session(usess, app, &ua_sess, NULL);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/*
	 * We can iterate safely here over all UST app session since the create ust
	 * app session above made a shadow copy of the UST global domain from the
	 * ltt ust session.
	 */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = do_create_channel(app, usess, ua_sess, ua_chan);
		if (ret < 0) {
			/*
			 * Stop everything. On error, the application failed, no more
			 * file descriptor are available or ENOMEM so stopping here is
			 * the only thing we can do for now.
			 */
			goto error_unlock;
		}

		/*
		 * Add context using the list so they are enabled in the same order the
		 * user added them.
		 */
		cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
			ret = create_ust_channel_context(ua_chan, ua_ctx, app);
			if (ret < 0) {
				goto error_unlock;
			}
		}


		/* For each events */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
			if (ret < 0) {
				goto error_unlock;
			}
		}
	}

	pthread_mutex_unlock(&ua_sess->lock);

	/* If the session is already active, start tracing for this app now. */
	if (usess->active) {
		ret = ust_app_start_trace(usess, app);
		if (ret < 0) {
			goto error;
		}

		DBG2("UST trace started for app pid %d", app->pid);
	}

	/* Everything went well at this point. */
	rcu_read_unlock();
	return;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
error:
	/* Tear down the partially-built app session, if any. */
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
	rcu_read_unlock();
	return;
}
4458
/*
 * Add context to a specific channel for global UST domain.
 *
 * Iterate over every registered application; for each one tracing the given
 * UST session, look up the channel by name and create the context on it.
 * Applications that are incompatible, not tracing the session, or that have
 * not created the channel yet are skipped.
 *
 * The RCU read-side lock protects the application hash table walk; the
 * per-application session lock is held while touching that app's channels.
 *
 * Return 0 on success. On a failed context creation the loop moves on to the
 * next application, so the returned value reflects the last attempt.
 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
{
	int ret = 0;
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = NULL;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			/* This application is not tracing the session; skip it. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);
		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			/* Channel not instantiated for this app; nothing to do. */
			goto next_app;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
				node);
		ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
		if (ret < 0) {
			/* Keep going: other applications may still accept the context. */
			goto next_app;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

	rcu_read_unlock();
	return ret;
}
4507
4508 /*
4509 * Enable event for a channel from a UST session for a specific PID.
4510 */
4511 int ust_app_enable_event_pid(struct ltt_ust_session *usess,
4512 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4513 {
4514 int ret = 0;
4515 struct lttng_ht_iter iter;
4516 struct lttng_ht_node_str *ua_chan_node;
4517 struct ust_app *app;
4518 struct ust_app_session *ua_sess;
4519 struct ust_app_channel *ua_chan;
4520 struct ust_app_event *ua_event;
4521
4522 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
4523
4524 rcu_read_lock();
4525
4526 app = ust_app_find_by_pid(pid);
4527 if (app == NULL) {
4528 ERR("UST app enable event per PID %d not found", pid);
4529 ret = -1;
4530 goto end;
4531 }
4532
4533 if (!app->compatible) {
4534 ret = 0;
4535 goto end;
4536 }
4537
4538 ua_sess = lookup_session_by_app(usess, app);
4539 if (!ua_sess) {
4540 /* The application has problem or is probably dead. */
4541 ret = 0;
4542 goto end;
4543 }
4544
4545 pthread_mutex_lock(&ua_sess->lock);
4546 /* Lookup channel in the ust app session */
4547 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4548 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4549 /* If the channel is not found, there is a code flow error */
4550 assert(ua_chan_node);
4551
4552 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4553
4554 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4555 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
4556 if (ua_event == NULL) {
4557 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4558 if (ret < 0) {
4559 goto end_unlock;
4560 }
4561 } else {
4562 ret = enable_ust_app_event(ua_sess, ua_event, app);
4563 if (ret < 0) {
4564 goto end_unlock;
4565 }
4566 }
4567
4568 end_unlock:
4569 pthread_mutex_unlock(&ua_sess->lock);
4570 end:
4571 rcu_read_unlock();
4572 return ret;
4573 }
4574
4575 /*
4576 * Calibrate registered applications.
4577 */
4578 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4579 {
4580 int ret = 0;
4581 struct lttng_ht_iter iter;
4582 struct ust_app *app;
4583
4584 rcu_read_lock();
4585
4586 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4587 if (!app->compatible) {
4588 /*
4589 * TODO: In time, we should notice the caller of this error by
4590 * telling him that this is a version error.
4591 */
4592 continue;
4593 }
4594
4595 health_code_update();
4596
4597 ret = ustctl_calibrate(app->sock, calibrate);
4598 if (ret < 0) {
4599 switch (ret) {
4600 case -ENOSYS:
4601 /* Means that it's not implemented on the tracer side. */
4602 ret = 0;
4603 break;
4604 default:
4605 DBG2("Calibrate app PID %d returned with error %d",
4606 app->pid, ret);
4607 break;
4608 }
4609 }
4610 }
4611
4612 DBG("UST app global domain calibration finished");
4613
4614 rcu_read_unlock();
4615
4616 health_code_update();
4617
4618 return ret;
4619 }
4620
4621 /*
4622 * Receive registration and populate the given msg structure.
4623 *
4624 * On success return 0 else a negative value returned by the ustctl call.
4625 */
4626 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4627 {
4628 int ret;
4629 uint32_t pid, ppid, uid, gid;
4630
4631 assert(msg);
4632
4633 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4634 &pid, &ppid, &uid, &gid,
4635 &msg->bits_per_long,
4636 &msg->uint8_t_alignment,
4637 &msg->uint16_t_alignment,
4638 &msg->uint32_t_alignment,
4639 &msg->uint64_t_alignment,
4640 &msg->long_alignment,
4641 &msg->byte_order,
4642 msg->name);
4643 if (ret < 0) {
4644 switch (-ret) {
4645 case EPIPE:
4646 case ECONNRESET:
4647 case LTTNG_UST_ERR_EXITING:
4648 DBG3("UST app recv reg message failed. Application died");
4649 break;
4650 case LTTNG_UST_ERR_UNSUP_MAJOR:
4651 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
4652 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
4653 LTTNG_UST_ABI_MINOR_VERSION);
4654 break;
4655 default:
4656 ERR("UST app recv reg message failed with ret %d", ret);
4657 break;
4658 }
4659 goto error;
4660 }
4661 msg->pid = (pid_t) pid;
4662 msg->ppid = (pid_t) ppid;
4663 msg->uid = (uid_t) uid;
4664 msg->gid = (gid_t) gid;
4665
4666 error:
4667 return ret;
4668 }
4669
4670 /*
4671 * Return a ust app channel object using the application object and the channel
4672 * object descriptor has a key. If not found, NULL is returned. A RCU read side
4673 * lock MUST be acquired before calling this function.
4674 */
4675 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4676 int objd)
4677 {
4678 struct lttng_ht_node_ulong *node;
4679 struct lttng_ht_iter iter;
4680 struct ust_app_channel *ua_chan = NULL;
4681
4682 assert(app);
4683
4684 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4685 node = lttng_ht_iter_get_node_ulong(&iter);
4686 if (node == NULL) {
4687 DBG2("UST app channel find by objd %d not found", objd);
4688 goto error;
4689 }
4690
4691 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4692
4693 error:
4694 return ua_chan;
4695 }
4696
4697 /*
4698 * Reply to a register channel notification from an application on the notify
4699 * socket. The channel metadata is also created.
4700 *
4701 * The session UST registry lock is acquired in this function.
4702 *
4703 * On success 0 is returned else a negative value.
4704 */
4705 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4706 size_t nr_fields, struct ustctl_field *fields)
4707 {
4708 int ret, ret_code = 0;
4709 uint32_t chan_id, reg_count;
4710 uint64_t chan_reg_key;
4711 enum ustctl_channel_header type;
4712 struct ust_app *app;
4713 struct ust_app_channel *ua_chan;
4714 struct ust_app_session *ua_sess;
4715 struct ust_registry_session *registry;
4716 struct ust_registry_channel *chan_reg;
4717
4718 rcu_read_lock();
4719
4720 /* Lookup application. If not found, there is a code flow error. */
4721 app = find_app_by_notify_sock(sock);
4722 if (!app) {
4723 DBG("Application socket %d is being teardown. Abort event notify",
4724 sock);
4725 ret = 0;
4726 free(fields);
4727 goto error_rcu_unlock;
4728 }
4729
4730 /* Lookup channel by UST object descriptor. */
4731 ua_chan = find_channel_by_objd(app, cobjd);
4732 if (!ua_chan) {
4733 DBG("Application channel is being teardown. Abort event notify");
4734 ret = 0;
4735 free(fields);
4736 goto error_rcu_unlock;
4737 }
4738
4739 assert(ua_chan->session);
4740 ua_sess = ua_chan->session;
4741
4742 /* Get right session registry depending on the session buffer type. */
4743 registry = get_session_registry(ua_sess);
4744 assert(registry);
4745
4746 /* Depending on the buffer type, a different channel key is used. */
4747 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4748 chan_reg_key = ua_chan->tracing_channel_id;
4749 } else {
4750 chan_reg_key = ua_chan->key;
4751 }
4752
4753 pthread_mutex_lock(&registry->lock);
4754
4755 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4756 assert(chan_reg);
4757
4758 if (!chan_reg->register_done) {
4759 reg_count = ust_registry_get_event_count(chan_reg);
4760 if (reg_count < 31) {
4761 type = USTCTL_CHANNEL_HEADER_COMPACT;
4762 } else {
4763 type = USTCTL_CHANNEL_HEADER_LARGE;
4764 }
4765
4766 chan_reg->nr_ctx_fields = nr_fields;
4767 chan_reg->ctx_fields = fields;
4768 chan_reg->header_type = type;
4769 } else {
4770 /* Get current already assigned values. */
4771 type = chan_reg->header_type;
4772 free(fields);
4773 /* Set to NULL so the error path does not do a double free. */
4774 fields = NULL;
4775 }
4776 /* Channel id is set during the object creation. */
4777 chan_id = chan_reg->chan_id;
4778
4779 /* Append to metadata */
4780 if (!chan_reg->metadata_dumped) {
4781 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4782 if (ret_code) {
4783 ERR("Error appending channel metadata (errno = %d)", ret_code);
4784 goto reply;
4785 }
4786 }
4787
4788 reply:
4789 DBG3("UST app replying to register channel key %" PRIu64
4790 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4791 ret_code);
4792
4793 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4794 if (ret < 0) {
4795 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4796 ERR("UST app reply channel failed with ret %d", ret);
4797 } else {
4798 DBG3("UST app reply channel failed. Application died");
4799 }
4800 goto error;
4801 }
4802
4803 /* This channel registry registration is completed. */
4804 chan_reg->register_done = 1;
4805
4806 error:
4807 pthread_mutex_unlock(&registry->lock);
4808 error_rcu_unlock:
4809 rcu_read_unlock();
4810 if (ret) {
4811 free(fields);
4812 }
4813 return ret;
4814 }
4815
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * This function takes ownership of sig, fields and model_emf_uri: they are
 * either freed on the early-teardown paths or handed off, with their
 * ownership, to ust_registry_create_event().
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
		char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being teardown. Abort event notify",
				sock);
		ret = 0;
		/* We own these buffers; release them before bailing out. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being teardown. Abort event notify");
		ret = 0;
		/* We own these buffers; release them before bailing out. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Per-UID buffers are keyed by the tracing channel id, per-PID by key. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
			model_emf_uri, ua_sess->buffer_type, &event_id,
			app);

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
4914
/*
 * Handle application notification through the given notify socket.
 *
 * Receives the notification command then dispatches to either event or
 * channel registration handling. In both cases the ownership of the buffers
 * received from ustctl (sig, fields, model_emf_uri) is transferred to the
 * callee, which frees them as needed.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
				&sig, &nr_fields, &fields, &model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownsership of these variables and transfer them
		 * to the this function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership are transfered to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean it up.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
5012
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whathever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	/* Holder passed to call_rcu so the fd is closed after a grace period. */
	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independantely from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and is it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
5089
5090 /*
5091 * Destroy a ust app data structure and free its memory.
5092 */
5093 void ust_app_destroy(struct ust_app *app)
5094 {
5095 if (!app) {
5096 return;
5097 }
5098
5099 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5100 }
5101
5102 /*
5103 * Take a snapshot for a given UST session. The snapshot is sent to the given
5104 * output.
5105 *
5106 * Return 0 on success or else a negative value.
5107 */
5108 int ust_app_snapshot_record(struct ltt_ust_session *usess,
5109 struct snapshot_output *output, int wait,
5110 uint64_t nb_packets_per_stream)
5111 {
5112 int ret = 0;
5113 unsigned int snapshot_done = 0;
5114 struct lttng_ht_iter iter;
5115 struct ust_app *app;
5116 char pathname[PATH_MAX];
5117
5118 assert(usess);
5119 assert(output);
5120
5121 rcu_read_lock();
5122
5123 switch (usess->buffer_type) {
5124 case LTTNG_BUFFER_PER_UID:
5125 {
5126 struct buffer_reg_uid *reg;
5127
5128 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5129 struct buffer_reg_channel *reg_chan;
5130 struct consumer_socket *socket;
5131
5132 /* Get consumer socket to use to push the metadata.*/
5133 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5134 usess->consumer);
5135 if (!socket) {
5136 ret = -EINVAL;
5137 goto error;
5138 }
5139
5140 memset(pathname, 0, sizeof(pathname));
5141 ret = snprintf(pathname, sizeof(pathname),
5142 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
5143 reg->uid, reg->bits_per_long);
5144 if (ret < 0) {
5145 PERROR("snprintf snapshot path");
5146 goto error;
5147 }
5148
5149 /* Add the UST default trace dir to path. */
5150 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5151 reg_chan, node.node) {
5152 ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
5153 output, 0, usess->uid, usess->gid, pathname, wait,
5154 nb_packets_per_stream);
5155 if (ret < 0) {
5156 goto error;
5157 }
5158 }
5159 ret = consumer_snapshot_channel(socket,
5160 reg->registry->reg.ust->metadata_key, output, 1,
5161 usess->uid, usess->gid, pathname, wait, 0);
5162 if (ret < 0) {
5163 goto error;
5164 }
5165 snapshot_done = 1;
5166 }
5167 break;
5168 }
5169 case LTTNG_BUFFER_PER_PID:
5170 {
5171 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5172 struct consumer_socket *socket;
5173 struct lttng_ht_iter chan_iter;
5174 struct ust_app_channel *ua_chan;
5175 struct ust_app_session *ua_sess;
5176 struct ust_registry_session *registry;
5177
5178 ua_sess = lookup_session_by_app(usess, app);
5179 if (!ua_sess) {
5180 /* Session not associated with this app. */
5181 continue;
5182 }
5183
5184 /* Get the right consumer socket for the application. */
5185 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5186 output->consumer);
5187 if (!socket) {
5188 ret = -EINVAL;
5189 goto error;
5190 }
5191
5192 /* Add the UST default trace dir to path. */
5193 memset(pathname, 0, sizeof(pathname));
5194 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
5195 ua_sess->path);
5196 if (ret < 0) {
5197 PERROR("snprintf snapshot path");
5198 goto error;
5199 }
5200
5201 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5202 ua_chan, node.node) {
5203 ret = consumer_snapshot_channel(socket, ua_chan->key, output,
5204 0, ua_sess->euid, ua_sess->egid, pathname, wait,
5205 nb_packets_per_stream);
5206 if (ret < 0) {
5207 goto error;
5208 }
5209 }
5210
5211 registry = get_session_registry(ua_sess);
5212 assert(registry);
5213 ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
5214 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
5215 if (ret < 0) {
5216 goto error;
5217 }
5218 snapshot_done = 1;
5219 }
5220 break;
5221 }
5222 default:
5223 assert(0);
5224 break;
5225 }
5226
5227 if (!snapshot_done) {
5228 /*
5229 * If no snapshot was made and we are not in the error path, this means
5230 * that there are no buffers thus no (prior) application to snapshot
5231 * data from so we have simply NO data.
5232 */
5233 ret = -ENODATA;
5234 }
5235
5236 error:
5237 rcu_read_unlock();
5238 return ret;
5239 }
5240
5241 /*
5242 * Return the size taken by one more packet per stream.
5243 */
5244 uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
5245 uint64_t cur_nr_packets)
5246 {
5247 uint64_t tot_size = 0;
5248 struct ust_app *app;
5249 struct lttng_ht_iter iter;
5250
5251 assert(usess);
5252
5253 switch (usess->buffer_type) {
5254 case LTTNG_BUFFER_PER_UID:
5255 {
5256 struct buffer_reg_uid *reg;
5257
5258 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5259 struct buffer_reg_channel *reg_chan;
5260
5261 rcu_read_lock();
5262 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5263 reg_chan, node.node) {
5264 if (cur_nr_packets >= reg_chan->num_subbuf) {
5265 /*
5266 * Don't take channel into account if we
5267 * already grab all its packets.
5268 */
5269 continue;
5270 }
5271 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
5272 }
5273 rcu_read_unlock();
5274 }
5275 break;
5276 }
5277 case LTTNG_BUFFER_PER_PID:
5278 {
5279 rcu_read_lock();
5280 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5281 struct ust_app_channel *ua_chan;
5282 struct ust_app_session *ua_sess;
5283 struct lttng_ht_iter chan_iter;
5284
5285 ua_sess = lookup_session_by_app(usess, app);
5286 if (!ua_sess) {
5287 /* Session not associated with this app. */
5288 continue;
5289 }
5290
5291 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5292 ua_chan, node.node) {
5293 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
5294 /*
5295 * Don't take channel into account if we
5296 * already grab all its packets.
5297 */
5298 continue;
5299 }
5300 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
5301 }
5302 }
5303 rcu_read_unlock();
5304 break;
5305 }
5306 default:
5307 assert(0);
5308 break;
5309 }
5310
5311 return tot_size;
5312 }
/* This page took 0.176855 seconds and 6 git commands to generate. */