SoW-2019-0002: Dynamic Snapshot
lttng-tools.git: src/bin/lttng-sessiond/ust-app.c
91d76f53 1/*
ab5be9fa
MJ
2 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
3 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
91d76f53 4 *
ab5be9fa 5 * SPDX-License-Identifier: GPL-2.0-only
91d76f53 6 *
91d76f53
DG
7 */
8
6c1c0768 9#define _LGPL_SOURCE
91d76f53 10#include <errno.h>
7972aab2 11#include <inttypes.h>
91d76f53
DG
12#include <pthread.h>
13#include <stdio.h>
14#include <stdlib.h>
099e26bd 15#include <string.h>
aba8e916
DG
16#include <sys/stat.h>
17#include <sys/types.h>
099e26bd 18#include <unistd.h>
0df502fd 19#include <urcu/compiler.h>
331744e3 20#include <signal.h>
bec39940 21
990570ed 22#include <common/common.h>
1831ae68
FD
23#include <common/hashtable/utils.h>
24#include <lttng/event-rule/event-rule.h>
25#include <lttng/event-rule/event-rule-internal.h>
26#include <lttng/event-rule/tracepoint.h>
27#include <lttng/condition/condition.h>
28#include <lttng/condition/event-rule-internal.h>
29#include <lttng/condition/event-rule.h>
86acf0da 30#include <common/sessiond-comm/sessiond-comm.h>
1e307fab 31
7972aab2 32#include "buffer-registry.h"
86acf0da 33#include "fd-limit.h"
8782cc74 34#include "health-sessiond.h"
56fff090 35#include "ust-app.h"
48842b30 36#include "ust-consumer.h"
75018ab6
JG
37#include "lttng-ust-ctl.h"
38#include "lttng-ust-error.h"
0b2dc8df 39#include "utils.h"
fb83fe64 40#include "session.h"
e9404c27
JG
41#include "lttng-sessiond.h"
42#include "notification-thread-commands.h"
5c408ad8 43#include "rotate.h"
1831ae68 44#include "event.h"
d80a6244 45
44cdb3a2
MJ
46struct lttng_ht *ust_app_ht;
47struct lttng_ht *ust_app_ht_by_sock;
48struct lttng_ht *ust_app_ht_by_notify_sock;
49
c4b88406
MD
50static
51int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
52
d9bf3ca4
MD
53/* Next available channel key. Access under next_channel_key_lock. */
54static uint64_t _next_channel_key;
55static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
56
57/* Next available session ID. Access under next_session_id_lock. */
58static uint64_t _next_session_id;
59static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
ffe60014
DG
60
61/*
d9bf3ca4 62 * Return the incremented value of next_channel_key.
ffe60014 63 */
d9bf3ca4 64static uint64_t get_next_channel_key(void)
ffe60014 65{
d9bf3ca4
MD
66 uint64_t ret;
67
68 pthread_mutex_lock(&next_channel_key_lock);
69 ret = ++_next_channel_key;
70 pthread_mutex_unlock(&next_channel_key_lock);
71 return ret;
ffe60014
DG
72}
73
74/*
7972aab2 75 * Return the atomically incremented value of next_session_id.
ffe60014 76 */
d9bf3ca4 77static uint64_t get_next_session_id(void)
ffe60014 78{
d9bf3ca4
MD
79 uint64_t ret;
80
81 pthread_mutex_lock(&next_session_id_lock);
82 ret = ++_next_session_id;
83 pthread_mutex_unlock(&next_session_id_lock);
84 return ret;
ffe60014
DG
85}
86
d65d2de8
DG
87static void copy_channel_attr_to_ustctl(
88 struct ustctl_consumer_channel_attr *attr,
89 struct lttng_ust_channel_attr *uattr)
90{
91 /* Copy channel attributes since the layout is different. */
92 attr->subbuf_size = uattr->subbuf_size;
93 attr->num_subbuf = uattr->num_subbuf;
94 attr->overwrite = uattr->overwrite;
95 attr->switch_timer_interval = uattr->switch_timer_interval;
96 attr->read_timer_interval = uattr->read_timer_interval;
97 attr->output = uattr->output;
491d1539 98 attr->blocking_timeout = uattr->u.s.blocking_timeout;
d65d2de8
DG
99}
100
025faf73
DG
101/*
102 * Match function for the hash table lookup.
103 *
104 * It matches an ust app event based on four attributes: the event name,
105 * the filter bytecode, the loglevel and the exclusions.
106 */
18eace3b
DG
107static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
108{
109 struct ust_app_event *event;
110 const struct ust_app_ht_key *key;
2106efa0 111 int ev_loglevel_value;
18eace3b
DG
112
113 assert(node);
114 assert(_key);
115
116 event = caa_container_of(node, struct ust_app_event, node.node);
117 key = _key;
2106efa0 118 ev_loglevel_value = event->attr.loglevel;
18eace3b 119
1af53eb5 120 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
18eace3b
DG
121
122 /* Event name */
123 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
124 goto no_match;
125 }
126
127 /* Event loglevel. */
2106efa0 128 if (ev_loglevel_value != key->loglevel_type) {
025faf73 129 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
2106efa0
PP
130 && key->loglevel_type == 0 &&
131 ev_loglevel_value == -1) {
025faf73
DG
132 /*
133 * Match is accepted. This is because on event creation, the
134 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
135 * -1 are accepted for this loglevel type since 0 is the one set by
136 * the API when receiving an enable event.
137 */
138 } else {
139 goto no_match;
140 }
18eace3b
DG
141 }
142
143 /* One of the filters is NULL, fail. */
144 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
145 goto no_match;
146 }
147
025faf73
DG
148 if (key->filter && event->filter) {
149 /* Both filters exist, check length followed by the bytecode. */
150 if (event->filter->len != key->filter->len ||
151 memcmp(event->filter->data, key->filter->data,
152 event->filter->len) != 0) {
153 goto no_match;
154 }
18eace3b
DG
155 }
156
1af53eb5
JI
157 /* One of the exclusions is NULL, fail. */
158 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
159 goto no_match;
160 }
161
162 if (key->exclusion && event->exclusion) {
163 /* Both exclusions exist, check count followed by the names. */
164 if (event->exclusion->count != key->exclusion->count ||
165 memcmp(event->exclusion->names, key->exclusion->names,
166 event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
167 goto no_match;
168 }
169 }
170
171
025faf73 172 /* Match. */
18eace3b
DG
173 return 1;
174
175no_match:
176 return 0;
18eace3b
DG
177}
178
025faf73
DG
179/*
180 * Unique add of an ust app event in the given ht. This uses the custom
181 * ht_match_ust_app_event match function and the event name as hash.
182 */
d0b96690 183static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
18eace3b
DG
184 struct ust_app_event *event)
185{
186 struct cds_lfht_node *node_ptr;
187 struct ust_app_ht_key key;
d0b96690 188 struct lttng_ht *ht;
18eace3b 189
d0b96690
DG
190 assert(ua_chan);
191 assert(ua_chan->events);
18eace3b
DG
192 assert(event);
193
d0b96690 194 ht = ua_chan->events;
18eace3b
DG
195 key.name = event->attr.name;
196 key.filter = event->filter;
2106efa0 197 key.loglevel_type = event->attr.loglevel;
91c89f23 198 key.exclusion = event->exclusion;
18eace3b
DG
199
200 node_ptr = cds_lfht_add_unique(ht->ht,
201 ht->hash_fct(event->node.key, lttng_ht_seed),
202 ht_match_ust_app_event, &key, &event->node.node);
203 assert(node_ptr == &event->node.node);
204}
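/*
 * Illustration (not part of the original file): the lookup counterpart of
 * add_unique_ust_app_event() builds the same ust_app_ht_key and passes the
 * same ht_match_ust_app_event() match function, as find_ust_app_event()
 * does further down in this file. A minimal sketch, assuming an existing
 * channel and event:
 *
 *	struct lttng_ht_iter iter;
 *	struct ust_app_ht_key key = {
 *		.name = event->attr.name,
 *		.filter = event->filter,
 *		.loglevel_type = event->attr.loglevel,
 *		.exclusion = event->exclusion,
 *	};
 *
 *	cds_lfht_lookup(ua_chan->events->ht,
 *			ua_chan->events->hash_fct(event->node.key, lttng_ht_seed),
 *			ht_match_ust_app_event, &key, &iter.iter);
 */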
205
d88aee68
DG
206/*
207 * Close the notify socket from the given RCU head object. This MUST be called
208 * through a call_rcu().
209 */
210static void close_notify_sock_rcu(struct rcu_head *head)
211{
212 int ret;
213 struct ust_app_notify_sock_obj *obj =
214 caa_container_of(head, struct ust_app_notify_sock_obj, head);
215
216 /* Must have a valid fd here. */
217 assert(obj->fd >= 0);
218
219 ret = close(obj->fd);
220 if (ret) {
221 ERR("close notify sock %d RCU", obj->fd);
222 }
223 lttng_fd_put(LTTNG_FD_APPS, 1);
224
225 free(obj);
226}
227
7972aab2
DG
228/*
229 * Return the session registry according to the buffer type of the given
230 * session.
231 *
232 * A registry per UID object MUST exist before calling this function or else
233 * it asserts if not found. RCU read side lock must be acquired.
234 */
235static struct ust_registry_session *get_session_registry(
236 struct ust_app_session *ua_sess)
237{
238 struct ust_registry_session *registry = NULL;
239
240 assert(ua_sess);
241
242 switch (ua_sess->buffer_type) {
243 case LTTNG_BUFFER_PER_PID:
244 {
245 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
246 if (!reg_pid) {
247 goto error;
248 }
249 registry = reg_pid->registry->reg.ust;
250 break;
251 }
252 case LTTNG_BUFFER_PER_UID:
253 {
254 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
470cc211
JG
255 ua_sess->tracing_id, ua_sess->bits_per_long,
256 ua_sess->real_credentials.uid);
7972aab2
DG
257 if (!reg_uid) {
258 goto error;
259 }
260 registry = reg_uid->registry->reg.ust;
261 break;
262 }
263 default:
264 assert(0);
265 };
266
267error:
268 return registry;
269}
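/*
 * Usage sketch (illustrative only): callers must hold the RCU read-side
 * lock and must tolerate a NULL return, which can happen on error paths
 * during initialization, as delete_ust_app_session() does further down:
 *
 *	struct ust_registry_session *registry;
 *
 *	rcu_read_lock();
 *	registry = get_session_registry(ua_sess);
 *	if (registry) {
 *		// Use the registry, e.g. push metadata for this session.
 *	}
 *	rcu_read_unlock();
 */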
270
55cc08a6
DG
271/*
272 * Delete ust context safely. RCU read lock must be held before calling
273 * this function.
274 */
275static
fb45065e
MD
276void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
277 struct ust_app *app)
55cc08a6 278{
ffe60014
DG
279 int ret;
280
281 assert(ua_ctx);
282
55cc08a6 283 if (ua_ctx->obj) {
fb45065e 284 pthread_mutex_lock(&app->sock_lock);
ffe60014 285 ret = ustctl_release_object(sock, ua_ctx->obj);
fb45065e 286 pthread_mutex_unlock(&app->sock_lock);
d0b96690
DG
287 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
288 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
289 sock, ua_ctx->obj->handle, ret);
ffe60014 290 }
55cc08a6
DG
291 free(ua_ctx->obj);
292 }
293 free(ua_ctx);
294}
295
d80a6244
DG
296/*
297 * Delete ust app event safely. RCU read lock must be held before calling
298 * this function.
299 */
8b366481 300static
fb45065e
MD
301void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
302 struct ust_app *app)
d80a6244 303{
ffe60014
DG
304 int ret;
305
306 assert(ua_event);
307
53a80697 308 free(ua_event->filter);
951f0b71
JI
309 if (ua_event->exclusion != NULL)
310 free(ua_event->exclusion);
edb67388 311 if (ua_event->obj != NULL) {
fb45065e 312 pthread_mutex_lock(&app->sock_lock);
ffe60014 313 ret = ustctl_release_object(sock, ua_event->obj);
fb45065e 314 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
315 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
316 ERR("UST app sock %d release event obj failed with ret %d",
317 sock, ret);
318 }
edb67388
DG
319 free(ua_event->obj);
320 }
d80a6244
DG
321 free(ua_event);
322}
323
1831ae68
FD
324/*
325 * Delete ust app token event_rule safely. RCU read lock must be held before calling
326 * this function. TODO: confirm whether the RCU read lock is really required here.
327 */
328static
329void delete_ust_app_token_event_rule(int sock, struct ust_app_token_event_rule *ua_token,
330 struct ust_app *app)
331{
332 int ret;
333
334 assert(ua_token);
335
336 if (ua_token->exclusion != NULL)
337 free(ua_token->exclusion);
338 if (ua_token->obj != NULL) {
339 pthread_mutex_lock(&app->sock_lock);
340 ret = ustctl_release_object(sock, ua_token->obj);
341 pthread_mutex_unlock(&app->sock_lock);
342 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
343 ERR("UST app sock %d release event obj failed with ret %d",
344 sock, ret);
345 }
346 free(ua_token->obj);
347 }
348 lttng_event_rule_put(ua_token->event_rule);
349 free(ua_token);
350}
351
d80a6244 352/*
7972aab2
DG
353 * Release ust data object of the given stream.
354 *
355 * Return 0 on success or else a negative value.
d80a6244 356 */
fb45065e
MD
357static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
358 struct ust_app *app)
d80a6244 359{
7972aab2 360 int ret = 0;
ffe60014
DG
361
362 assert(stream);
363
8b366481 364 if (stream->obj) {
fb45065e 365 pthread_mutex_lock(&app->sock_lock);
ffe60014 366 ret = ustctl_release_object(sock, stream->obj);
fb45065e 367 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
368 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
369 ERR("UST app sock %d release stream obj failed with ret %d",
370 sock, ret);
371 }
4063050c 372 lttng_fd_put(LTTNG_FD_APPS, 2);
8b366481
DG
373 free(stream->obj);
374 }
7972aab2
DG
375
376 return ret;
377}
378
379/*
380 * Delete ust app stream safely. RCU read lock must be held before calling
381 * this function.
382 */
383static
fb45065e
MD
384void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
385 struct ust_app *app)
7972aab2
DG
386{
387 assert(stream);
388
fb45065e 389 (void) release_ust_app_stream(sock, stream, app);
84cd17c6 390 free(stream);
d80a6244
DG
391}
392
36b588ed
MD
393/*
394 * We need to execute ht_destroy outside of RCU read-side critical
0b2dc8df
MD
395 * section and outside of call_rcu thread, so we postpone its execution
396 * using ht_cleanup_push. This is simpler than changing the semantics of
397 * the many callers of delete_ust_app_session().
36b588ed
MD
398 */
399static
400void delete_ust_app_channel_rcu(struct rcu_head *head)
401{
402 struct ust_app_channel *ua_chan =
403 caa_container_of(head, struct ust_app_channel, rcu_head);
404
0b2dc8df
MD
405 ht_cleanup_push(ua_chan->ctx);
406 ht_cleanup_push(ua_chan->events);
36b588ed
MD
407 free(ua_chan);
408}
409
fb83fe64
JD
410/*
411 * Extract the lost packet or discarded event counters when the channel is
412 * being deleted and store the values in the parent channel so we can
413 * access them from 'lttng list' and at stop/destroy.
82cac6d2
JG
414 *
415 * The session list lock must be held by the caller.
fb83fe64
JD
416 */
417static
418void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
419{
420 uint64_t discarded = 0, lost = 0;
421 struct ltt_session *session;
422 struct ltt_ust_channel *uchan;
423
424 if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
425 return;
426 }
427
428 rcu_read_lock();
429 session = session_find_by_id(ua_chan->session->tracing_id);
d68ec974
JG
430 if (!session || !session->ust_session) {
431 /*
432 * Not finding the session is not an error because there are
433 * multiple ways the channels can be torn down.
434 *
435 * 1) The session daemon can initiate the destruction of the
436 * ust app session after receiving a destroy command or
437 * during its shutdown/teardown.
438 * 2) The application, since we are in per-pid tracing, is
439 * unregistering and tearing down its ust app session.
440 *
441 * Both paths are protected by the session list lock which
442 * ensures that the accounting of lost packets and discarded
443 * events is done exactly once. The session is then unpublished
444 * from the session list, resulting in this condition.
445 */
fb83fe64
JD
446 goto end;
447 }
448
449 if (ua_chan->attr.overwrite) {
450 consumer_get_lost_packets(ua_chan->session->tracing_id,
451 ua_chan->key, session->ust_session->consumer,
452 &lost);
453 } else {
454 consumer_get_discarded_events(ua_chan->session->tracing_id,
455 ua_chan->key, session->ust_session->consumer,
456 &discarded);
457 }
458 uchan = trace_ust_find_channel_by_name(
459 session->ust_session->domain_global.channels,
460 ua_chan->name);
461 if (!uchan) {
462 ERR("Missing UST channel to store discarded counters");
463 goto end;
464 }
465
466 uchan->per_pid_closed_app_discarded += discarded;
467 uchan->per_pid_closed_app_lost += lost;
468
469end:
470 rcu_read_unlock();
e32d7f27
JG
471 if (session) {
472 session_put(session);
473 }
fb83fe64
JD
474}
475
d80a6244
DG
476/*
477 * Delete ust app channel safely. RCU read lock must be held before calling
478 * this function.
82cac6d2
JG
479 *
480 * The session list lock must be held by the caller.
d80a6244 481 */
8b366481 482static
d0b96690
DG
483void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
484 struct ust_app *app)
d80a6244
DG
485{
486 int ret;
bec39940 487 struct lttng_ht_iter iter;
d80a6244 488 struct ust_app_event *ua_event;
55cc08a6 489 struct ust_app_ctx *ua_ctx;
030a66fa 490 struct ust_app_stream *stream, *stmp;
7972aab2 491 struct ust_registry_session *registry;
d80a6244 492
ffe60014
DG
493 assert(ua_chan);
494
495 DBG3("UST app deleting channel %s", ua_chan->name);
496
55cc08a6 497 /* Wipe stream */
d80a6244 498 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
84cd17c6 499 cds_list_del(&stream->list);
fb45065e 500 delete_ust_app_stream(sock, stream, app);
d80a6244
DG
501 }
502
55cc08a6 503 /* Wipe context */
bec39940 504 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
31746f93 505 cds_list_del(&ua_ctx->list);
bec39940 506 ret = lttng_ht_del(ua_chan->ctx, &iter);
55cc08a6 507 assert(!ret);
fb45065e 508 delete_ust_app_ctx(sock, ua_ctx, app);
55cc08a6 509 }
d80a6244 510
55cc08a6 511 /* Wipe events */
bec39940
DG
512 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
513 node.node) {
514 ret = lttng_ht_del(ua_chan->events, &iter);
525b0740 515 assert(!ret);
fb45065e 516 delete_ust_app_event(sock, ua_event, app);
d80a6244 517 }
edb67388 518
c8335706
MD
519 if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
520 /* Wipe and free registry from session registry. */
521 registry = get_session_registry(ua_chan->session);
522 if (registry) {
e9404c27 523 ust_registry_channel_del_free(registry, ua_chan->key,
e38d96f9
MD
524 sock >= 0);
525 }
45798a31
JG
526 /*
527 * A negative socket can be used by the caller when
528 * cleaning-up a ua_chan in an error path. Skip the
529 * accounting in this case.
530 */
e38d96f9
MD
531 if (sock >= 0) {
532 save_per_pid_lost_discarded_counters(ua_chan);
c8335706 533 }
7972aab2 534 }
d0b96690 535
edb67388 536 if (ua_chan->obj != NULL) {
d0b96690
DG
537 /* Remove channel from application UST object descriptor. */
538 iter.iter.node = &ua_chan->ust_objd_node.node;
c6e62271
DG
539 ret = lttng_ht_del(app->ust_objd, &iter);
540 assert(!ret);
fb45065e 541 pthread_mutex_lock(&app->sock_lock);
ffe60014 542 ret = ustctl_release_object(sock, ua_chan->obj);
fb45065e 543 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
544 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
545 ERR("UST app sock %d release channel obj failed with ret %d",
546 sock, ret);
547 }
7972aab2 548 lttng_fd_put(LTTNG_FD_APPS, 1);
edb67388
DG
549 free(ua_chan->obj);
550 }
36b588ed 551 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
d80a6244
DG
552}
553
fb45065e
MD
554int ust_app_register_done(struct ust_app *app)
555{
556 int ret;
557
558 pthread_mutex_lock(&app->sock_lock);
559 ret = ustctl_register_done(app->sock);
560 pthread_mutex_unlock(&app->sock_lock);
561 return ret;
562}
563
564int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
565{
566 int ret, sock;
567
568 if (app) {
569 pthread_mutex_lock(&app->sock_lock);
570 sock = app->sock;
571 } else {
572 sock = -1;
573 }
574 ret = ustctl_release_object(sock, data);
575 if (app) {
576 pthread_mutex_unlock(&app->sock_lock);
577 }
578 return ret;
579}
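/*
 * Usage sketch (illustrative only): ust_app_release_object() wraps
 * ustctl_release_object() with the application's sock_lock held, so callers
 * outside this file do not have to manage the lock themselves. The obj
 * pointer below is a hypothetical lttng_ust_object_data previously created
 * on behalf of the application:
 *
 *	ret = ust_app_release_object(app, obj);
 *	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
 *		ERR("Failed to release UST object");
 *	}
 *	free(obj);
 */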
580
331744e3 581/*
1b532a60
DG
582 * Push metadata to consumer socket.
583 *
dc2bbdae
MD
584 * RCU read-side lock must be held to guarantee the existence of the socket.
585 * Must be called with the ust app session lock held.
586 * Must be called with the registry lock held.
331744e3
JD
587 *
588 * On success, return the len of metadata pushed or else a negative value.
2c57e06d
MD
589 * Returning a -EPIPE return value means we could not send the metadata,
590 * but it can be caused by recoverable errors (e.g. the application has
591 * terminated concurrently).
331744e3
JD
592 */
593ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
594 struct consumer_socket *socket, int send_zero_data)
595{
596 int ret;
597 char *metadata_str = NULL;
c585821b 598 size_t len, offset, new_metadata_len_sent;
331744e3 599 ssize_t ret_val;
93ec662e 600 uint64_t metadata_key, metadata_version;
331744e3
JD
601
602 assert(registry);
603 assert(socket);
1b532a60 604
c585821b
MD
605 metadata_key = registry->metadata_key;
606
ce34fcd0 607 /*
dc2bbdae
MD
608 * Means that no metadata was assigned to the session. This can
609 * happen if no start has been done previously.
ce34fcd0 610 */
c585821b 611 if (!metadata_key) {
ce34fcd0
MD
612 return 0;
613 }
614
331744e3
JD
615 offset = registry->metadata_len_sent;
616 len = registry->metadata_len - registry->metadata_len_sent;
c585821b 617 new_metadata_len_sent = registry->metadata_len;
93ec662e 618 metadata_version = registry->metadata_version;
331744e3
JD
619 if (len == 0) {
620 DBG3("No metadata to push for metadata key %" PRIu64,
621 registry->metadata_key);
622 ret_val = len;
623 if (send_zero_data) {
624 DBG("No metadata to push");
625 goto push_data;
626 }
627 goto end;
628 }
629
630 /* Allocate only what we have to send. */
631 metadata_str = zmalloc(len);
632 if (!metadata_str) {
633 PERROR("zmalloc ust app metadata string");
634 ret_val = -ENOMEM;
635 goto error;
636 }
c585821b 637 /* Copy what we haven't sent out. */
331744e3 638 memcpy(metadata_str, registry->metadata + offset, len);
331744e3
JD
639
640push_data:
c585821b
MD
641 pthread_mutex_unlock(&registry->lock);
642 /*
643 * We need to unlock the registry while we push metadata to
644 * break a circular dependency between the consumerd metadata
645 * lock and the sessiond registry lock. Indeed, pushing metadata
646 * to the consumerd waits until it has been pushed all the way to
647 * relayd, but doing so requires grabbing the metadata lock. If
648 * a concurrent metadata request is being performed by
649 * consumerd, this can try to grab the registry lock on the
650 * sessiond while holding the metadata lock on the consumer
651 * daemon. Those push and pull schemes are performed on two
652 * different bidirectional communication sockets.
653 */
654 ret = consumer_push_metadata(socket, metadata_key,
93ec662e 655 metadata_str, len, offset, metadata_version);
c585821b 656 pthread_mutex_lock(&registry->lock);
331744e3 657 if (ret < 0) {
000baf6a 658 /*
dc2bbdae
MD
659 * There is an acceptable race here between the registry
660 * metadata key assignment and the creation on the
661 * consumer. The session daemon can concurrently push
662 * metadata for this registry while being created on the
663 * consumer since the metadata key of the registry is
664 * assigned *before* it is set up, to avoid the consumer
665 * asking for metadata that could possibly not be found
666 * in the session daemon.
000baf6a 667 *
dc2bbdae
MD
668 * The metadata will get pushed either by the session
669 * being stopped or the consumer requesting metadata if
670 * that race is triggered.
000baf6a
DG
671 */
672 if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
673 ret = 0;
c585821b
MD
674 } else {
675 ERR("Error pushing metadata to consumer");
000baf6a 676 }
331744e3
JD
677 ret_val = ret;
678 goto error_push;
c585821b
MD
679 } else {
680 /*
681 * Metadata may have been concurrently pushed, since
682 * we're not holding the registry lock while pushing to
683 * consumer. This is handled by the fact that we send
684 * the metadata content, size, and the offset at which
685 * that metadata belongs. This may arrive out of order
686 * on the consumer side, and the consumer is able to
687 * deal with overlapping fragments. The consumer
688 * supports overlapping fragments, which must be
689 * contiguous starting from offset 0. We keep the
690 * largest metadata_len_sent value of the concurrent
691 * send.
692 */
693 registry->metadata_len_sent =
694 max_t(size_t, registry->metadata_len_sent,
695 new_metadata_len_sent);
331744e3 696 }
331744e3
JD
697 free(metadata_str);
698 return len;
699
700end:
701error:
ce34fcd0
MD
702 if (ret_val) {
703 /*
dc2bbdae
MD
704 * On error, flag the registry that the metadata is
705 * closed. We were unable to push anything and this
706 * means that either the consumer is not responding or
707 * the metadata cache has been destroyed on the
708 * consumer.
ce34fcd0
MD
709 */
710 registry->metadata_closed = 1;
711 }
331744e3
JD
712error_push:
713 free(metadata_str);
714 return ret_val;
715}
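/*
 * Caller-side sketch (illustrative only), mirroring push_metadata() below:
 * the registry lock must be held on entry (with the RCU read-side and ust
 * app session locks also held per the comment above); the function itself
 * temporarily drops the registry lock while talking to the consumerd, and
 * -EPIPE is a recoverable error (application gone or metadata closed):
 *
 *	pthread_mutex_lock(&registry->lock);
 *	ret = ust_app_push_metadata(registry, socket, 0);
 *	pthread_mutex_unlock(&registry->lock);
 *	if (ret < 0 && ret != -EPIPE) {
 *		ERR("Failed to push metadata");
 *	}
 */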
716
d88aee68 717/*
ce34fcd0 718 * For a given application and session, push metadata to consumer.
331744e3
JD
719 * Either sock or consumer is required: if sock is NULL, the default
720 * socket to send the metadata is retrieved from consumer, if sock
721 * is not NULL we use it to send the metadata.
ce34fcd0 722 * RCU read-side lock must be held while calling this function,
dc2bbdae
MD
723 * therefore ensuring the existence of the registry. It also ensures the
724 * existence of the socket throughout this function.
d88aee68
DG
725 *
726 * Return 0 on success else a negative error.
2c57e06d
MD
727 * Returning a -EPIPE return value means we could not send the metadata,
728 * but it can be caused by recoverable errors (e.g. the application has
729 * terminated concurrently).
d88aee68 730 */
7972aab2
DG
731static int push_metadata(struct ust_registry_session *registry,
732 struct consumer_output *consumer)
d88aee68 733{
331744e3
JD
734 int ret_val;
735 ssize_t ret;
d88aee68
DG
736 struct consumer_socket *socket;
737
7972aab2
DG
738 assert(registry);
739 assert(consumer);
740
ce34fcd0 741 pthread_mutex_lock(&registry->lock);
ce34fcd0 742 if (registry->metadata_closed) {
dc2bbdae
MD
743 ret_val = -EPIPE;
744 goto error;
d88aee68
DG
745 }
746
d88aee68 747 /* Get consumer socket to use to push the metadata.*/
7972aab2
DG
748 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
749 consumer);
d88aee68 750 if (!socket) {
331744e3 751 ret_val = -1;
ce34fcd0 752 goto error;
d88aee68
DG
753 }
754
331744e3 755 ret = ust_app_push_metadata(registry, socket, 0);
d88aee68 756 if (ret < 0) {
331744e3 757 ret_val = ret;
ce34fcd0 758 goto error;
d88aee68 759 }
dc2bbdae 760 pthread_mutex_unlock(&registry->lock);
d88aee68
DG
761 return 0;
762
ce34fcd0 763error:
dc2bbdae 764 pthread_mutex_unlock(&registry->lock);
331744e3 765 return ret_val;
d88aee68
DG
766}
767
768/*
769 * Send to the consumer a close metadata command for the given session. Once
770 * done, the metadata channel is deleted and the session metadata pointer is
dc2bbdae 771 * nullified. The session lock MUST be held unless the application is
d88aee68
DG
772 * in the destroy path.
773 *
a70ac2f4
MD
774 * Do not hold the registry lock while communicating with the consumerd, because
775 * doing so causes inter-process deadlocks between consumerd and sessiond with
776 * the metadata request notification.
777 *
d88aee68
DG
778 * Return 0 on success else a negative value.
779 */
7972aab2
DG
780static int close_metadata(struct ust_registry_session *registry,
781 struct consumer_output *consumer)
d88aee68
DG
782{
783 int ret;
784 struct consumer_socket *socket;
a70ac2f4
MD
785 uint64_t metadata_key;
786 bool registry_was_already_closed;
d88aee68 787
7972aab2
DG
788 assert(registry);
789 assert(consumer);
d88aee68 790
7972aab2
DG
791 rcu_read_lock();
792
ce34fcd0 793 pthread_mutex_lock(&registry->lock);
a70ac2f4
MD
794 metadata_key = registry->metadata_key;
795 registry_was_already_closed = registry->metadata_closed;
796 if (metadata_key != 0) {
797 /*
798 * Metadata closed. Even on error this means that the consumer
799 * is not responding or not found so either way a second close
800 * should NOT be emit for this registry.
801 */
802 registry->metadata_closed = 1;
803 }
804 pthread_mutex_unlock(&registry->lock);
ce34fcd0 805
a70ac2f4 806 if (metadata_key == 0 || registry_was_already_closed) {
d88aee68 807 ret = 0;
1b532a60 808 goto end;
d88aee68
DG
809 }
810
d88aee68 811 /* Get consumer socket to use to push the metadata.*/
7972aab2
DG
812 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
813 consumer);
d88aee68
DG
814 if (!socket) {
815 ret = -1;
a70ac2f4 816 goto end;
d88aee68
DG
817 }
818
a70ac2f4 819 ret = consumer_close_metadata(socket, metadata_key);
d88aee68 820 if (ret < 0) {
a70ac2f4 821 goto end;
d88aee68
DG
822 }
823
1b532a60 824end:
7972aab2 825 rcu_read_unlock();
d88aee68
DG
826 return ret;
827}
828
36b588ed
MD
829/*
830 * We need to execute ht_destroy outside of RCU read-side critical
0b2dc8df
MD
831 * section and outside of call_rcu thread, so we postpone its execution
832 * using ht_cleanup_push. This is simpler than changing the semantics of
833 * the many callers of delete_ust_app_session().
36b588ed
MD
834 */
835static
836void delete_ust_app_session_rcu(struct rcu_head *head)
837{
838 struct ust_app_session *ua_sess =
839 caa_container_of(head, struct ust_app_session, rcu_head);
840
0b2dc8df 841 ht_cleanup_push(ua_sess->channels);
36b588ed
MD
842 free(ua_sess);
843}
844
d80a6244
DG
845/*
846 * Delete ust app session safely. RCU read lock must be held before calling
847 * this function.
82cac6d2
JG
848 *
849 * The session list lock must be held by the caller.
d80a6244 850 */
8b366481 851static
d0b96690
DG
852void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
853 struct ust_app *app)
d80a6244
DG
854{
855 int ret;
bec39940 856 struct lttng_ht_iter iter;
d80a6244 857 struct ust_app_channel *ua_chan;
7972aab2 858 struct ust_registry_session *registry;
d80a6244 859
d88aee68
DG
860 assert(ua_sess);
861
1b532a60
DG
862 pthread_mutex_lock(&ua_sess->lock);
863
b161602a
MD
864 assert(!ua_sess->deleted);
865 ua_sess->deleted = true;
866
7972aab2 867 registry = get_session_registry(ua_sess);
fad1ed2f 868 /* Registry can be null on error path during initialization. */
ce34fcd0 869 if (registry) {
d88aee68 870 /* Push metadata for application before freeing the application. */
7972aab2 871 (void) push_metadata(registry, ua_sess->consumer);
d88aee68 872
7972aab2
DG
873 /*
874 * Don't ask to close metadata for global per UID buffers. Close
1b532a60
DG
875 * metadata only when the trace session is destroyed in this case. Also, the
876 * previous push metadata could have flagged the metadata registry to
877 * close so don't send a close command if it is already closed.
7972aab2 878 */
ce34fcd0 879 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
7972aab2
DG
880 /* And ask to close it for this session registry. */
881 (void) close_metadata(registry, ua_sess->consumer);
882 }
d80a6244
DG
883 }
884
bec39940
DG
885 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
886 node.node) {
887 ret = lttng_ht_del(ua_sess->channels, &iter);
525b0740 888 assert(!ret);
d0b96690 889 delete_ust_app_channel(sock, ua_chan, app);
d80a6244 890 }
d80a6244 891
7972aab2
DG
892 /* In case of per PID, the registry is kept in the session. */
893 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
894 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
895 if (reg_pid) {
fad1ed2f
JR
896 /*
897 * Registry can be null on error path during
898 * initialization.
899 */
7972aab2
DG
900 buffer_reg_pid_remove(reg_pid);
901 buffer_reg_pid_destroy(reg_pid);
902 }
903 }
d0b96690 904
aee6bafd 905 if (ua_sess->handle != -1) {
fb45065e 906 pthread_mutex_lock(&app->sock_lock);
ffe60014 907 ret = ustctl_release_handle(sock, ua_sess->handle);
fb45065e 908 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
909 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
910 ERR("UST app sock %d release session handle failed with ret %d",
911 sock, ret);
912 }
10b56aef
MD
913 /* Remove session from application UST object descriptor. */
914 iter.iter.node = &ua_sess->ust_objd_node.node;
915 ret = lttng_ht_del(app->ust_sessions_objd, &iter);
916 assert(!ret);
aee6bafd 917 }
10b56aef 918
1b532a60
DG
919 pthread_mutex_unlock(&ua_sess->lock);
920
6addfa37
MD
921 consumer_output_put(ua_sess->consumer);
922
36b588ed 923 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
d80a6244 924}
91d76f53
DG
925
926/*
284d8f55
DG
927 * Delete a traceable application structure from the global list. Never call
928 * this function outside of a call_rcu call.
36b588ed
MD
929 *
930 * RCU read side lock should _NOT_ be held when calling this function.
91d76f53 931 */
8b366481
DG
932static
933void delete_ust_app(struct ust_app *app)
91d76f53 934{
8b366481 935 int ret, sock;
d42f20df 936 struct ust_app_session *ua_sess, *tmp_ua_sess;
44d3bd01 937
82cac6d2
JG
938 /*
939 * The session list lock must be held during this function to guarantee
940 * the existence of ua_sess.
941 */
942 session_lock_list();
d80a6244 943 /* Delete ust app sessions info */
852d0037
DG
944 sock = app->sock;
945 app->sock = -1;
d80a6244 946
8b366481 947 /* Wipe sessions */
d42f20df
DG
948 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
949 teardown_node) {
950 /* Free every object in the session and the session. */
36b588ed 951 rcu_read_lock();
d0b96690 952 delete_ust_app_session(sock, ua_sess, app);
36b588ed 953 rcu_read_unlock();
d80a6244 954 }
36b588ed 955
0b2dc8df 956 ht_cleanup_push(app->sessions);
10b56aef 957 ht_cleanup_push(app->ust_sessions_objd);
0b2dc8df 958 ht_cleanup_push(app->ust_objd);
d80a6244 959
1831ae68
FD
960 ustctl_release_object(sock, app->token_communication.handle);
961
962 lttng_pipe_close(app->token_communication.trigger_event_pipe);
963
6414a713 964 /*
852d0037
DG
965 * Wait until we have deleted the application from the sock hash table
966 * before closing this socket, otherwise an application could re-use the
967 * socket ID and race with the teardown, using the same hash table entry.
968 *
969 * It's OK to leave the close in call_rcu. We want it to stay unique for
970 * all RCU readers that could run concurrently with unregister app,
971 * therefore we _need_ to only close that socket after a grace period. So
972 * it should stay in this RCU callback.
973 *
974 * This close() is a very important step of the synchronization model so
975 * every modification to this function must be carefully reviewed.
6414a713 976 */
799e2c4f
MD
977 ret = close(sock);
978 if (ret) {
979 PERROR("close");
980 }
4063050c 981 lttng_fd_put(LTTNG_FD_APPS, 1);
d80a6244 982
852d0037 983 DBG2("UST app pid %d deleted", app->pid);
284d8f55 984 free(app);
82cac6d2 985 session_unlock_list();
099e26bd
DG
986}
987
988/*
f6a9efaa 989 * URCU intermediate call to delete an UST app.
099e26bd 990 */
8b366481
DG
991static
992void delete_ust_app_rcu(struct rcu_head *head)
099e26bd 993{
bec39940
DG
994 struct lttng_ht_node_ulong *node =
995 caa_container_of(head, struct lttng_ht_node_ulong, head);
f6a9efaa 996 struct ust_app *app =
852d0037 997 caa_container_of(node, struct ust_app, pid_n);
f6a9efaa 998
852d0037 999 DBG3("Call RCU deleting app PID %d", app->pid);
f6a9efaa 1000 delete_ust_app(app);
099e26bd
DG
1001}
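/*
 * Illustration (not part of the original file): this callback is meant to
 * be scheduled only once the application has been unpublished from the
 * hash tables, so concurrent RCU readers never see a freed ust_app. A
 * minimal sketch of the unregister side, assuming lta is the application
 * being torn down and that it was already removed from
 * ust_app_ht_by_sock and ust_app_ht_by_notify_sock:
 *
 *	iter.iter.node = &lta->pid_n.node;
 *	ret = lttng_ht_del(ust_app_ht, &iter);
 *	if (!ret) {
 *		call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
 *	}
 */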
1002
ffe60014
DG
1003/*
1004 * Delete the session from the application ht and delete the data structure by
1005 * freeing every object inside and releasing them.
82cac6d2
JG
1006 *
1007 * The session list lock must be held by the caller.
ffe60014 1008 */
d0b96690 1009static void destroy_app_session(struct ust_app *app,
ffe60014
DG
1010 struct ust_app_session *ua_sess)
1011{
1012 int ret;
1013 struct lttng_ht_iter iter;
1014
1015 assert(app);
1016 assert(ua_sess);
1017
1018 iter.iter.node = &ua_sess->node.node;
1019 ret = lttng_ht_del(app->sessions, &iter);
1020 if (ret) {
1021 /* Already scheduled for teardown. */
1022 goto end;
1023 }
1024
1025 /* Once deleted, free the data structure. */
d0b96690 1026 delete_ust_app_session(app->sock, ua_sess, app);
ffe60014
DG
1027
1028end:
1029 return;
1030}
1031
8b366481
DG
1032/*
1033 * Alloc new UST app session.
1034 */
1035static
40bbd087 1036struct ust_app_session *alloc_ust_app_session(void)
8b366481
DG
1037{
1038 struct ust_app_session *ua_sess;
1039
1040 /* Init most of the default value by allocating and zeroing */
1041 ua_sess = zmalloc(sizeof(struct ust_app_session));
1042 if (ua_sess == NULL) {
1043 PERROR("malloc");
ffe60014 1044 goto error_free;
8b366481
DG
1045 }
1046
1047 ua_sess->handle = -1;
bec39940 1048 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
ad7a9107 1049 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
84ad93e8 1050 pthread_mutex_init(&ua_sess->lock, NULL);
ad7a9107 1051
8b366481
DG
1052 return ua_sess;
1053
ffe60014 1054error_free:
8b366481
DG
1055 return NULL;
1056}
1057
1058/*
1059 * Alloc new UST app channel.
1060 */
1061static
1831ae68 1062struct ust_app_channel *alloc_ust_app_channel(const char *name,
d0b96690 1063 struct ust_app_session *ua_sess,
ffe60014 1064 struct lttng_ust_channel_attr *attr)
8b366481
DG
1065{
1066 struct ust_app_channel *ua_chan;
1067
1068 /* Init most of the default value by allocating and zeroing */
1069 ua_chan = zmalloc(sizeof(struct ust_app_channel));
1070 if (ua_chan == NULL) {
1071 PERROR("malloc");
1072 goto error;
1073 }
1074
1075 /* Setup channel name */
1076 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1077 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1078
1079 ua_chan->enabled = 1;
1080 ua_chan->handle = -1;
45893984 1081 ua_chan->session = ua_sess;
ffe60014 1082 ua_chan->key = get_next_channel_key();
bec39940
DG
1083 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1084 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1085 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
8b366481
DG
1086
1087 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
31746f93 1088 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
8b366481
DG
1089
1090 /* Copy attributes */
1091 if (attr) {
ffe60014 1092 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
2fe6e7f5
DG
1093 ua_chan->attr.subbuf_size = attr->subbuf_size;
1094 ua_chan->attr.num_subbuf = attr->num_subbuf;
1095 ua_chan->attr.overwrite = attr->overwrite;
1096 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1097 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1098 ua_chan->attr.output = attr->output;
491d1539 1099 ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
8b366481 1100 }
ffe60014
DG
1101 /* By default, the channel is a per cpu channel. */
1102 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
8b366481
DG
1103
1104 DBG3("UST app channel %s allocated", ua_chan->name);
1105
1106 return ua_chan;
1107
1108error:
1109 return NULL;
1110}
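/*
 * Usage sketch (illustrative only, names assumed): the allocator only
 * builds the structure; the caller is expected to publish it in the
 * session's channel hash table, keyed by the channel name, along the
 * lines of:
 *
 *	struct ust_app_channel *ua_chan;
 *
 *	ua_chan = alloc_ust_app_channel(name, ua_sess, attr);
 *	if (!ua_chan) {
 *		goto error;
 *	}
 *	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
 */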
1111
37f1c236
DG
1112/*
1113 * Allocate and initialize a UST app stream.
1114 *
1115 * Return newly allocated stream pointer or NULL on error.
1116 */
ffe60014 1117struct ust_app_stream *ust_app_alloc_stream(void)
37f1c236
DG
1118{
1119 struct ust_app_stream *stream = NULL;
1120
1121 stream = zmalloc(sizeof(*stream));
1122 if (stream == NULL) {
1123 PERROR("zmalloc ust app stream");
1124 goto error;
1125 }
1126
1127 /* Zero could be a valid value for a handle so flag it to -1. */
1128 stream->handle = -1;
1129
1130error:
1131 return stream;
1132}
1133
8b366481
DG
1134/*
1135 * Alloc new UST app event.
1136 */
1137static
1138struct ust_app_event *alloc_ust_app_event(char *name,
1139 struct lttng_ust_event *attr)
1140{
1141 struct ust_app_event *ua_event;
1142
1143 /* Init most of the default value by allocating and zeroing */
1144 ua_event = zmalloc(sizeof(struct ust_app_event));
1145 if (ua_event == NULL) {
20533947 1146 PERROR("Failed to allocate ust_app_event structure");
8b366481
DG
1147 goto error;
1148 }
1149
1150 ua_event->enabled = 1;
1151 strncpy(ua_event->name, name, sizeof(ua_event->name));
1152 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
bec39940 1153 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
8b366481
DG
1154
1155 /* Copy attributes */
1156 if (attr) {
1157 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1158 }
1159
1160 DBG3("UST app event %s allocated", ua_event->name);
1161
1162 return ua_event;
1163
1164error:
1165 return NULL;
1166}
1167
1831ae68
FD
1168/*
1169 * Alloc new UST app token event rule.
1170 */
1171static struct ust_app_token_event_rule *alloc_ust_app_token_event_rule(
1172 struct lttng_event_rule *event_rule, uint64_t token)
1173{
1174 struct ust_app_token_event_rule *ua_token;
1175
1176 ua_token = zmalloc(sizeof(struct ust_app_token_event_rule));
1177 if (ua_token == NULL) {
1178 PERROR("Failed to allocate ust_app_token_event_rule structure");
1179 goto error;
1180 }
1181
1182 ua_token->enabled = 1;
1183 ua_token->token = token;
1184 lttng_ht_node_init_u64(&ua_token->node, token);
1185
1186 /* Get reference of the event_rule */
1187 if (!lttng_event_rule_get(event_rule)) {
1188 assert(0);
1189 }
1190
1191 ua_token->event_rule = event_rule;
1192 ua_token->filter = lttng_event_rule_get_filter_bytecode(event_rule);
1193 ua_token->exclusion = lttng_event_rule_generate_exclusions(event_rule);
1194
1195 DBG3("UST app token event rule %" PRIu64 " allocated", ua_token->token);
1196
1197 return ua_token;
1198
1199error:
1200 return NULL;
1201}
1202
8b366481
DG
1203/*
1204 * Alloc new UST app context.
1205 */
1206static
bdf64013 1207struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
8b366481
DG
1208{
1209 struct ust_app_ctx *ua_ctx;
1210
1211 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
1212 if (ua_ctx == NULL) {
1213 goto error;
1214 }
1215
31746f93
DG
1216 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1217
8b366481
DG
1218 if (uctx) {
1219 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
bdf64013
JG
1220 if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
1221 char *provider_name = NULL, *ctx_name = NULL;
1222
1223 provider_name = strdup(uctx->u.app_ctx.provider_name);
1224 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1225 if (!provider_name || !ctx_name) {
1226 free(provider_name);
1227 free(ctx_name);
1228 goto error;
1229 }
1230
1231 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1232 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1233 }
8b366481
DG
1234 }
1235
1236 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
8b366481 1237 return ua_ctx;
bdf64013
JG
1238error:
1239 free(ua_ctx);
1240 return NULL;
8b366481
DG
1241}
1242
51755dc8
JG
1243/*
1244 * Create a liblttng-ust filter bytecode from given bytecode.
1245 *
1246 * Return allocated filter or NULL on error.
1247 */
1248static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
1831ae68 1249 const struct lttng_filter_bytecode *orig_f)
51755dc8
JG
1250{
1251 struct lttng_ust_filter_bytecode *filter = NULL;
1252
1253 /* Copy filter bytecode */
1254 filter = zmalloc(sizeof(*filter) + orig_f->len);
1255 if (!filter) {
1256 PERROR("zmalloc alloc ust filter bytecode");
1257 goto error;
1258 }
1259
1260 assert(sizeof(struct lttng_filter_bytecode) ==
1261 sizeof(struct lttng_ust_filter_bytecode));
1262 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1263error:
1264 return filter;
1265}
1266
099e26bd 1267/*
421cb601
DG
1268 * Find an ust_app using the sock and return it. RCU read side lock must be
1269 * held before calling this helper function.
099e26bd 1270 */
f20baf8e 1271struct ust_app *ust_app_find_by_sock(int sock)
099e26bd 1272{
bec39940 1273 struct lttng_ht_node_ulong *node;
bec39940 1274 struct lttng_ht_iter iter;
f6a9efaa 1275
852d0037 1276 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
bec39940 1277 node = lttng_ht_iter_get_node_ulong(&iter);
f6a9efaa
DG
1278 if (node == NULL) {
1279 DBG2("UST app find by sock %d not found", sock);
f6a9efaa
DG
1280 goto error;
1281 }
852d0037
DG
1282
1283 return caa_container_of(node, struct ust_app, sock_n);
f6a9efaa
DG
1284
1285error:
1286 return NULL;
099e26bd
DG
1287}
1288
d0b96690
DG
1289/*
1290 * Find an ust_app using the notify sock and return it. RCU read side lock must
1291 * be held before calling this helper function.
1292 */
1293static struct ust_app *find_app_by_notify_sock(int sock)
1294{
1295 struct lttng_ht_node_ulong *node;
1296 struct lttng_ht_iter iter;
1297
1298 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1299 &iter);
1300 node = lttng_ht_iter_get_node_ulong(&iter);
1301 if (node == NULL) {
1302 DBG2("UST app find by notify sock %d not found", sock);
1303 goto error;
1304 }
1305
1306 return caa_container_of(node, struct ust_app, notify_sock_n);
1307
1308error:
1309 return NULL;
1310}
1311
025faf73
DG
1312/*
1313 * Lookup for an ust app event based on event name, filter bytecode and the
1314 * event loglevel.
1315 *
1316 * Return an ust_app_event object or NULL on error.
1317 */
18eace3b 1318static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
88e3c2f5 1319 const char *name, const struct lttng_filter_bytecode *filter,
2106efa0 1320 int loglevel_value,
39c5a3a7 1321 const struct lttng_event_exclusion *exclusion)
18eace3b
DG
1322{
1323 struct lttng_ht_iter iter;
1324 struct lttng_ht_node_str *node;
1325 struct ust_app_event *event = NULL;
1326 struct ust_app_ht_key key;
18eace3b
DG
1327
1328 assert(name);
1329 assert(ht);
1330
1331 /* Setup key for event lookup. */
1332 key.name = name;
1333 key.filter = filter;
2106efa0 1334 key.loglevel_type = loglevel_value;
39c5a3a7 1335 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
51755dc8 1336 key.exclusion = exclusion;
18eace3b 1337
025faf73
DG
1338 /* Lookup using the event name as hash and a custom match fct. */
1339 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1340 ht_match_ust_app_event, &key, &iter.iter);
18eace3b
DG
1341 node = lttng_ht_iter_get_node_str(&iter);
1342 if (node == NULL) {
1343 goto end;
1344 }
1345
1346 event = caa_container_of(node, struct ust_app_event, node);
1347
1348end:
18eace3b
DG
1349 return event;
1350}
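/*
 * Usage sketch (illustrative only, uevent is a hypothetical source event):
 * before creating a UST event on a channel, the session daemon typically
 * looks up an existing identical event so the same (name, filter,
 * loglevel, exclusion) tuple is not registered twice:
 *
 *	struct ust_app_event *ua_event;
 *
 *	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
 *			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
 *	if (ua_event) {
 *		// Already present, nothing to create.
 *	}
 */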
1351
1831ae68
FD
1352/*
1353 * Lookup for an ust app tokens based on a token id.
1354 *
1355 * Return an ust_app_token_event_rule object or NULL on error.
1356 */
1357static struct ust_app_token_event_rule *find_ust_app_token_event_rule(struct lttng_ht *ht,
1358 uint64_t token)
1359{
1360 struct lttng_ht_iter iter;
1361 struct lttng_ht_node_u64 *node;
1362 struct ust_app_token_event_rule *token_event_rule = NULL;
1363
1364 assert(ht);
1365
1366 lttng_ht_lookup(ht, &token, &iter);
1367 node = lttng_ht_iter_get_node_u64(&iter);
1368 if (node == NULL) {
1369 DBG2("UST app token %" PRIu64 " not found", token);
1370 goto end;
1371 }
1372
1373 token_event_rule = caa_container_of(node, struct ust_app_token_event_rule, node);
1374end:
1375 return token_event_rule;
1376}
1377
55cc08a6
DG
1378/*
1379 * Create the channel context on the tracer.
d0b96690
DG
1380 *
1381 * Called with UST app session lock held.
55cc08a6
DG
1382 */
1383static
1384int create_ust_channel_context(struct ust_app_channel *ua_chan,
1385 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1386{
1387 int ret;
1388
840cb59c 1389 health_code_update();
86acf0da 1390
fb45065e 1391 pthread_mutex_lock(&app->sock_lock);
852d0037 1392 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
55cc08a6 1393 ua_chan->obj, &ua_ctx->obj);
fb45065e 1394 pthread_mutex_unlock(&app->sock_lock);
55cc08a6 1395 if (ret < 0) {
ffe60014
DG
1396 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1397 ERR("UST app create channel context failed for app (pid: %d) "
1398 "with ret %d", app->pid, ret);
1399 } else {
3757b385
DG
1400 /*
1401 * This is normal behavior, an application can die during the
1402 * creation process. Don't report an error so the execution can
1403 * continue normally.
1404 */
1405 ret = 0;
88e3c2f5 1406 DBG3("UST app add context failed. Application is dead.");
ffe60014 1407 }
55cc08a6
DG
1408 goto error;
1409 }
1410
1411 ua_ctx->handle = ua_ctx->obj->handle;
1412
d0b96690
DG
1413 DBG2("UST app context handle %d created successfully for channel %s",
1414 ua_ctx->handle, ua_chan->name);
55cc08a6
DG
1415
1416error:
840cb59c 1417 health_code_update();
55cc08a6
DG
1418 return ret;
1419}
1420
53a80697
MD
1421/*
1422 * Set the filter on the tracer.
1423 */
1831ae68
FD
1424static int set_ust_filter(struct ust_app *app,
1425 const struct lttng_filter_bytecode *bytecode,
1426 struct lttng_ust_object_data *ust_object)
53a80697
MD
1427{
1428 int ret;
51755dc8 1429 struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
53a80697 1430
840cb59c 1431 health_code_update();
86acf0da 1432
1831ae68 1433 ust_bytecode = create_ust_bytecode_from_bytecode(bytecode);
51755dc8
JG
1434 if (!ust_bytecode) {
1435 ret = -LTTNG_ERR_NOMEM;
1436 goto error;
1437 }
fb45065e 1438 pthread_mutex_lock(&app->sock_lock);
51755dc8 1439 ret = ustctl_set_filter(app->sock, ust_bytecode,
1831ae68 1440 ust_object);
fb45065e 1441 pthread_mutex_unlock(&app->sock_lock);
53a80697 1442 if (ret < 0) {
ffe60014 1443 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1831ae68
FD
1444 ERR("UST app set filter failed for object %p of app (pid: %d) "
1445 "with ret %d", ust_object, app->pid, ret);
ffe60014 1446 } else {
3757b385
DG
1447 /*
1448 * This is normal behavior, an application can die during the
1449 * creation process. Don't report an error so the execution can
1450 * continue normally.
1451 */
1452 ret = 0;
1831ae68 1453 DBG3("UST app set filter. Application is dead.");
ffe60014 1454 }
53a80697
MD
1455 goto error;
1456 }
1457
1831ae68 1458 DBG2("UST filter set for object %p successfully", ust_object);
53a80697
MD
1459
1460error:
840cb59c 1461 health_code_update();
51755dc8 1462 free(ust_bytecode);
53a80697
MD
1463 return ret;
1464}
1465
51755dc8
JG
1466static
1467struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
1468 struct lttng_event_exclusion *exclusion)
1469{
1470 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1471 size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1472 LTTNG_UST_SYM_NAME_LEN * exclusion->count;
1473
1474 ust_exclusion = zmalloc(exclusion_alloc_size);
1475 if (!ust_exclusion) {
1476 PERROR("malloc");
1477 goto end;
1478 }
1479
1480 assert(sizeof(struct lttng_event_exclusion) ==
1481 sizeof(struct lttng_ust_event_exclusion));
1482 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1483end:
1484 return ust_exclusion;
1485}
1486
7cc9a73c
JI
1487/*
1488 * Set event exclusions on the tracer.
1489 */
1831ae68
FD
1490static int set_ust_exclusions(struct ust_app *app,
1491 struct lttng_event_exclusion *exclusions,
1492 struct lttng_ust_object_data *ust_object)
7cc9a73c
JI
1493{
1494 int ret;
1831ae68 1495 struct lttng_ust_event_exclusion *ust_exclusions = NULL;
7cc9a73c 1496
1831ae68 1497 assert(exclusions && exclusions->count > 0);
7cc9a73c 1498
1831ae68 1499 health_code_update();
7cc9a73c 1500
1831ae68
FD
1501 ust_exclusions = create_ust_exclusion_from_exclusion(
1502 exclusions);
1503 if (!ust_exclusions) {
51755dc8
JG
1504 ret = -LTTNG_ERR_NOMEM;
1505 goto error;
1506 }
fb45065e 1507 pthread_mutex_lock(&app->sock_lock);
1831ae68 1508 ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
fb45065e 1509 pthread_mutex_unlock(&app->sock_lock);
7cc9a73c
JI
1510 if (ret < 0) {
1511 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1831ae68
FD
1512 ERR("UST app exclusions failed for object %p of app (pid: %d) "
1513 "with ret %d", ust_object, app->pid, ret);
7cc9a73c
JI
1514 } else {
1515 /*
1516 * This is normal behavior, an application can die during the
1517 * creation process. Don't report an error so the execution can
1518 * continue normally.
1519 */
1520 ret = 0;
1831ae68 1521 DBG3("UST app set exclusions failed. Application is dead.");
7cc9a73c
JI
1522 }
1523 goto error;
1524 }
1525
1831ae68 1526 DBG2("UST exclusions set successfully for object %p", ust_object);
7cc9a73c
JI
1527
1528error:
1529 health_code_update();
1831ae68 1530 free(ust_exclusions);
7cc9a73c
JI
1531 return ret;
1532}
1533
9730260e
DG
1534/*
1535 * Disable the specified event on to UST tracer for the UST session.
1536 */
1831ae68
FD
1537static int disable_ust_object(struct ust_app *app,
1538 struct lttng_ust_object_data *object)
9730260e
DG
1539{
1540 int ret;
1541
840cb59c 1542 health_code_update();
86acf0da 1543
fb45065e 1544 pthread_mutex_lock(&app->sock_lock);
1831ae68 1545 ret = ustctl_disable(app->sock, object);
fb45065e 1546 pthread_mutex_unlock(&app->sock_lock);
9730260e 1547 if (ret < 0) {
ffe60014 1548 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1831ae68
FD
1549 ERR("UST app disable failed for object %p app (pid: %d) with ret %d",
1550 object, app->pid, ret);
ffe60014 1551 } else {
3757b385
DG
1552 /*
1553 * This is normal behavior, an application can die during the
1554 * creation process. Don't report an error so the execution can
1555 * continue normally.
1556 */
1557 ret = 0;
ffe60014
DG
1558 DBG3("UST app disable event failed. Application is dead.");
1559 }
9730260e
DG
1560 goto error;
1561 }
1562
1831ae68
FD
1563 DBG2("UST app object %p disabled successfully for app (pid: %d)",
1564 object, app->pid);
9730260e
DG
1565
1566error:
840cb59c 1567 health_code_update();
9730260e
DG
1568 return ret;
1569}
1570
78f0bacd
DG
1571/*
1572 * Disable the specified channel on to UST tracer for the UST session.
1573 */
1574static int disable_ust_channel(struct ust_app *app,
1575 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1576{
1577 int ret;
1578
840cb59c 1579 health_code_update();
86acf0da 1580
fb45065e 1581 pthread_mutex_lock(&app->sock_lock);
852d0037 1582 ret = ustctl_disable(app->sock, ua_chan->obj);
fb45065e 1583 pthread_mutex_unlock(&app->sock_lock);
78f0bacd 1584 if (ret < 0) {
ffe60014
DG
1585 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1586 ERR("UST app channel %s disable failed for app (pid: %d) "
1587 "and session handle %d with ret %d",
1588 ua_chan->name, app->pid, ua_sess->handle, ret);
1589 } else {
3757b385
DG
1590 /*
1591 * This is normal behavior, an application can die during the
1592 * creation process. Don't report an error so the execution can
1593 * continue normally.
1594 */
1595 ret = 0;
ffe60014
DG
1596 DBG3("UST app disable channel failed. Application is dead.");
1597 }
78f0bacd
DG
1598 goto error;
1599 }
1600
78f0bacd 1601 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
852d0037 1602 ua_chan->name, app->pid);
78f0bacd
DG
1603
1604error:
840cb59c 1605 health_code_update();
78f0bacd
DG
1606 return ret;
1607}
1608
1609/*
1610 * Enable the specified channel on to UST tracer for the UST session.
1611 */
1612static int enable_ust_channel(struct ust_app *app,
1613 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1614{
1615 int ret;
1616
840cb59c 1617 health_code_update();
86acf0da 1618
fb45065e 1619 pthread_mutex_lock(&app->sock_lock);
852d0037 1620 ret = ustctl_enable(app->sock, ua_chan->obj);
fb45065e 1621 pthread_mutex_unlock(&app->sock_lock);
78f0bacd 1622 if (ret < 0) {
ffe60014
DG
1623 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1624 ERR("UST app channel %s enable failed for app (pid: %d) "
1625 "and session handle %d with ret %d",
1626 ua_chan->name, app->pid, ua_sess->handle, ret);
1627 } else {
3757b385
DG
1628 /*
1629 * This is normal behavior, an application can die during the
1630 * creation process. Don't report an error so the execution can
1631 * continue normally.
1632 */
1633 ret = 0;
ffe60014
DG
1634 DBG3("UST app enable channel failed. Application is dead.");
1635 }
78f0bacd
DG
1636 goto error;
1637 }
1638
1639 ua_chan->enabled = 1;
1640
1641 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
852d0037 1642 ua_chan->name, app->pid);
78f0bacd
DG
1643
1644error:
840cb59c 1645 health_code_update();
78f0bacd
DG
1646 return ret;
1647}
1648
edb67388
DG
1649/*
1650 * Enable the specified event on to UST tracer for the UST session.
1651 */
1831ae68 1652static int enable_ust_object(struct ust_app *app, struct lttng_ust_object_data *ust_object)
edb67388
DG
1653{
1654 int ret;
1655
840cb59c 1656 health_code_update();
86acf0da 1657
fb45065e 1658 pthread_mutex_lock(&app->sock_lock);
1831ae68 1659 ret = ustctl_enable(app->sock, ust_object);
fb45065e 1660 pthread_mutex_unlock(&app->sock_lock);
edb67388 1661 if (ret < 0) {
ffe60014 1662 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1831ae68
FD
1663 ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
1664 ust_object, app->pid, ret);
ffe60014 1665 } else {
3757b385
DG
1666 /*
1667 * This is normal behavior, an application can die during the
1668 * creation process. Don't report an error so the execution can
1669 * continue normally.
1670 */
1671 ret = 0;
1831ae68 1672 DBG3("UST app enable failed. Application is dead.");
ffe60014 1673 }
edb67388
DG
1674 goto error;
1675 }
1676
1831ae68
FD
1677 DBG2("UST app object %p enabled successfully for app (pid: %d)",
1678 ust_object, app->pid);
edb67388
DG
1679
1680error:
840cb59c 1681 health_code_update();
edb67388
DG
1682 return ret;
1683}
1684
099e26bd 1685/*
7972aab2 1686 * Send channel and stream buffer to application.
4f3ab6ee 1687 *
ffe60014 1688 * Return 0 on success. On error, a negative value is returned.
4f3ab6ee 1689 */
7972aab2
DG
1690static int send_channel_pid_to_ust(struct ust_app *app,
1691 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
4f3ab6ee
DG
1692{
1693 int ret;
ffe60014 1694 struct ust_app_stream *stream, *stmp;
4f3ab6ee
DG
1695
1696 assert(app);
ffe60014 1697 assert(ua_sess);
4f3ab6ee 1698 assert(ua_chan);
4f3ab6ee 1699
840cb59c 1700 health_code_update();
4f3ab6ee 1701
7972aab2
DG
1702 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1703 app->sock);
86acf0da 1704
ffe60014
DG
1705 /* Send channel to the application. */
1706 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
a7169585
MD
1707 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1708 ret = -ENOTCONN; /* Caused by app exiting. */
1709 goto error;
1710 } else if (ret < 0) {
b551a063
DG
1711 goto error;
1712 }
1713
d88aee68
DG
1714 health_code_update();
1715
ffe60014
DG
1716 /* Send all streams to application. */
1717 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1718 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
a7169585
MD
1719 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1720 ret = -ENOTCONN; /* Caused by app exiting. */
1721 goto error;
1722 } else if (ret < 0) {
ffe60014
DG
1723 goto error;
1724 }
1725 /* We don't need the stream anymore once sent to the tracer. */
1726 cds_list_del(&stream->list);
fb45065e 1727 delete_ust_app_stream(-1, stream, app);
ffe60014 1728 }
ffe60014
DG
1729 /* Flag the channel that it is sent to the application. */
1730 ua_chan->is_sent = 1;
ffe60014 1731
b551a063 1732error:
840cb59c 1733 health_code_update();
b551a063
DG
1734 return ret;
1735}
1736
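/*
 * Editor's illustration, not part of the original source: how a caller is
 * expected to treat the -ENOTCONN return of send_channel_pid_to_ust(). An
 * application exiting while its channel is being sent is not worth
 * reporting; only other negative values are real errors. The pointers and
 * required locks (session, RCU) are assumed to be handled by the caller.
 */
#if 0	/* Illustrative sketch only; kept out of the build. */
static int example_send_pid_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret == -ENOTCONN) {
		/* Application exited concurrently; nothing to report. */
		return 0;
	}
	return ret;
}
#endif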
91d76f53 1737/*
5b4a0ec0 1738 * Create the specified event onto the UST tracer for a UST session.
d0b96690
DG
1739 *
1740 * Should be called with session mutex held.
91d76f53 1741 */
edb67388
DG
1742static
1743int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1744 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
91d76f53 1745{
5b4a0ec0 1746 int ret = 0;
284d8f55 1747
840cb59c 1748 health_code_update();
86acf0da 1749
5b4a0ec0 1750 /* Create UST event on tracer */
fb45065e 1751 pthread_mutex_lock(&app->sock_lock);
852d0037 1752 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
5b4a0ec0 1753 &ua_event->obj);
fb45065e 1754 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0 1755 if (ret < 0) {
ffe60014 1756 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
88e3c2f5 1757 abort();
ffe60014
DG
1758 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1759 ua_event->attr.name, app->pid, ret);
1760 } else {
3757b385
DG
1761 /*
1762 * This is normal behavior, an application can die during the
1763 * creation process. Don't report an error so the execution can
1764 * continue normally.
1765 */
1766 ret = 0;
ffe60014
DG
1767 DBG3("UST app create event failed. Application is dead.");
1768 }
5b4a0ec0 1769 goto error;
91d76f53 1770 }
f6a9efaa 1771
5b4a0ec0 1772 ua_event->handle = ua_event->obj->handle;
284d8f55 1773
1831ae68
FD
1774 DBG2("UST app event %s created successfully for pid:%d object: %p",
1775 ua_event->attr.name, app->pid, ua_event->obj);
f6a9efaa 1776
840cb59c 1777 health_code_update();
86acf0da 1778
025faf73
DG
1779 /* Set filter if one is present. */
1780 if (ua_event->filter) {
1831ae68 1781 ret = set_ust_filter(app, ua_event->filter, ua_event->obj);
025faf73
DG
1782 if (ret < 0) {
1783 goto error;
1784 }
1785 }
1786
7cc9a73c
JI
1787 /* Set exclusions for the event */
1788 if (ua_event->exclusion) {
1831ae68 1789 ret = set_ust_exclusions(app, ua_event->exclusion, ua_event->obj);
7cc9a73c
JI
1790 if (ret < 0) {
1791 goto error;
1792 }
1793 }
1794
8535a6d9 1795 /* The event is created disabled; enable it on the tracer if requested. */
40113787
MD
1796 if (ua_event->enabled) {
1797 /*
1798 * We now need to explicitly enable the event, since it
1799 * is now disabled at creation.
1800 */
1831ae68 1801 ret = enable_ust_object(app, ua_event->obj);
40113787
MD
1802 if (ret < 0) {
1803 /*
1804 * If we hit an EPERM, something is wrong with our enable call. If
1805 * we get an EEXIST, there is a problem on the tracer side since we
1806 * just created it.
1807 */
1808 switch (ret) {
1809 case -LTTNG_UST_ERR_PERM:
1810 /* Code flow problem */
1811 assert(0);
1812 case -LTTNG_UST_ERR_EXIST:
1813 /* It's OK for our use case. */
1814 ret = 0;
1815 break;
1816 default:
1817 break;
1818 }
1819 goto error;
1820 }
8535a6d9
DG
1821 }
1822
5b4a0ec0 1823error:
840cb59c 1824 health_code_update();
5b4a0ec0 1825 return ret;
91d76f53 1826}
48842b30 1827
1831ae68
FD
1828static
1829void init_ust_trigger_from_event_rule(const struct lttng_event_rule *rule, struct lttng_ust_trigger *trigger)
1830{
1831 enum lttng_event_rule_status status;
1832 enum lttng_loglevel_type loglevel_type;
1833 enum lttng_ust_loglevel_type ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1834 int loglevel = -1;
1835 const char *pattern;
1836
1837 /* For now only LTTNG_EVENT_RULE_TYPE_TRACEPOINT are supported */
1838 assert(lttng_event_rule_get_type(rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
1839
1840 memset(trigger, 0, sizeof(*trigger));
1841
1842 if (lttng_event_rule_is_agent(rule)) {
1843 /*
1844 * Special event for agents
1845 * The actual meat of the event is in the filter that will be
1846 * attached later on.
1847 * Set the default values for the agent event.
1848 */
1849 pattern = event_get_default_agent_ust_name(lttng_event_rule_get_domain_type(rule));
1850 loglevel = 0;
1851 ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1852 } else {
1853 status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
1854 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
1855 /* At this point this is a fatal error */
1856 assert(0);
1857 }
1858
1859 status = lttng_event_rule_tracepoint_get_loglevel_type(
1860 rule, &loglevel_type);
1861 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
1862 /* At this point this is a fatal error */
1863 assert(0);
1864 }
1865
1866 switch (loglevel_type) {
1867 case LTTNG_EVENT_LOGLEVEL_ALL:
1868 ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1869 break;
1870 case LTTNG_EVENT_LOGLEVEL_RANGE:
1871 ust_loglevel_type = LTTNG_UST_LOGLEVEL_RANGE;
1872 break;
1873 case LTTNG_EVENT_LOGLEVEL_SINGLE:
1874 ust_loglevel_type = LTTNG_UST_LOGLEVEL_SINGLE;
1875 break;
1876 }
1877
1878 if (loglevel_type != LTTNG_EVENT_LOGLEVEL_ALL) {
1879 status = lttng_event_rule_tracepoint_get_loglevel(
1880 rule, &loglevel);
1881 assert(status == LTTNG_EVENT_RULE_STATUS_OK);
1882 }
1883 }
1884
1885 trigger->instrumentation = LTTNG_UST_TRACEPOINT;
1886 strncpy(trigger->name, pattern, LTTNG_UST_SYM_NAME_LEN - 1);
1887 trigger->loglevel_type = ust_loglevel_type;
1888 trigger->loglevel = loglevel;
1889}
1890
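/*
 * Editor's note, illustrative only and not from the original source: a
 * concrete example of the mapping above. A tracepoint event rule with the
 * pattern "my_provider:*" and a loglevel range condition at
 * LTTNG_LOGLEVEL_WARNING would yield roughly:
 *
 *	trigger.instrumentation = LTTNG_UST_TRACEPOINT;
 *	trigger.name            = "my_provider:*";
 *	trigger.loglevel_type   = LTTNG_UST_LOGLEVEL_RANGE;
 *	trigger.loglevel        = LTTNG_LOGLEVEL_WARNING;
 *
 * Agent rules instead receive the default agent event name and log
 * everything; their real matching is done by the filter attached later.
 */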
1891/*
1892 * Create the specified event rule token on the UST tracer for a UST app.
1893 *
1894 */
1895static
1896int create_ust_token_event_rule(struct ust_app *app, struct ust_app_token_event_rule *ua_token)
1897{
1898 int ret = 0;
1899 struct lttng_ust_trigger trigger;
1900
1901 health_code_update();
1902
1903 init_ust_trigger_from_event_rule(ua_token->event_rule, &trigger);
1904 trigger.id = ua_token->token;
1905
1906 /* Create UST trigger on tracer */
1907 pthread_mutex_lock(&app->sock_lock);
1908 ret = ustctl_create_trigger(app->sock, &trigger, app->token_communication.handle, &ua_token->obj);
1909 pthread_mutex_unlock(&app->sock_lock);
1910 if (ret < 0) {
1911 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1912 abort();
1913 ERR("Error ustctl create trigger %s for app pid: %d with ret %d",
1914 trigger.name, app->pid, ret);
1915 } else {
1916 /*
1917 * This is normal behavior, an application can die during the
1918 * creation process. Don't report an error so the execution can
1919 * continue normally.
1920 */
1921 ret = 0;
1922 DBG3("UST app create event failed. Application is dead.");
1923 }
1924 goto error;
1925 }
1926
1927 ua_token->handle = ua_token->obj->handle;
1928
1929 DBG2("UST app event %s created successfully for pid:%d object: %p",
1930 trigger.name, app->pid, ua_token->obj);
1931
1932 health_code_update();
1933
1934 /* Set filter if one is present. */
1935 if (ua_token->filter) {
1936 ret = set_ust_filter(app, ua_token->filter, ua_token->obj);
1937 if (ret < 0) {
1938 goto error;
1939 }
1940 }
1941
1942 /* Set exclusions for the event */
1943 if (ua_token->exclusion) {
1944 ret = set_ust_exclusions(app, ua_token->exclusion, ua_token->obj);
1945 if (ret < 0) {
1946 goto error;
1947 }
1948 }
1949
1950 /*
1951 * We now need to explicitly enable the event, since it
1952 * is disabled at creation.
1953 */
1954 ret = enable_ust_object(app, ua_token->obj);
1955 if (ret < 0) {
1956 /*
1957 * If we hit an EPERM, something is wrong with our enable call. If
1958 * we get an EEXIST, there is a problem on the tracer side since we
1959 * just created it.
1960 */
1961 switch (ret) {
1962 case -LTTNG_UST_ERR_PERM:
1963 /* Code flow problem */
1964 assert(0);
1965 case -LTTNG_UST_ERR_EXIST:
1966 /* It's OK for our use case. */
1967 ret = 0;
1968 break;
1969 default:
1970 break;
1971 }
1972 goto error;
1973 }
1974 ua_token->enabled = true;
1975
1976error:
1977 health_code_update();
1978 return ret;
1979}
1980
5b4a0ec0
DG
1981/*
1982 * Copy data between an UST app event and a LTT event.
1983 */
421cb601 1984static void shadow_copy_event(struct ust_app_event *ua_event,
48842b30
DG
1985 struct ltt_ust_event *uevent)
1986{
b4ffad32
JI
1987 size_t exclusion_alloc_size;
1988
48842b30
DG
1989 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1990 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1991
fc34caaa
DG
1992 ua_event->enabled = uevent->enabled;
1993
5b4a0ec0
DG
1994 /* Copy event attributes */
1995 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1996
53a80697
MD
1997 /* Copy filter bytecode */
1998 if (uevent->filter) {
51755dc8 1999 ua_event->filter = copy_filter_bytecode(uevent->filter);
025faf73 2000 /* Filter might be NULL here in case of ENOMEM. */
53a80697 2001 }
b4ffad32
JI
2002
2003 /* Copy exclusion data */
2004 if (uevent->exclusion) {
51755dc8 2005 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
b4ffad32
JI
2006 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
2007 ua_event->exclusion = zmalloc(exclusion_alloc_size);
5f8df26c
JI
2008 if (ua_event->exclusion == NULL) {
2009 PERROR("malloc");
2010 } else {
2011 memcpy(ua_event->exclusion, uevent->exclusion,
2012 exclusion_alloc_size);
b4ffad32
JI
2013 }
2014 }
48842b30
DG
2015}
2016
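/*
 * Editor's note, illustrative only and not from the original source: the
 * exclusion copy above is a flat header followed by one fixed-size name
 * slot per excluded symbol. Assuming LTTNG_UST_SYM_NAME_LEN is 256,
 * excluding two event names costs
 * sizeof(struct lttng_event_exclusion) + 2 * 256 bytes.
 */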
5b4a0ec0
DG
2017/*
2018 * Copy data between an UST app channel and a LTT channel.
2019 */
421cb601 2020static void shadow_copy_channel(struct ust_app_channel *ua_chan,
48842b30
DG
2021 struct ltt_ust_channel *uchan)
2022{
fc34caaa 2023 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
48842b30
DG
2024
2025 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2026 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
ffe60014 2027
1624d5b7
JD
2028 ua_chan->tracefile_size = uchan->tracefile_size;
2029 ua_chan->tracefile_count = uchan->tracefile_count;
2030
ffe60014
DG
2031 /* Copy event attributes since the layout is different. */
2032 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2033 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2034 ua_chan->attr.overwrite = uchan->attr.overwrite;
2035 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2036 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
e9404c27 2037 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
ffe60014 2038 ua_chan->attr.output = uchan->attr.output;
491d1539
MD
2039 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2040
ffe60014
DG
2041 /*
2042 * Note that the attribute channel type is not set since the channel on the
2043 * tracing registry side does not have this information.
2044 */
48842b30 2045
fc34caaa 2046 ua_chan->enabled = uchan->enabled;
7972aab2 2047 ua_chan->tracing_channel_id = uchan->id;
fc34caaa 2048
fc34caaa 2049 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
48842b30
DG
2050}
2051
5b4a0ec0
DG
2052/*
2053 * Copy data between a UST app session and a regular LTT session.
2054 */
421cb601 2055static void shadow_copy_session(struct ust_app_session *ua_sess,
bec39940 2056 struct ltt_ust_session *usess, struct ust_app *app)
48842b30 2057{
477d7741
MD
2058 struct tm *timeinfo;
2059 char datetime[16];
2060 int ret;
d7ba1388 2061 char tmp_shm_path[PATH_MAX];
477d7741 2062
940c4592 2063 timeinfo = localtime(&app->registration_time);
477d7741 2064 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
48842b30 2065
421cb601 2066 DBG2("Shadow copy of session handle %d", ua_sess->handle);
48842b30 2067
7972aab2
DG
2068 ua_sess->tracing_id = usess->id;
2069 ua_sess->id = get_next_session_id();
470cc211
JG
2070 ua_sess->real_credentials.uid = app->uid;
2071 ua_sess->real_credentials.gid = app->gid;
2072 ua_sess->effective_credentials.uid = usess->uid;
2073 ua_sess->effective_credentials.gid = usess->gid;
7972aab2
DG
2074 ua_sess->buffer_type = usess->buffer_type;
2075 ua_sess->bits_per_long = app->bits_per_long;
6addfa37 2076
7972aab2 2077 /* Only one consumer object per session is possible. */
6addfa37 2078 consumer_output_get(usess->consumer);
7972aab2 2079 ua_sess->consumer = usess->consumer;
6addfa37 2080
2bba9e53 2081 ua_sess->output_traces = usess->output_traces;
ecc48a90 2082 ua_sess->live_timer_interval = usess->live_timer_interval;
84ad93e8
DG
2083 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
2084 &usess->metadata_attr);
7972aab2
DG
2085
2086 switch (ua_sess->buffer_type) {
2087 case LTTNG_BUFFER_PER_PID:
2088 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
dec56f6c 2089 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
7972aab2
DG
2090 datetime);
2091 break;
2092 case LTTNG_BUFFER_PER_UID:
2093 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
470cc211
JG
2094 DEFAULT_UST_TRACE_UID_PATH,
2095 ua_sess->real_credentials.uid,
2096 app->bits_per_long);
7972aab2
DG
2097 break;
2098 default:
2099 assert(0);
2100 goto error;
2101 }
477d7741
MD
2102 if (ret < 0) {
2103 PERROR("asprintf UST shadow copy session");
477d7741 2104 assert(0);
7972aab2 2105 goto error;
477d7741
MD
2106 }
2107
3d071855
MD
2108 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
2109 sizeof(ua_sess->root_shm_path));
2110 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
d7ba1388
MD
2111 strncpy(ua_sess->shm_path, usess->shm_path,
2112 sizeof(ua_sess->shm_path));
2113 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2114 if (ua_sess->shm_path[0]) {
2115 switch (ua_sess->buffer_type) {
2116 case LTTNG_BUFFER_PER_PID:
2117 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
5da88b0f 2118 "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
d7ba1388
MD
2119 app->name, app->pid, datetime);
2120 break;
2121 case LTTNG_BUFFER_PER_UID:
2122 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
5da88b0f 2123 "/" DEFAULT_UST_TRACE_UID_PATH,
d7ba1388
MD
2124 app->uid, app->bits_per_long);
2125 break;
2126 default:
2127 assert(0);
2128 goto error;
2129 }
2130 if (ret < 0) {
2131 PERROR("sprintf UST shadow copy session");
2132 assert(0);
2133 goto error;
2134 }
2135 strncat(ua_sess->shm_path, tmp_shm_path,
2136 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
2137 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2138 }
6addfa37 2139 return;
7972aab2
DG
2140
2141error:
6addfa37 2142 consumer_output_put(ua_sess->consumer);
48842b30
DG
2143}
2144
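/*
 * Editor's note, illustrative only and not from the original source:
 * assuming the stock DEFAULT_UST_TRACE_*_PATH macros, the relative paths
 * built above look roughly like:
 *
 *	per-PID: ust/pid/<app_name>-<pid>-<YYYYmmdd-HHMMSS>
 *	per-UID: ust/uid/<uid>/<bits_per_long>-bit
 *
 * The same suffixes are appended to ua_sess->shm_path when an explicit
 * shm path was configured for the session.
 */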
78f0bacd
DG
2145/*
2146 * Session lookup wrapper.
2147 */
84cd17c6 2148static
fb9a95c4 2149void __lookup_session_by_app(const struct ltt_ust_session *usess,
bec39940 2150 struct ust_app *app, struct lttng_ht_iter *iter)
84cd17c6
MD
2151{
2152 /* Get right UST app session from app */
d9bf3ca4 2153 lttng_ht_lookup(app->sessions, &usess->id, iter);
84cd17c6
MD
2154}
2155
421cb601
DG
2156/*
2157 * Return ust app session from the app session hashtable using the UST session
a991f516 2158 * id.
421cb601 2159 */
48842b30 2160static struct ust_app_session *lookup_session_by_app(
fb9a95c4 2161 const struct ltt_ust_session *usess, struct ust_app *app)
48842b30 2162{
bec39940 2163 struct lttng_ht_iter iter;
d9bf3ca4 2164 struct lttng_ht_node_u64 *node;
48842b30 2165
84cd17c6 2166 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 2167 node = lttng_ht_iter_get_node_u64(&iter);
48842b30
DG
2168 if (node == NULL) {
2169 goto error;
2170 }
2171
2172 return caa_container_of(node, struct ust_app_session, node);
2173
2174error:
2175 return NULL;
2176}
2177
7972aab2
DG
2178/*
2179 * Setup buffer registry per PID for the given session and application. If none
2180 * is found, a new one is created, added to the global registry and
2181 * initialized. If regp is valid, it's set with the newly created object.
2182 *
2183 * Return 0 on success or else a negative value.
2184 */
2185static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2186 struct ust_app *app, struct buffer_reg_pid **regp)
2187{
2188 int ret = 0;
2189 struct buffer_reg_pid *reg_pid;
2190
2191 assert(ua_sess);
2192 assert(app);
2193
2194 rcu_read_lock();
2195
2196 reg_pid = buffer_reg_pid_find(ua_sess->id);
2197 if (!reg_pid) {
2198 /*
2199 * This is the create channel path meaning that if there is NO
2200 * registry available, we have to create one for this session.
2201 */
d7ba1388 2202 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
3d071855 2203 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
2204 if (ret < 0) {
2205 goto error;
2206 }
7972aab2
DG
2207 } else {
2208 goto end;
2209 }
2210
2211 /* Initialize registry. */
2212 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2213 app->bits_per_long, app->uint8_t_alignment,
2214 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf 2215 app->uint64_t_alignment, app->long_alignment,
470cc211
JG
2216 app->byte_order, app->version.major, app->version.minor,
2217 reg_pid->root_shm_path, reg_pid->shm_path,
2218 ua_sess->effective_credentials.uid,
8de88061
JR
2219 ua_sess->effective_credentials.gid, ua_sess->tracing_id,
2220 app->uid);
7972aab2 2221 if (ret < 0) {
286c991a
MD
2222 /*
2223 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2224 * destroy the buffer registry, because it is always expected
2225 * that if the buffer registry can be found, its ust registry is
2226 * non-NULL.
2227 */
2228 buffer_reg_pid_destroy(reg_pid);
7972aab2
DG
2229 goto error;
2230 }
2231
286c991a
MD
2232 buffer_reg_pid_add(reg_pid);
2233
7972aab2
DG
2234 DBG3("UST app buffer registry per PID created successfully");
2235
2236end:
2237 if (regp) {
2238 *regp = reg_pid;
2239 }
2240error:
2241 rcu_read_unlock();
2242 return ret;
2243}
2244
2245/*
2246 * Setup buffer registry per UID for the given session and application. If none
2247 * is found, a new one is created, added to the global registry and
2248 * initialized. If regp is valid, it's set with the newly created object.
2249 *
2250 * Return 0 on success or else a negative value.
2251 */
2252static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
d7ba1388 2253 struct ust_app_session *ua_sess,
7972aab2
DG
2254 struct ust_app *app, struct buffer_reg_uid **regp)
2255{
2256 int ret = 0;
2257 struct buffer_reg_uid *reg_uid;
2258
2259 assert(usess);
2260 assert(app);
2261
2262 rcu_read_lock();
2263
2264 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2265 if (!reg_uid) {
2266 /*
2267 * This is the create channel path meaning that if there is NO
2268 * registry available, we have to create one for this session.
2269 */
2270 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
3d071855
MD
2271 LTTNG_DOMAIN_UST, &reg_uid,
2272 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
2273 if (ret < 0) {
2274 goto error;
2275 }
7972aab2
DG
2276 } else {
2277 goto end;
2278 }
2279
2280 /* Initialize registry. */
af6142cf 2281 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
7972aab2
DG
2282 app->bits_per_long, app->uint8_t_alignment,
2283 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf
MD
2284 app->uint64_t_alignment, app->long_alignment,
2285 app->byte_order, app->version.major,
3d071855 2286 app->version.minor, reg_uid->root_shm_path,
8de88061
JR
2287 reg_uid->shm_path, usess->uid, usess->gid,
2288 ua_sess->tracing_id, app->uid);
7972aab2 2289 if (ret < 0) {
286c991a
MD
2290 /*
2291 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2292 * destroy the buffer registry, because it is always expected
2293 * that if the buffer registry can be found, its ust registry is
2294 * non-NULL.
2295 */
2296 buffer_reg_uid_destroy(reg_uid, NULL);
7972aab2
DG
2297 goto error;
2298 }
2299 /* Add node to teardown list of the session. */
2300 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2301
286c991a 2302 buffer_reg_uid_add(reg_uid);
7972aab2 2303
286c991a 2304 DBG3("UST app buffer registry per UID created successfully");
7972aab2
DG
2305end:
2306 if (regp) {
2307 *regp = reg_uid;
2308 }
2309error:
2310 rcu_read_unlock();
2311 return ret;
2312}
2313
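/*
 * Editor's note, illustrative only and not from the original source: the
 * two registry flavours above are keyed differently. A per-PID registry is
 * looked up by the ust app session id alone, while a per-UID registry is
 * shared between applications and keyed by the (tracing session id,
 * bits_per_long, uid) triple, which is why every 64-bit (or 32-bit)
 * application of a given user can reuse the same registry.
 */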
421cb601 2314/*
3d8ca23b 2315 * Create a session on the tracer side for the given app.
421cb601 2316 *
3d8ca23b
DG
2317 * On success, ua_sess_ptr is populated with the session pointer or else left
2318 * untouched. If the session was created, is_created is set to 1. On error,
2319 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2320 * be NULL.
2321 *
2322 * Returns 0 on success or else a negative code which is either -ENOMEM or
2323 * -ENOTCONN, the default code when ustctl_create_session fails.
421cb601 2324 */
03f91eaa 2325static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
3d8ca23b
DG
2326 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2327 int *is_created)
421cb601 2328{
3d8ca23b 2329 int ret, created = 0;
421cb601
DG
2330 struct ust_app_session *ua_sess;
2331
3d8ca23b
DG
2332 assert(usess);
2333 assert(app);
2334 assert(ua_sess_ptr);
2335
840cb59c 2336 health_code_update();
86acf0da 2337
421cb601
DG
2338 ua_sess = lookup_session_by_app(usess, app);
2339 if (ua_sess == NULL) {
d9bf3ca4 2340 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
852d0037 2341 app->pid, usess->id);
40bbd087 2342 ua_sess = alloc_ust_app_session();
421cb601
DG
2343 if (ua_sess == NULL) {
2344 /* Only malloc can fail here, so something is really wrong. */
3d8ca23b
DG
2345 ret = -ENOMEM;
2346 goto error;
421cb601 2347 }
477d7741 2348 shadow_copy_session(ua_sess, usess, app);
3d8ca23b 2349 created = 1;
421cb601
DG
2350 }
2351
7972aab2
DG
2352 switch (usess->buffer_type) {
2353 case LTTNG_BUFFER_PER_PID:
2354 /* Init local registry. */
2355 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
421cb601 2356 if (ret < 0) {
e64207cf 2357 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2358 goto error;
2359 }
2360 break;
2361 case LTTNG_BUFFER_PER_UID:
2362 /* Look for a global registry. If none exists, create one. */
d7ba1388 2363 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
7972aab2 2364 if (ret < 0) {
e64207cf 2365 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2366 goto error;
2367 }
2368 break;
2369 default:
2370 assert(0);
2371 ret = -EINVAL;
2372 goto error;
2373 }
2374
2375 health_code_update();
2376
2377 if (ua_sess->handle == -1) {
fb45065e 2378 pthread_mutex_lock(&app->sock_lock);
7972aab2 2379 ret = ustctl_create_session(app->sock);
fb45065e 2380 pthread_mutex_unlock(&app->sock_lock);
7972aab2
DG
2381 if (ret < 0) {
2382 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2383 ERR("Creating session for app pid %d with ret %d",
ffe60014
DG
2384 app->pid, ret);
2385 } else {
2386 DBG("UST app creating session failed. Application is dead");
3757b385
DG
2387 /*
2388 * This is normal behavior, an application can die during the
2389 * creation process. Don't report an error so the execution can
2390 * continue normally. This will get flagged ENOTCONN and the
2391 * caller will handle it.
2392 */
2393 ret = 0;
ffe60014 2394 }
d0b96690 2395 delete_ust_app_session(-1, ua_sess, app);
3d8ca23b
DG
2396 if (ret != -ENOMEM) {
2397 /*
2398 * Tracer is probably gone or got an internal error so let's
2399 * behave like it will soon unregister or not usable.
2400 */
2401 ret = -ENOTCONN;
2402 }
2403 goto error;
421cb601
DG
2404 }
2405
7972aab2
DG
2406 ua_sess->handle = ret;
2407
2408 /* Add ust app session to app's HT */
d9bf3ca4
MD
2409 lttng_ht_node_init_u64(&ua_sess->node,
2410 ua_sess->tracing_id);
2411 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
10b56aef
MD
2412 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2413 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2414 &ua_sess->ust_objd_node);
7972aab2
DG
2415
2416 DBG2("UST app session created successfully with handle %d", ret);
2417 }
2418
2419 *ua_sess_ptr = ua_sess;
2420 if (is_created) {
2421 *is_created = created;
2422 }
2423
2424 /* Everything went well. */
2425 ret = 0;
2426
2427error:
2428 health_code_update();
2429 return ret;
2430}
2431
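/*
 * Editor's illustration, not part of the original source: the calling
 * contract of find_or_create_ust_app_session(). ua_sess_ptr is mandatory
 * and only set on success; is_created is optional and tells the caller
 * whether a new per-app session (and tracer-side handle) was just created.
 */
#if 0	/* Illustrative sketch only; kept out of the build. */
static int example_get_app_session(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess = NULL;

	ret = find_or_create_ust_app_session(usess, app, &ua_sess, &created);
	if (ret < 0) {
		/* Either -ENOMEM or -ENOTCONN (application is gone). */
		return ret;
	}
	if (created) {
		DBG3("New UST app session created for app pid %d", app->pid);
	}
	/* ua_sess is now valid and registered in the app's hash tables. */
	return 0;
}
#endif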
6a6b2068
JG
2432/*
2433 * Match function for a hash table lookup of ust_app_ctx.
2434 *
2435 * It matches an ust app context based on the context type and, in the case
2436 * of perf counters, their name.
2437 */
2438static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2439{
2440 struct ust_app_ctx *ctx;
bdf64013 2441 const struct lttng_ust_context_attr *key;
6a6b2068
JG
2442
2443 assert(node);
2444 assert(_key);
2445
2446 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2447 key = _key;
2448
2449 /* Context type */
2450 if (ctx->ctx.ctx != key->ctx) {
2451 goto no_match;
2452 }
2453
bdf64013
JG
2454 switch(key->ctx) {
2455 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
6a6b2068 2456 if (strncmp(key->u.perf_counter.name,
bdf64013
JG
2457 ctx->ctx.u.perf_counter.name,
2458 sizeof(key->u.perf_counter.name))) {
2459 goto no_match;
2460 }
2461 break;
2462 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2463 if (strcmp(key->u.app_ctx.provider_name,
2464 ctx->ctx.u.app_ctx.provider_name) ||
2465 strcmp(key->u.app_ctx.ctx_name,
2466 ctx->ctx.u.app_ctx.ctx_name)) {
6a6b2068
JG
2467 goto no_match;
2468 }
bdf64013
JG
2469 break;
2470 default:
2471 break;
6a6b2068
JG
2472 }
2473
2474 /* Match. */
2475 return 1;
2476
2477no_match:
2478 return 0;
2479}
2480
2481/*
2482 * Lookup for an ust app context from an lttng_ust_context.
2483 *
be184a0f 2484 * Must be called while holding RCU read side lock.
6a6b2068
JG
2485 * Return an ust_app_ctx object or NULL on error.
2486 */
2487static
2488struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
bdf64013 2489 struct lttng_ust_context_attr *uctx)
6a6b2068
JG
2490{
2491 struct lttng_ht_iter iter;
2492 struct lttng_ht_node_ulong *node;
2493 struct ust_app_ctx *app_ctx = NULL;
2494
2495 assert(uctx);
2496 assert(ht);
2497
2498 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2499 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2500 ht_match_ust_app_ctx, uctx, &iter.iter);
2501 node = lttng_ht_iter_get_node_ulong(&iter);
2502 if (!node) {
2503 goto end;
2504 }
2505
2506 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2507
2508end:
2509 return app_ctx;
2510}
2511
7972aab2
DG
2512/*
2513 * Create a context for the channel on the tracer.
2514 *
2515 * Called with UST app session lock held and a RCU read side lock.
2516 */
2517static
c9edf082 2518int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
bdf64013 2519 struct lttng_ust_context_attr *uctx,
7972aab2
DG
2520 struct ust_app *app)
2521{
2522 int ret = 0;
7972aab2
DG
2523 struct ust_app_ctx *ua_ctx;
2524
2525 DBG2("UST app adding context to channel %s", ua_chan->name);
2526
6a6b2068
JG
2527 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2528 if (ua_ctx) {
7972aab2
DG
2529 ret = -EEXIST;
2530 goto error;
2531 }
2532
2533 ua_ctx = alloc_ust_app_ctx(uctx);
2534 if (ua_ctx == NULL) {
2535 /* malloc failed */
7682f304 2536 ret = -ENOMEM;
7972aab2
DG
2537 goto error;
2538 }
2539
2540 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
aa3514e9 2541 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
31746f93 2542 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
7972aab2
DG
2543
2544 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2545 if (ret < 0) {
2546 goto error;
2547 }
2548
2549error:
2550 return ret;
2551}
2552
2553/*
2554 * Enable on the tracer side a ust app event for the session and channel.
2555 *
2556 * Called with UST app session lock held.
2557 */
2558static
2559int enable_ust_app_event(struct ust_app_session *ua_sess,
2560 struct ust_app_event *ua_event, struct ust_app *app)
2561{
2562 int ret;
2563
1831ae68 2564 ret = enable_ust_object(app, ua_event->obj);
7972aab2
DG
2565 if (ret < 0) {
2566 goto error;
2567 }
2568
2569 ua_event->enabled = 1;
2570
2571error:
2572 return ret;
2573}
2574
2575/*
2576 * Disable on the tracer side a ust app event for the session and channel.
2577 */
2578static int disable_ust_app_event(struct ust_app_session *ua_sess,
2579 struct ust_app_event *ua_event, struct ust_app *app)
2580{
2581 int ret;
2582
1831ae68 2583 ret = disable_ust_object(app, ua_event->obj);
7972aab2
DG
2584 if (ret < 0) {
2585 goto error;
2586 }
2587
2588 ua_event->enabled = 0;
2589
2590error:
2591 return ret;
2592}
2593
2594/*
2595 * Lookup ust app channel for session and disable it on the tracer side.
2596 */
2597static
2598int disable_ust_app_channel(struct ust_app_session *ua_sess,
2599 struct ust_app_channel *ua_chan, struct ust_app *app)
2600{
2601 int ret;
2602
2603 ret = disable_ust_channel(app, ua_sess, ua_chan);
2604 if (ret < 0) {
2605 goto error;
2606 }
2607
2608 ua_chan->enabled = 0;
2609
2610error:
2611 return ret;
2612}
2613
2614/*
2615 * Lookup ust app channel for session and enable it on the tracer side. This
2616 * MUST be called with a RCU read side lock acquired.
2617 */
2618static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2619 struct ltt_ust_channel *uchan, struct ust_app *app)
2620{
2621 int ret = 0;
2622 struct lttng_ht_iter iter;
2623 struct lttng_ht_node_str *ua_chan_node;
2624 struct ust_app_channel *ua_chan;
2625
2626 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2627 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2628 if (ua_chan_node == NULL) {
d9bf3ca4 2629 DBG2("Unable to find channel %s in ust session id %" PRIu64,
7972aab2
DG
2630 uchan->name, ua_sess->tracing_id);
2631 goto error;
2632 }
2633
2634 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2635
2636 ret = enable_ust_channel(app, ua_sess, ua_chan);
2637 if (ret < 0) {
2638 goto error;
2639 }
2640
2641error:
2642 return ret;
2643}
2644
2645/*
2646 * Ask the consumer to create a channel and get it if successful.
2647 *
fad1ed2f
JR
2648 * Called with UST app session lock held.
2649 *
7972aab2
DG
2650 * Return 0 on success or else a negative value.
2651 */
2652static int do_consumer_create_channel(struct ltt_ust_session *usess,
2653 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
e098433c
JG
2654 int bitness, struct ust_registry_session *registry,
2655 uint64_t trace_archive_id)
7972aab2
DG
2656{
2657 int ret;
2658 unsigned int nb_fd = 0;
2659 struct consumer_socket *socket;
2660
2661 assert(usess);
2662 assert(ua_sess);
2663 assert(ua_chan);
2664 assert(registry);
2665
2666 rcu_read_lock();
2667 health_code_update();
2668
2669 /* Get the right consumer socket for the application. */
2670 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2671 if (!socket) {
2672 ret = -EINVAL;
2673 goto error;
2674 }
2675
2676 health_code_update();
2677
2678 /* Need one fd for the channel. */
2679 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2680 if (ret < 0) {
2681 ERR("Exhausted number of available FD upon create channel");
2682 goto error;
2683 }
2684
2685 /*
2686 * Ask consumer to create channel. The consumer will return the number of
2687 * streams we have to expect.
2688 */
2689 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
d2956687 2690 registry, usess->current_trace_chunk);
7972aab2
DG
2691 if (ret < 0) {
2692 goto error_ask;
2693 }
2694
2695 /*
2696 * Compute the number of fds needed before receiving them. It must be 2 per
2697 * stream (2 being the default value here).
2698 */
2699 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2700
2701 /* Reserve the amount of file descriptor we need. */
2702 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2703 if (ret < 0) {
2704 ERR("Exhausted number of available FD upon create channel");
2705 goto error_fd_get_stream;
2706 }
2707
2708 health_code_update();
2709
2710 /*
2711 * Now get the channel from the consumer. This call will populate the stream
2712 * list of that channel and set the ust objects.
2713 */
d9078d0c
DG
2714 if (usess->consumer->enabled) {
2715 ret = ust_consumer_get_channel(socket, ua_chan);
2716 if (ret < 0) {
2717 goto error_destroy;
2718 }
7972aab2
DG
2719 }
2720
2721 rcu_read_unlock();
2722 return 0;
2723
2724error_destroy:
2725 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2726error_fd_get_stream:
2727 /*
2728 * Initiate a destroy channel on the consumer since we had an error
2729 * handling it on our side. The return value is of no importance since we
2730 * already have a ret value set by the previous error that we need to
2731 * return.
2732 */
2733 (void) ust_consumer_destroy_channel(socket, ua_chan);
2734error_ask:
2735 lttng_fd_put(LTTNG_FD_APPS, 1);
2736error:
2737 health_code_update();
2738 rcu_read_unlock();
2739 return ret;
2740}
2741
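/*
 * Editor's note, illustrative only and not from the original source: file
 * descriptor accounting for the function above. One fd is reserved for the
 * channel itself, then DEFAULT_UST_STREAM_FD_NUM (2, per the comment above)
 * per expected stream. On a hypothetical 4-core machine with per-CPU
 * streams, expected_stream_count is 4, so nb_fd = 2 * 4 = 8 stream fds on
 * top of the channel fd, all returned through lttng_fd_put() on error.
 */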
2742/*
2743 * Duplicate the ust data object of the ust app stream and save it in the
2744 * buffer registry stream.
2745 *
2746 * Return 0 on success or else a negative value.
2747 */
2748static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2749 struct ust_app_stream *stream)
2750{
2751 int ret;
2752
2753 assert(reg_stream);
2754 assert(stream);
2755
2756 /* Reserve the amount of file descriptor we need. */
2757 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2758 if (ret < 0) {
2759 ERR("Exhausted number of available FD upon duplicate stream");
2760 goto error;
2761 }
2762
2763 /* Duplicate object for stream once the original is in the registry. */
2764 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2765 reg_stream->obj.ust);
2766 if (ret < 0) {
2767 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2768 reg_stream->obj.ust, stream->obj, ret);
2769 lttng_fd_put(LTTNG_FD_APPS, 2);
2770 goto error;
2771 }
2772 stream->handle = stream->obj->handle;
2773
2774error:
2775 return ret;
2776}
2777
2778/*
2779 * Duplicate the ust data object of the ust app channel and save it in the
2780 * buffer registry channel.
2781 *
2782 * Return 0 on success or else a negative value.
2783 */
2784static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2785 struct ust_app_channel *ua_chan)
2786{
2787 int ret;
2788
2789 assert(reg_chan);
2790 assert(ua_chan);
2791
2792 /* Need one fd for the channel. */
2793 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2794 if (ret < 0) {
2795 ERR("Exhausted number of available FD upon duplicate channel");
2796 goto error_fd_get;
2797 }
2798
2799 /* Duplicate object for stream once the original is in the registry. */
2800 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2801 if (ret < 0) {
2802 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2803 reg_chan->obj.ust, ua_chan->obj, ret);
2804 goto error;
2805 }
2806 ua_chan->handle = ua_chan->obj->handle;
2807
2808 return 0;
2809
2810error:
2811 lttng_fd_put(LTTNG_FD_APPS, 1);
2812error_fd_get:
2813 return ret;
2814}
2815
2816/*
2817 * For a given channel buffer registry, setup all streams of the given ust
2818 * application channel.
2819 *
2820 * Return 0 on success or else a negative value.
2821 */
2822static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
fb45065e
MD
2823 struct ust_app_channel *ua_chan,
2824 struct ust_app *app)
7972aab2
DG
2825{
2826 int ret = 0;
2827 struct ust_app_stream *stream, *stmp;
2828
2829 assert(reg_chan);
2830 assert(ua_chan);
2831
2832 DBG2("UST app setup buffer registry stream");
2833
2834 /* Send all streams to application. */
2835 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2836 struct buffer_reg_stream *reg_stream;
2837
2838 ret = buffer_reg_stream_create(&reg_stream);
2839 if (ret < 0) {
2840 goto error;
2841 }
2842
2843 /*
2844 * Keep original pointer and nullify it in the stream so the delete
2845 * stream call does not release the object.
2846 */
2847 reg_stream->obj.ust = stream->obj;
2848 stream->obj = NULL;
2849 buffer_reg_stream_add(reg_stream, reg_chan);
421cb601 2850
7972aab2
DG
2851 /* We don't need the streams anymore. */
2852 cds_list_del(&stream->list);
fb45065e 2853 delete_ust_app_stream(-1, stream, app);
7972aab2 2854 }
421cb601 2855
7972aab2
DG
2856error:
2857 return ret;
2858}
2859
2860/*
2861 * Create a buffer registry channel for the given session registry and
2862 * application channel object. If regp pointer is valid, it's set with the
2863 * created object. Important, the created object is NOT added to the session
2864 * registry hash table.
2865 *
2866 * Return 0 on success else a negative value.
2867 */
2868static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2869 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2870{
2871 int ret;
2872 struct buffer_reg_channel *reg_chan = NULL;
2873
2874 assert(reg_sess);
2875 assert(ua_chan);
2876
2877 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2878
2879 /* Create buffer registry channel. */
2880 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2881 if (ret < 0) {
2882 goto error_create;
421cb601 2883 }
7972aab2
DG
2884 assert(reg_chan);
2885 reg_chan->consumer_key = ua_chan->key;
8c924c7b 2886 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
d07ceecd 2887 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
421cb601 2888
7972aab2
DG
2889 /* Create and add a channel registry to session. */
2890 ret = ust_registry_channel_add(reg_sess->reg.ust,
2891 ua_chan->tracing_channel_id);
2892 if (ret < 0) {
2893 goto error;
d88aee68 2894 }
7972aab2 2895 buffer_reg_channel_add(reg_sess, reg_chan);
d88aee68 2896
7972aab2
DG
2897 if (regp) {
2898 *regp = reg_chan;
3d8ca23b 2899 }
d88aee68 2900
7972aab2 2901 return 0;
3d8ca23b
DG
2902
2903error:
7972aab2
DG
2904 /* Safe because the registry channel object was not added to any HT. */
2905 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2906error_create:
3d8ca23b 2907 return ret;
421cb601
DG
2908}
2909
55cc08a6 2910/*
7972aab2
DG
2911 * Setup buffer registry channel for the given session registry and application
2912 * channel object. If regp pointer is valid, it's set with the created object.
d0b96690 2913 *
7972aab2 2914 * Return 0 on success else a negative value.
55cc08a6 2915 */
7972aab2 2916static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
fb45065e
MD
2917 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
2918 struct ust_app *app)
55cc08a6 2919{
7972aab2 2920 int ret;
55cc08a6 2921
7972aab2
DG
2922 assert(reg_sess);
2923 assert(reg_chan);
2924 assert(ua_chan);
2925 assert(ua_chan->obj);
55cc08a6 2926
7972aab2 2927 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
55cc08a6 2928
7972aab2 2929 /* Setup all streams for the registry. */
fb45065e 2930 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
7972aab2 2931 if (ret < 0) {
55cc08a6
DG
2932 goto error;
2933 }
2934
7972aab2
DG
2935 reg_chan->obj.ust = ua_chan->obj;
2936 ua_chan->obj = NULL;
55cc08a6 2937
7972aab2 2938 return 0;
55cc08a6
DG
2939
2940error:
7972aab2
DG
2941 buffer_reg_channel_remove(reg_sess, reg_chan);
2942 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
55cc08a6
DG
2943 return ret;
2944}
2945
edb67388 2946/*
7972aab2 2947 * Send buffer registry channel to the application.
d0b96690 2948 *
7972aab2 2949 * Return 0 on success else a negative value.
edb67388 2950 */
7972aab2
DG
2951static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2952 struct ust_app *app, struct ust_app_session *ua_sess,
2953 struct ust_app_channel *ua_chan)
edb67388
DG
2954{
2955 int ret;
7972aab2 2956 struct buffer_reg_stream *reg_stream;
edb67388 2957
7972aab2
DG
2958 assert(reg_chan);
2959 assert(app);
2960 assert(ua_sess);
2961 assert(ua_chan);
2962
2963 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2964
2965 ret = duplicate_channel_object(reg_chan, ua_chan);
edb67388
DG
2966 if (ret < 0) {
2967 goto error;
2968 }
2969
7972aab2
DG
2970 /* Send channel to the application. */
2971 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
a7169585
MD
2972 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2973 ret = -ENOTCONN; /* Caused by app exiting. */
2974 goto error;
2975 } else if (ret < 0) {
7972aab2
DG
2976 goto error;
2977 }
2978
2979 health_code_update();
2980
2981 /* Send all streams to application. */
2982 pthread_mutex_lock(&reg_chan->stream_list_lock);
2983 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2984 struct ust_app_stream stream;
2985
2986 ret = duplicate_stream_object(reg_stream, &stream);
2987 if (ret < 0) {
2988 goto error_stream_unlock;
2989 }
2990
2991 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2992 if (ret < 0) {
fb45065e 2993 (void) release_ust_app_stream(-1, &stream, app);
a7169585
MD
2994 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2995 ret = -ENOTCONN; /* Caused by app exiting. */
a7169585 2996 }
7972aab2
DG
2997 goto error_stream_unlock;
2998 }
edb67388 2999
7972aab2
DG
3000 /*
3001 * The return value is not important here. This function will output an
3002 * error if needed.
3003 */
fb45065e 3004 (void) release_ust_app_stream(-1, &stream, app);
7972aab2
DG
3005 }
3006 ua_chan->is_sent = 1;
3007
3008error_stream_unlock:
3009 pthread_mutex_unlock(&reg_chan->stream_list_lock);
edb67388
DG
3010error:
3011 return ret;
3012}
3013
9730260e 3014/*
7972aab2
DG
3015 * Create and send to the application the created buffers with per UID buffers.
3016 *
9acdc1d6 3017 * This MUST be called with a RCU read side lock acquired.
71e0a100 3018 * The session list lock and the session's lock must be acquired.
9acdc1d6 3019 *
7972aab2 3020 * Return 0 on success else a negative value.
9730260e 3021 */
7972aab2
DG
3022static int create_channel_per_uid(struct ust_app *app,
3023 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3024 struct ust_app_channel *ua_chan)
9730260e
DG
3025{
3026 int ret;
7972aab2
DG
3027 struct buffer_reg_uid *reg_uid;
3028 struct buffer_reg_channel *reg_chan;
e32d7f27 3029 struct ltt_session *session = NULL;
e098433c
JG
3030 enum lttng_error_code notification_ret;
3031 struct ust_registry_channel *chan_reg;
9730260e 3032
7972aab2
DG
3033 assert(app);
3034 assert(usess);
3035 assert(ua_sess);
3036 assert(ua_chan);
3037
3038 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
3039
3040 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
3041 /*
3042 * The session creation handles the creation of this global registry
3043 * object. If none can be found, there is a code flow problem or a
3044 * teardown race.
3045 */
3046 assert(reg_uid);
3047
3048 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
3049 reg_uid);
2721f7ea
JG
3050 if (reg_chan) {
3051 goto send_channel;
3052 }
7972aab2 3053
2721f7ea
JG
3054 /* Create the buffer registry channel object. */
3055 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
3056 if (ret < 0) {
3057 ERR("Error creating the UST channel \"%s\" registry instance",
f14256d6 3058 ua_chan->name);
2721f7ea
JG
3059 goto error;
3060 }
f14256d6 3061
e098433c
JG
3062 session = session_find_by_id(ua_sess->tracing_id);
3063 assert(session);
3064 assert(pthread_mutex_trylock(&session->lock));
3065 assert(session_trylock_list());
3066
2721f7ea
JG
3067 /*
3068 * Create the buffers on the consumer side. This call populates the
3069 * ust app channel object with all streams and data object.
3070 */
3071 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
e098433c 3072 app->bits_per_long, reg_uid->registry->reg.ust,
d2956687 3073 session->most_recent_chunk_id.value);
2721f7ea
JG
3074 if (ret < 0) {
3075 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3076 ua_chan->name);
7972aab2
DG
3077
3078 /*
2721f7ea
JG
3079 * Let's remove the previously created buffer registry channel so
3080 * it's not visible anymore in the session registry.
7972aab2 3081 */
2721f7ea
JG
3082 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
3083 ua_chan->tracing_channel_id, false);
3084 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
3085 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
3086 goto error;
7972aab2
DG
3087 }
3088
2721f7ea
JG
3089 /*
3090 * Setup the streams and add it to the session registry.
3091 */
3092 ret = setup_buffer_reg_channel(reg_uid->registry,
3093 ua_chan, reg_chan, app);
3094 if (ret < 0) {
3095 ERR("Error setting up UST channel \"%s\"", ua_chan->name);
3096 goto error;
3097 }
3098
e098433c
JG
3099 /* Notify the notification subsystem of the channel's creation. */
3100 pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
3101 chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
3102 ua_chan->tracing_channel_id);
3103 assert(chan_reg);
3104 chan_reg->consumer_key = ua_chan->key;
3105 chan_reg = NULL;
3106 pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
e9404c27 3107
e098433c
JG
3108 notification_ret = notification_thread_command_add_channel(
3109 notification_thread_handle, session->name,
470cc211
JG
3110 ua_sess->effective_credentials.uid,
3111 ua_sess->effective_credentials.gid, ua_chan->name,
3112 ua_chan->key, LTTNG_DOMAIN_UST,
e098433c
JG
3113 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3114 if (notification_ret != LTTNG_OK) {
3115 ret = - (int) notification_ret;
3116 ERR("Failed to add channel to notification thread");
3117 goto error;
e9404c27
JG
3118 }
3119
2721f7ea 3120send_channel:
66ff8e3f
JG
3121 /* Send buffers to the application. */
3122 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
3123 if (ret < 0) {
3124 if (ret != -ENOTCONN) {
3125 ERR("Error sending channel to application");
3126 }
3127 goto error;
3128 }
3129
9730260e 3130error:
e32d7f27
JG
3131 if (session) {
3132 session_put(session);
3133 }
9730260e
DG
3134 return ret;
3135}
3136
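/*
 * Editor's note, illustrative only and not from the original source: the
 * capacity advertised to the notification thread above is simply
 * subbuf_size * num_subbuf. For example, 4 sub-buffers of 128 KiB would be
 * registered as a 512 KiB capacity; these figures are only an example, not
 * the session daemon's defaults.
 */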
78f0bacd 3137/*
7972aab2
DG
3138 * Create and send to the application the created buffers with per PID buffers.
3139 *
fad1ed2f 3140 * Called with UST app session lock held.
71e0a100 3141 * The session list lock and the session's lock must be acquired.
fad1ed2f 3142 *
7972aab2 3143 * Return 0 on success else a negative value.
78f0bacd 3144 */
7972aab2
DG
3145static int create_channel_per_pid(struct ust_app *app,
3146 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3147 struct ust_app_channel *ua_chan)
78f0bacd 3148{
8535a6d9 3149 int ret;
7972aab2 3150 struct ust_registry_session *registry;
e9404c27 3151 enum lttng_error_code cmd_ret;
e32d7f27 3152 struct ltt_session *session = NULL;
e9404c27
JG
3153 uint64_t chan_reg_key;
3154 struct ust_registry_channel *chan_reg;
78f0bacd 3155
7972aab2
DG
3156 assert(app);
3157 assert(usess);
3158 assert(ua_sess);
3159 assert(ua_chan);
3160
3161 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
3162
3163 rcu_read_lock();
3164
3165 registry = get_session_registry(ua_sess);
fad1ed2f 3166 /* The UST app session lock is held, registry shall not be null. */
7972aab2
DG
3167 assert(registry);
3168
3169 /* Create and add a new channel registry to session. */
3170 ret = ust_registry_channel_add(registry, ua_chan->key);
78f0bacd 3171 if (ret < 0) {
f14256d6
MD
3172 ERR("Error creating the UST channel \"%s\" registry instance",
3173 ua_chan->name);
78f0bacd
DG
3174 goto error;
3175 }
3176
e098433c
JG
3177 session = session_find_by_id(ua_sess->tracing_id);
3178 assert(session);
3179
3180 assert(pthread_mutex_trylock(&session->lock));
3181 assert(session_trylock_list());
3182
7972aab2
DG
3183 /* Create and get channel on the consumer side. */
3184 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
e098433c 3185 app->bits_per_long, registry,
d2956687 3186 session->most_recent_chunk_id.value);
7972aab2 3187 if (ret < 0) {
f14256d6
MD
3188 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3189 ua_chan->name);
5b951542 3190 goto error_remove_from_registry;
7972aab2
DG
3191 }
3192
3193 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
3194 if (ret < 0) {
a7169585
MD
3195 if (ret != -ENOTCONN) {
3196 ERR("Error sending channel to application");
3197 }
5b951542 3198 goto error_remove_from_registry;
7972aab2 3199 }
8535a6d9 3200
e9404c27
JG
3201 chan_reg_key = ua_chan->key;
3202 pthread_mutex_lock(&registry->lock);
3203 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
3204 assert(chan_reg);
3205 chan_reg->consumer_key = ua_chan->key;
3206 pthread_mutex_unlock(&registry->lock);
3207
3208 cmd_ret = notification_thread_command_add_channel(
3209 notification_thread_handle, session->name,
470cc211
JG
3210 ua_sess->effective_credentials.uid,
3211 ua_sess->effective_credentials.gid, ua_chan->name,
3212 ua_chan->key, LTTNG_DOMAIN_UST,
e9404c27
JG
3213 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3214 if (cmd_ret != LTTNG_OK) {
3215 ret = - (int) cmd_ret;
3216 ERR("Failed to add channel to notification thread");
5b951542 3217 goto error_remove_from_registry;
e9404c27
JG
3218 }
3219
5b951542
MD
3220error_remove_from_registry:
3221 if (ret) {
3222 ust_registry_channel_del_free(registry, ua_chan->key, false);
3223 }
78f0bacd 3224error:
7972aab2 3225 rcu_read_unlock();
e32d7f27
JG
3226 if (session) {
3227 session_put(session);
3228 }
78f0bacd
DG
3229 return ret;
3230}
3231
3232/*
7972aab2 3233 * From an already allocated ust app channel, create the channel buffers if
88e3c2f5 3234 * needed and send them to the application. This MUST be called with a RCU read
7972aab2
DG
3235 * side lock acquired.
3236 *
fad1ed2f
JR
3237 * Called with UST app session lock held.
3238 *
a7169585
MD
3239 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3240 * the application exited concurrently.
78f0bacd 3241 */
88e3c2f5 3242static int ust_app_channel_send(struct ust_app *app,
7972aab2
DG
3243 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3244 struct ust_app_channel *ua_chan)
78f0bacd 3245{
7972aab2 3246 int ret;
78f0bacd 3247
7972aab2
DG
3248 assert(app);
3249 assert(usess);
88e3c2f5 3250 assert(usess->active);
7972aab2
DG
3251 assert(ua_sess);
3252 assert(ua_chan);
3253
3254 /* Handle buffer type