SoW-2020-0002: Trace Hit Counters: trigger error reporting integration
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
91d76f53 1/*
ab5be9fa
MJ
2 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
3 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
91d76f53 4 *
ab5be9fa 5 * SPDX-License-Identifier: GPL-2.0-only
91d76f53 6 *
91d76f53
DG
7 */
8
6c1c0768 9#define _LGPL_SOURCE
91d76f53 10#include <errno.h>
2463b787 11#include <fcntl.h>
7972aab2 12#include <inttypes.h>
91d76f53
DG
13#include <pthread.h>
14#include <stdio.h>
15#include <stdlib.h>
099e26bd 16#include <string.h>
2463b787 17#include <sys/mman.h>
aba8e916
DG
18#include <sys/stat.h>
19#include <sys/types.h>
099e26bd 20#include <unistd.h>
0df502fd 21#include <urcu/compiler.h>
331744e3 22#include <signal.h>
bec39940 23
2463b787 24#include <common/bytecode/bytecode.h>
990570ed 25#include <common/common.h>
2463b787
JR
26#include <common/hashtable/utils.h>
27#include <lttng/event-rule/event-rule.h>
28#include <lttng/event-rule/event-rule-internal.h>
29#include <lttng/event-rule/tracepoint.h>
30#include <lttng/condition/condition.h>
31#include <lttng/condition/event-rule-internal.h>
32#include <lttng/condition/event-rule.h>
33#include <lttng/trigger/trigger-internal.h>
86acf0da 34#include <common/sessiond-comm/sessiond-comm.h>
1e307fab 35
7972aab2 36#include "buffer-registry.h"
2463b787 37#include "condition-internal.h"
86acf0da 38#include "fd-limit.h"
8782cc74 39#include "health-sessiond.h"
56fff090 40#include "ust-app.h"
48842b30 41#include "ust-consumer.h"
75018ab6
JG
42#include "lttng-ust-ctl.h"
43#include "lttng-ust-error.h"
0b2dc8df 44#include "utils.h"
fb83fe64 45#include "session.h"
e9404c27
JG
46#include "lttng-sessiond.h"
47#include "notification-thread-commands.h"
5c408ad8 48#include "rotate.h"
2463b787
JR
49#include "event.h"
50#include "trigger-error-accounting.h"
51
d80a6244 52
44cdb3a2
MJ
53struct lttng_ht *ust_app_ht;
54struct lttng_ht *ust_app_ht_by_sock;
55struct lttng_ht *ust_app_ht_by_notify_sock;
56
c4b88406
MD
57static
58int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
59
d9bf3ca4
MD
60/* Next available channel key. Access under next_channel_key_lock. */
61static uint64_t _next_channel_key;
62static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
63
64/* Next available session ID. Access under next_session_id_lock. */
65static uint64_t _next_session_id;
66static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
ffe60014
DG
67
68/*
d9bf3ca4 69 * Return the incremented value of next_channel_key.
ffe60014 70 */
d9bf3ca4 71static uint64_t get_next_channel_key(void)
ffe60014 72{
d9bf3ca4
MD
73 uint64_t ret;
74
75 pthread_mutex_lock(&next_channel_key_lock);
76 ret = ++_next_channel_key;
77 pthread_mutex_unlock(&next_channel_key_lock);
78 return ret;
ffe60014
DG
79}
80
81/*
7972aab2 82 * Return the atomically incremented value of next_session_id.
ffe60014 83 */
d9bf3ca4 84static uint64_t get_next_session_id(void)
ffe60014 85{
d9bf3ca4
MD
86 uint64_t ret;
87
88 pthread_mutex_lock(&next_session_id_lock);
89 ret = ++_next_session_id;
90 pthread_mutex_unlock(&next_session_id_lock);
91 return ret;
ffe60014
DG
92}
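/*
 * Illustrative sketch (not part of the original file): both getters are
 * serialized by their own mutex, so allocation paths can call them directly
 * when building new objects, e.g.:
 *
 *     ua_sess->id = get_next_session_id();
 *     ua_chan->key = get_next_channel_key();
 *
 * Values start at 1, so 0 can be used as an "unset" marker.
 */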
93
d65d2de8
DG
94static void copy_channel_attr_to_ustctl(
95 struct ustctl_consumer_channel_attr *attr,
96 struct lttng_ust_channel_attr *uattr)
97{
 98 /* Copy channel attributes since the layout is different. */
99 attr->subbuf_size = uattr->subbuf_size;
100 attr->num_subbuf = uattr->num_subbuf;
101 attr->overwrite = uattr->overwrite;
102 attr->switch_timer_interval = uattr->switch_timer_interval;
103 attr->read_timer_interval = uattr->read_timer_interval;
104 attr->output = uattr->output;
491d1539 105 attr->blocking_timeout = uattr->u.s.blocking_timeout;
d65d2de8
DG
106}
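/*
 * Illustrative sketch (not part of the original file), assuming a
 * hypothetical ltt_ust_channel named uchan: the translation above is applied
 * to a zero-initialized consumer attribute structure before asking the
 * consumer to create the channel:
 *
 *     struct ustctl_consumer_channel_attr attr;
 *
 *     memset(&attr, 0, sizeof(attr));
 *     copy_channel_attr_to_ustctl(&attr, &uchan->attr);
 *
 * The copy is done field by field because the two structures do not share
 * the same layout.
 */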
107
025faf73
DG
108/*
109 * Match function for the hash table lookup.
110 *
 111 * It matches an ust app event based on four attributes which are the event
 112 * name, the filter bytecode, the loglevel and the exclusions.
113 */
18eace3b
DG
114static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
115{
116 struct ust_app_event *event;
117 const struct ust_app_ht_key *key;
2106efa0 118 int ev_loglevel_value;
18eace3b
DG
119
120 assert(node);
121 assert(_key);
122
123 event = caa_container_of(node, struct ust_app_event, node.node);
124 key = _key;
2106efa0 125 ev_loglevel_value = event->attr.loglevel;
18eace3b 126
1af53eb5 127 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
18eace3b
DG
128
129 /* Event name */
130 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
131 goto no_match;
132 }
133
134 /* Event loglevel. */
2106efa0 135 if (ev_loglevel_value != key->loglevel_type) {
025faf73 136 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
2106efa0
PP
137 && key->loglevel_type == 0 &&
138 ev_loglevel_value == -1) {
025faf73
DG
139 /*
 140 * Match is accepted. On event creation, the loglevel is set to -1
 141 * if the event loglevel type is ALL, while the API sets it to 0 when
 142 * receiving an enable event, so both 0 and -1 are accepted for this
 143 * loglevel type.
144 */
145 } else {
146 goto no_match;
147 }
18eace3b
DG
148 }
149
150 /* One of the filters is NULL, fail. */
151 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
152 goto no_match;
153 }
154
025faf73
DG
155 if (key->filter && event->filter) {
 156 /* Both filters exist; check the length followed by the bytecode. */
157 if (event->filter->len != key->filter->len ||
158 memcmp(event->filter->data, key->filter->data,
159 event->filter->len) != 0) {
160 goto no_match;
161 }
18eace3b
DG
162 }
163
1af53eb5
JI
164 /* One of the exclusions is NULL, fail. */
165 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
166 goto no_match;
167 }
168
169 if (key->exclusion && event->exclusion) {
 170 /* Both exclusions exist; check the count followed by the names. */
171 if (event->exclusion->count != key->exclusion->count ||
172 memcmp(event->exclusion->names, key->exclusion->names,
173 event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
174 goto no_match;
175 }
176 }
177
178
025faf73 179 /* Match. */
18eace3b
DG
180 return 1;
181
182no_match:
183 return 0;
18eace3b
DG
184}
185
025faf73
DG
186/*
187 * Unique add of an ust app event in the given ht. This uses the custom
188 * ht_match_ust_app_event match function and the event name as hash.
189 */
d0b96690 190static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
18eace3b
DG
191 struct ust_app_event *event)
192{
193 struct cds_lfht_node *node_ptr;
194 struct ust_app_ht_key key;
d0b96690 195 struct lttng_ht *ht;
18eace3b 196
d0b96690
DG
197 assert(ua_chan);
198 assert(ua_chan->events);
18eace3b
DG
199 assert(event);
200
d0b96690 201 ht = ua_chan->events;
18eace3b
DG
202 key.name = event->attr.name;
203 key.filter = event->filter;
2106efa0 204 key.loglevel_type = event->attr.loglevel;
91c89f23 205 key.exclusion = event->exclusion;
18eace3b
DG
206
207 node_ptr = cds_lfht_add_unique(ht->ht,
208 ht->hash_fct(event->node.key, lttng_ht_seed),
209 ht_match_ust_app_event, &key, &event->node.node);
210 assert(node_ptr == &event->node.node);
211}
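/*
 * Illustrative sketch (not part of the original file): the lookup side builds
 * the same key and relies on the same custom match function, which is the
 * pattern used by find_ust_app_event() further down:
 *
 *     struct ust_app_ht_key key = {
 *             .name = event->attr.name,
 *             .filter = event->filter,
 *             .loglevel_type = event->attr.loglevel,
 *             .exclusion = event->exclusion,
 *     };
 *
 *     cds_lfht_lookup(ht->ht, ht->hash_fct((void *) key.name, lttng_ht_seed),
 *             ht_match_ust_app_event, &key, &iter.iter);
 */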
212
d88aee68
DG
213/*
214 * Close the notify socket from the given RCU head object. This MUST be called
215 * through a call_rcu().
216 */
217static void close_notify_sock_rcu(struct rcu_head *head)
218{
219 int ret;
220 struct ust_app_notify_sock_obj *obj =
221 caa_container_of(head, struct ust_app_notify_sock_obj, head);
222
223 /* Must have a valid fd here. */
224 assert(obj->fd >= 0);
225
226 ret = close(obj->fd);
227 if (ret) {
228 ERR("close notify sock %d RCU", obj->fd);
229 }
230 lttng_fd_put(LTTNG_FD_APPS, 1);
231
232 free(obj);
233}
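/*
 * Illustrative sketch (not part of the original file): the notify socket is
 * never closed in-line. Its wrapper object is handed to the RCU machinery so
 * that concurrent readers can keep using the fd until a grace period has
 * elapsed:
 *
 *     obj->fd = sock;
 *     call_rcu(&obj->head, close_notify_sock_rcu);
 */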
234
7972aab2
DG
235/*
236 * Return the session registry according to the buffer type of the given
237 * session.
238 *
 239 * A registry per UID object MUST exist before calling this function or else
 240 * it asserts if not found. The RCU read side lock must be acquired.
241 */
242static struct ust_registry_session *get_session_registry(
243 struct ust_app_session *ua_sess)
244{
245 struct ust_registry_session *registry = NULL;
246
247 assert(ua_sess);
248
249 switch (ua_sess->buffer_type) {
250 case LTTNG_BUFFER_PER_PID:
251 {
252 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
253 if (!reg_pid) {
254 goto error;
255 }
256 registry = reg_pid->registry->reg.ust;
257 break;
258 }
259 case LTTNG_BUFFER_PER_UID:
260 {
261 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
470cc211 262 ua_sess->tracing_id, ua_sess->bits_per_long,
2463b787 263 lttng_credentials_get_uid(&ua_sess->real_credentials));
7972aab2
DG
264 if (!reg_uid) {
265 goto error;
266 }
267 registry = reg_uid->registry->reg.ust;
268 break;
269 }
270 default:
271 assert(0);
272 };
273
274error:
275 return registry;
276}
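/*
 * Illustrative sketch (not part of the original file): callers hold the RCU
 * read-side lock and must tolerate a NULL return on the error paths above:
 *
 *     rcu_read_lock();
 *     registry = get_session_registry(ua_sess);
 *     if (registry) {
 *             ... use the registry while still under the RCU read-side lock ...
 *     }
 *     rcu_read_unlock();
 */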
277
55cc08a6
DG
278/*
279 * Delete ust context safely. RCU read lock must be held before calling
280 * this function.
281 */
282static
fb45065e
MD
283void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
284 struct ust_app *app)
55cc08a6 285{
ffe60014
DG
286 int ret;
287
288 assert(ua_ctx);
289
55cc08a6 290 if (ua_ctx->obj) {
fb45065e 291 pthread_mutex_lock(&app->sock_lock);
ffe60014 292 ret = ustctl_release_object(sock, ua_ctx->obj);
fb45065e 293 pthread_mutex_unlock(&app->sock_lock);
d0b96690
DG
294 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
295 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
296 sock, ua_ctx->obj->handle, ret);
ffe60014 297 }
55cc08a6
DG
298 free(ua_ctx->obj);
299 }
300 free(ua_ctx);
301}
302
d80a6244
DG
303/*
304 * Delete ust app event safely. RCU read lock must be held before calling
305 * this function.
306 */
8b366481 307static
fb45065e
MD
308void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
309 struct ust_app *app)
d80a6244 310{
ffe60014
DG
311 int ret;
312
313 assert(ua_event);
314
53a80697 315 free(ua_event->filter);
951f0b71
JI
316 if (ua_event->exclusion != NULL)
317 free(ua_event->exclusion);
edb67388 318 if (ua_event->obj != NULL) {
fb45065e 319 pthread_mutex_lock(&app->sock_lock);
ffe60014 320 ret = ustctl_release_object(sock, ua_event->obj);
fb45065e 321 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
322 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
323 ERR("UST app sock %d release event obj failed with ret %d",
324 sock, ret);
325 }
edb67388
DG
326 free(ua_event->obj);
327 }
d80a6244
DG
328 free(ua_event);
329}
330
2463b787
JR
331/*
 332 * Delete ust app token event_rule safely. RCU read lock must be held before calling
 333 * this function. TODO: confirm whether the RCU read lock is actually required here.
334 */
335static
336void delete_ust_app_token_event_rule(int sock, struct ust_app_token_event_rule *ua_token,
337 struct ust_app *app)
338{
339 int ret;
340
341 assert(ua_token);
342
343 if (ua_token->exclusion != NULL)
344 free(ua_token->exclusion);
345 if (ua_token->obj != NULL) {
346 pthread_mutex_lock(&app->sock_lock);
347 ret = ustctl_release_object(sock, ua_token->obj);
348 pthread_mutex_unlock(&app->sock_lock);
349 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
350 ERR("UST app sock %d release event obj failed with ret %d",
351 sock, ret);
352 }
353 free(ua_token->obj);
354 }
355 lttng_trigger_put(ua_token->trigger);
356 free(ua_token);
357}
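/*
 * Illustrative sketch (not part of the original file): this is the
 * counterpart of alloc_ust_app_token_event_rule() below, which takes a
 * reference on the trigger with lttng_trigger_get(); the lttng_trigger_put()
 * above drops it. A typical teardown site iterates the per-app token hash
 * table, removing each node before deleting it:
 *
 *     ret = lttng_ht_del(app->tokens_ht, &iter);
 *     assert(!ret);
 *     delete_ust_app_token_event_rule(app->sock, ua_token, app);
 */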
358
d80a6244 359/*
7972aab2
DG
360 * Release ust data object of the given stream.
361 *
362 * Return 0 on success or else a negative value.
d80a6244 363 */
fb45065e
MD
364static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
365 struct ust_app *app)
d80a6244 366{
7972aab2 367 int ret = 0;
ffe60014
DG
368
369 assert(stream);
370
8b366481 371 if (stream->obj) {
fb45065e 372 pthread_mutex_lock(&app->sock_lock);
ffe60014 373 ret = ustctl_release_object(sock, stream->obj);
fb45065e 374 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
375 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
376 ERR("UST app sock %d release stream obj failed with ret %d",
377 sock, ret);
378 }
4063050c 379 lttng_fd_put(LTTNG_FD_APPS, 2);
8b366481
DG
380 free(stream->obj);
381 }
7972aab2
DG
382
383 return ret;
384}
385
386/*
387 * Delete ust app stream safely. RCU read lock must be held before calling
388 * this function.
389 */
390static
fb45065e
MD
391void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
392 struct ust_app *app)
7972aab2
DG
393{
394 assert(stream);
395
fb45065e 396 (void) release_ust_app_stream(sock, stream, app);
84cd17c6 397 free(stream);
d80a6244
DG
398}
399
36b588ed
MD
400/*
401 * We need to execute ht_destroy outside of RCU read-side critical
0b2dc8df
MD
402 * section and outside of call_rcu thread, so we postpone its execution
403 * using ht_cleanup_push. It is simpler than to change the semantic of
404 * the many callers of delete_ust_app_session().
36b588ed
MD
405 */
406static
407void delete_ust_app_channel_rcu(struct rcu_head *head)
408{
409 struct ust_app_channel *ua_chan =
410 caa_container_of(head, struct ust_app_channel, rcu_head);
411
0b2dc8df
MD
412 ht_cleanup_push(ua_chan->ctx);
413 ht_cleanup_push(ua_chan->events);
36b588ed
MD
414 free(ua_chan);
415}
416
fb83fe64
JD
417/*
 418 * Extract the lost packets or discarded events counters when the channel is
 419 * being deleted and store the values in the parent channel so we can
 420 * access them from lttng list and at stop/destroy.
82cac6d2
JG
421 *
422 * The session list lock must be held by the caller.
fb83fe64
JD
423 */
424static
425void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
426{
427 uint64_t discarded = 0, lost = 0;
428 struct ltt_session *session;
429 struct ltt_ust_channel *uchan;
430
431 if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
432 return;
433 }
434
435 rcu_read_lock();
436 session = session_find_by_id(ua_chan->session->tracing_id);
d68ec974
JG
437 if (!session || !session->ust_session) {
438 /*
439 * Not finding the session is not an error because there are
440 * multiple ways the channels can be torn down.
441 *
442 * 1) The session daemon can initiate the destruction of the
443 * ust app session after receiving a destroy command or
444 * during its shutdown/teardown.
445 * 2) The application, since we are in per-pid tracing, is
446 * unregistering and tearing down its ust app session.
447 *
448 * Both paths are protected by the session list lock which
449 * ensures that the accounting of lost packets and discarded
450 * events is done exactly once. The session is then unpublished
451 * from the session list, resulting in this condition.
452 */
fb83fe64
JD
453 goto end;
454 }
455
456 if (ua_chan->attr.overwrite) {
457 consumer_get_lost_packets(ua_chan->session->tracing_id,
458 ua_chan->key, session->ust_session->consumer,
459 &lost);
460 } else {
461 consumer_get_discarded_events(ua_chan->session->tracing_id,
462 ua_chan->key, session->ust_session->consumer,
463 &discarded);
464 }
465 uchan = trace_ust_find_channel_by_name(
466 session->ust_session->domain_global.channels,
467 ua_chan->name);
468 if (!uchan) {
469 ERR("Missing UST channel to store discarded counters");
470 goto end;
471 }
472
473 uchan->per_pid_closed_app_discarded += discarded;
474 uchan->per_pid_closed_app_lost += lost;
475
476end:
477 rcu_read_unlock();
e32d7f27
JG
478 if (session) {
479 session_put(session);
480 }
fb83fe64
JD
481}
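/*
 * Illustrative sketch (not part of the original file), using hypothetical
 * names: the values accumulated above are meant to be added to whatever the
 * consumer reports for still-living applications when the counters are later
 * queried, roughly:
 *
 *     total_lost = lost_from_consumer + uchan->per_pid_closed_app_lost;
 *     total_discarded = discarded_from_consumer
 *             + uchan->per_pid_closed_app_discarded;
 */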
482
d80a6244
DG
483/*
484 * Delete ust app channel safely. RCU read lock must be held before calling
485 * this function.
82cac6d2
JG
486 *
487 * The session list lock must be held by the caller.
d80a6244 488 */
8b366481 489static
d0b96690
DG
490void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
491 struct ust_app *app)
d80a6244
DG
492{
493 int ret;
bec39940 494 struct lttng_ht_iter iter;
d80a6244 495 struct ust_app_event *ua_event;
55cc08a6 496 struct ust_app_ctx *ua_ctx;
030a66fa 497 struct ust_app_stream *stream, *stmp;
7972aab2 498 struct ust_registry_session *registry;
d80a6244 499
ffe60014
DG
500 assert(ua_chan);
501
502 DBG3("UST app deleting channel %s", ua_chan->name);
503
55cc08a6 504 /* Wipe stream */
d80a6244 505 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
84cd17c6 506 cds_list_del(&stream->list);
fb45065e 507 delete_ust_app_stream(sock, stream, app);
d80a6244
DG
508 }
509
55cc08a6 510 /* Wipe context */
bec39940 511 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
31746f93 512 cds_list_del(&ua_ctx->list);
bec39940 513 ret = lttng_ht_del(ua_chan->ctx, &iter);
55cc08a6 514 assert(!ret);
fb45065e 515 delete_ust_app_ctx(sock, ua_ctx, app);
55cc08a6 516 }
d80a6244 517
55cc08a6 518 /* Wipe events */
bec39940
DG
519 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
520 node.node) {
521 ret = lttng_ht_del(ua_chan->events, &iter);
525b0740 522 assert(!ret);
fb45065e 523 delete_ust_app_event(sock, ua_event, app);
d80a6244 524 }
edb67388 525
c8335706
MD
526 if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
527 /* Wipe and free registry from session registry. */
528 registry = get_session_registry(ua_chan->session);
529 if (registry) {
e9404c27 530 ust_registry_channel_del_free(registry, ua_chan->key,
e38d96f9
MD
531 sock >= 0);
532 }
45798a31
JG
533 /*
534 * A negative socket can be used by the caller when
535 * cleaning-up a ua_chan in an error path. Skip the
536 * accounting in this case.
537 */
e38d96f9
MD
538 if (sock >= 0) {
539 save_per_pid_lost_discarded_counters(ua_chan);
c8335706 540 }
7972aab2 541 }
d0b96690 542
edb67388 543 if (ua_chan->obj != NULL) {
d0b96690
DG
544 /* Remove channel from application UST object descriptor. */
545 iter.iter.node = &ua_chan->ust_objd_node.node;
c6e62271
DG
546 ret = lttng_ht_del(app->ust_objd, &iter);
547 assert(!ret);
fb45065e 548 pthread_mutex_lock(&app->sock_lock);
ffe60014 549 ret = ustctl_release_object(sock, ua_chan->obj);
fb45065e 550 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
551 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
552 ERR("UST app sock %d release channel obj failed with ret %d",
553 sock, ret);
554 }
7972aab2 555 lttng_fd_put(LTTNG_FD_APPS, 1);
edb67388
DG
556 free(ua_chan->obj);
557 }
36b588ed 558 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
d80a6244
DG
559}
560
fb45065e
MD
561int ust_app_register_done(struct ust_app *app)
562{
563 int ret;
564
565 pthread_mutex_lock(&app->sock_lock);
566 ret = ustctl_register_done(app->sock);
567 pthread_mutex_unlock(&app->sock_lock);
568 return ret;
569}
570
571int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
572{
573 int ret, sock;
574
575 if (app) {
576 pthread_mutex_lock(&app->sock_lock);
577 sock = app->sock;
578 } else {
579 sock = -1;
580 }
581 ret = ustctl_release_object(sock, data);
582 if (app) {
583 pthread_mutex_unlock(&app->sock_lock);
584 }
585 return ret;
586}
587
331744e3 588/*
1b532a60
DG
589 * Push metadata to consumer socket.
590 *
dc2bbdae
MD
 591 * RCU read-side lock must be held to guarantee the existence of the socket.
592 * Must be called with the ust app session lock held.
593 * Must be called with the registry lock held.
331744e3
JD
594 *
595 * On success, return the len of metadata pushed or else a negative value.
2c57e06d
MD
596 * Returning a -EPIPE return value means we could not send the metadata,
597 * but it can be caused by recoverable errors (e.g. the application has
598 * terminated concurrently).
331744e3
JD
599 */
600ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
601 struct consumer_socket *socket, int send_zero_data)
602{
603 int ret;
604 char *metadata_str = NULL;
c585821b 605 size_t len, offset, new_metadata_len_sent;
331744e3 606 ssize_t ret_val;
93ec662e 607 uint64_t metadata_key, metadata_version;
331744e3
JD
608
609 assert(registry);
610 assert(socket);
1b532a60 611
c585821b
MD
612 metadata_key = registry->metadata_key;
613
ce34fcd0 614 /*
dc2bbdae
MD
 615 * Means that no metadata was assigned to the session. This can
 616 * happen if no start has been done previously.
ce34fcd0 617 */
c585821b 618 if (!metadata_key) {
ce34fcd0
MD
619 return 0;
620 }
621
331744e3
JD
622 offset = registry->metadata_len_sent;
623 len = registry->metadata_len - registry->metadata_len_sent;
c585821b 624 new_metadata_len_sent = registry->metadata_len;
93ec662e 625 metadata_version = registry->metadata_version;
331744e3
JD
626 if (len == 0) {
627 DBG3("No metadata to push for metadata key %" PRIu64,
628 registry->metadata_key);
629 ret_val = len;
630 if (send_zero_data) {
631 DBG("No metadata to push");
632 goto push_data;
633 }
634 goto end;
635 }
636
637 /* Allocate only what we have to send. */
638 metadata_str = zmalloc(len);
639 if (!metadata_str) {
640 PERROR("zmalloc ust app metadata string");
641 ret_val = -ENOMEM;
642 goto error;
643 }
c585821b 644 /* Copy what we haven't sent out. */
331744e3 645 memcpy(metadata_str, registry->metadata + offset, len);
331744e3
JD
646
647push_data:
c585821b
MD
648 pthread_mutex_unlock(&registry->lock);
649 /*
650 * We need to unlock the registry while we push metadata to
651 * break a circular dependency between the consumerd metadata
652 * lock and the sessiond registry lock. Indeed, pushing metadata
 653 * to the consumerd waits for it to be pushed all the way to
654 * relayd, but doing so requires grabbing the metadata lock. If
655 * a concurrent metadata request is being performed by
656 * consumerd, this can try to grab the registry lock on the
657 * sessiond while holding the metadata lock on the consumer
658 * daemon. Those push and pull schemes are performed on two
 659 * different bidirectional communication sockets.
660 */
661 ret = consumer_push_metadata(socket, metadata_key,
93ec662e 662 metadata_str, len, offset, metadata_version);
c585821b 663 pthread_mutex_lock(&registry->lock);
331744e3 664 if (ret < 0) {
000baf6a 665 /*
dc2bbdae
MD
666 * There is an acceptable race here between the registry
667 * metadata key assignment and the creation on the
668 * consumer. The session daemon can concurrently push
669 * metadata for this registry while being created on the
670 * consumer since the metadata key of the registry is
671 * assigned *before* it is setup to avoid the consumer
672 * to ask for metadata that could possibly be not found
673 * in the session daemon.
000baf6a 674 *
dc2bbdae
MD
675 * The metadata will get pushed either by the session
676 * being stopped or the consumer requesting metadata if
677 * that race is triggered.
000baf6a
DG
678 */
679 if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
680 ret = 0;
c585821b
MD
681 } else {
682 ERR("Error pushing metadata to consumer");
000baf6a 683 }
331744e3
JD
684 ret_val = ret;
685 goto error_push;
c585821b
MD
686 } else {
687 /*
688 * Metadata may have been concurrently pushed, since
689 * we're not holding the registry lock while pushing to
690 * consumer. This is handled by the fact that we send
691 * the metadata content, size, and the offset at which
692 * that metadata belongs. This may arrive out of order
 693 * on the consumer side, and the consumer is able to
 694 * deal with overlapping fragments, as long as they
 695 * are contiguous starting from offset 0. We keep the
697 * largest metadata_len_sent value of the concurrent
698 * send.
699 */
700 registry->metadata_len_sent =
701 max_t(size_t, registry->metadata_len_sent,
702 new_metadata_len_sent);
331744e3 703 }
331744e3
JD
704 free(metadata_str);
705 return len;
706
707end:
708error:
ce34fcd0
MD
709 if (ret_val) {
710 /*
dc2bbdae
MD
711 * On error, flag the registry that the metadata is
712 * closed. We were unable to push anything and this
713 * means that either the consumer is not responding or
714 * the metadata cache has been destroyed on the
715 * consumer.
ce34fcd0
MD
716 */
717 registry->metadata_closed = 1;
718 }
331744e3
JD
719error_push:
720 free(metadata_str);
721 return ret_val;
722}
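/*
 * Illustrative sketch (not part of the original file): a caller owns the
 * registry lock around the call and treats -EPIPE as recoverable, e.g.:
 *
 *     pthread_mutex_lock(&registry->lock);
 *     ret = ust_app_push_metadata(registry, socket, 0);
 *     pthread_mutex_unlock(&registry->lock);
 *     if (ret < 0 && ret != -EPIPE) {
 *             ... report a real error ...
 *     }
 *
 * The function itself temporarily drops the registry lock while talking to
 * the consumer, as explained above.
 */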
723
d88aee68 724/*
ce34fcd0 725 * For a given application and session, push metadata to consumer.
331744e3
JD
 726 * Either sock or consumer is required: if sock is NULL, the default
 727 * socket to send the metadata is retrieved from consumer; if sock
 728 * is not NULL, we use it to send the metadata.
ce34fcd0 729 * RCU read-side lock must be held while calling this function,
dc2bbdae
MD
 730 * therefore ensuring the existence of the registry. It also ensures the
 731 * existence of the socket throughout this function.
d88aee68
DG
732 *
733 * Return 0 on success else a negative error.
2c57e06d
MD
734 * Returning a -EPIPE return value means we could not send the metadata,
735 * but it can be caused by recoverable errors (e.g. the application has
736 * terminated concurrently).
d88aee68 737 */
7972aab2
DG
738static int push_metadata(struct ust_registry_session *registry,
739 struct consumer_output *consumer)
d88aee68 740{
331744e3
JD
741 int ret_val;
742 ssize_t ret;
d88aee68
DG
743 struct consumer_socket *socket;
744
7972aab2
DG
745 assert(registry);
746 assert(consumer);
747
ce34fcd0 748 pthread_mutex_lock(&registry->lock);
ce34fcd0 749 if (registry->metadata_closed) {
dc2bbdae
MD
750 ret_val = -EPIPE;
751 goto error;
d88aee68
DG
752 }
753
d88aee68 754 /* Get consumer socket to use to push the metadata.*/
7972aab2
DG
755 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
756 consumer);
d88aee68 757 if (!socket) {
331744e3 758 ret_val = -1;
ce34fcd0 759 goto error;
d88aee68
DG
760 }
761
331744e3 762 ret = ust_app_push_metadata(registry, socket, 0);
d88aee68 763 if (ret < 0) {
331744e3 764 ret_val = ret;
ce34fcd0 765 goto error;
d88aee68 766 }
dc2bbdae 767 pthread_mutex_unlock(&registry->lock);
d88aee68
DG
768 return 0;
769
ce34fcd0 770error:
dc2bbdae 771 pthread_mutex_unlock(&registry->lock);
331744e3 772 return ret_val;
d88aee68
DG
773}
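/*
 * Illustrative sketch (not part of the original file): unlike
 * ust_app_push_metadata(), this wrapper takes and releases the registry lock
 * itself, so call sites can simply do:
 *
 *     (void) push_metadata(registry, ua_sess->consumer);
 *
 * as done in delete_ust_app_session() below, where errors are deliberately
 * ignored on the teardown path.
 */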
774
775/*
776 * Send to the consumer a close metadata command for the given session. Once
777 * done, the metadata channel is deleted and the session metadata pointer is
dc2bbdae 778 * nullified. The session lock MUST be held unless the application is
d88aee68
DG
779 * in the destroy path.
780 *
a70ac2f4
MD
781 * Do not hold the registry lock while communicating with the consumerd, because
782 * doing so causes inter-process deadlocks between consumerd and sessiond with
783 * the metadata request notification.
784 *
d88aee68
DG
785 * Return 0 on success else a negative value.
786 */
7972aab2
DG
787static int close_metadata(struct ust_registry_session *registry,
788 struct consumer_output *consumer)
d88aee68
DG
789{
790 int ret;
791 struct consumer_socket *socket;
a70ac2f4
MD
792 uint64_t metadata_key;
793 bool registry_was_already_closed;
d88aee68 794
7972aab2
DG
795 assert(registry);
796 assert(consumer);
d88aee68 797
7972aab2
DG
798 rcu_read_lock();
799
ce34fcd0 800 pthread_mutex_lock(&registry->lock);
a70ac2f4
MD
801 metadata_key = registry->metadata_key;
802 registry_was_already_closed = registry->metadata_closed;
803 if (metadata_key != 0) {
804 /*
 805 * Metadata closed. Even on error this means that the consumer
 806 * is not responding or not found so either way a second close
 807 * should NOT be emitted for this registry.
808 */
809 registry->metadata_closed = 1;
810 }
811 pthread_mutex_unlock(&registry->lock);
ce34fcd0 812
a70ac2f4 813 if (metadata_key == 0 || registry_was_already_closed) {
d88aee68 814 ret = 0;
1b532a60 815 goto end;
d88aee68
DG
816 }
817
d88aee68 818 /* Get consumer socket to use to push the metadata.*/
7972aab2
DG
819 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
820 consumer);
d88aee68
DG
821 if (!socket) {
822 ret = -1;
a70ac2f4 823 goto end;
d88aee68
DG
824 }
825
a70ac2f4 826 ret = consumer_close_metadata(socket, metadata_key);
d88aee68 827 if (ret < 0) {
a70ac2f4 828 goto end;
d88aee68
DG
829 }
830
1b532a60 831end:
7972aab2 832 rcu_read_unlock();
d88aee68
DG
833 return ret;
834}
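/*
 * Illustrative sketch (not part of the original file) of the snapshot pattern
 * used above: the registry state is captured and updated under the registry
 * lock, and only the snapshotted values are used while talking to the
 * consumer, so the lock is never held across a blocking consumer call:
 *
 *     pthread_mutex_lock(&registry->lock);
 *     metadata_key = registry->metadata_key;
 *     registry_was_already_closed = registry->metadata_closed;
 *     if (metadata_key != 0) {
 *             registry->metadata_closed = 1;
 *     }
 *     pthread_mutex_unlock(&registry->lock);
 */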
835
36b588ed
MD
836/*
837 * We need to execute ht_destroy outside of RCU read-side critical
0b2dc8df
MD
838 * section and outside of call_rcu thread, so we postpone its execution
839 * using ht_cleanup_push. It is simpler than to change the semantic of
840 * the many callers of delete_ust_app_session().
36b588ed
MD
841 */
842static
843void delete_ust_app_session_rcu(struct rcu_head *head)
844{
845 struct ust_app_session *ua_sess =
846 caa_container_of(head, struct ust_app_session, rcu_head);
847
0b2dc8df 848 ht_cleanup_push(ua_sess->channels);
36b588ed
MD
849 free(ua_sess);
850}
851
d80a6244
DG
852/*
853 * Delete ust app session safely. RCU read lock must be held before calling
854 * this function.
82cac6d2
JG
855 *
856 * The session list lock must be held by the caller.
d80a6244 857 */
8b366481 858static
d0b96690
DG
859void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
860 struct ust_app *app)
d80a6244
DG
861{
862 int ret;
bec39940 863 struct lttng_ht_iter iter;
d80a6244 864 struct ust_app_channel *ua_chan;
7972aab2 865 struct ust_registry_session *registry;
d80a6244 866
d88aee68
DG
867 assert(ua_sess);
868
1b532a60
DG
869 pthread_mutex_lock(&ua_sess->lock);
870
b161602a
MD
871 assert(!ua_sess->deleted);
872 ua_sess->deleted = true;
873
7972aab2 874 registry = get_session_registry(ua_sess);
fad1ed2f 875 /* Registry can be null on error path during initialization. */
ce34fcd0 876 if (registry) {
d88aee68 877 /* Push metadata for application before freeing the application. */
7972aab2 878 (void) push_metadata(registry, ua_sess->consumer);
d88aee68 879
7972aab2
DG
880 /*
881 * Don't ask to close metadata for global per UID buffers. Close
1b532a60
DG
882 * metadata only on destroy trace session in this case. Also, the
 883 * previous push metadata could have flagged the metadata registry to
884 * close so don't send a close command if closed.
7972aab2 885 */
ce34fcd0 886 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
7972aab2
DG
887 /* And ask to close it for this session registry. */
888 (void) close_metadata(registry, ua_sess->consumer);
889 }
d80a6244
DG
890 }
891
bec39940
DG
892 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
893 node.node) {
894 ret = lttng_ht_del(ua_sess->channels, &iter);
525b0740 895 assert(!ret);
d0b96690 896 delete_ust_app_channel(sock, ua_chan, app);
d80a6244 897 }
d80a6244 898
7972aab2
DG
899 /* In case of per PID, the registry is kept in the session. */
900 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
901 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
902 if (reg_pid) {
fad1ed2f
JR
903 /*
904 * Registry can be null on error path during
905 * initialization.
906 */
7972aab2
DG
907 buffer_reg_pid_remove(reg_pid);
908 buffer_reg_pid_destroy(reg_pid);
909 }
910 }
d0b96690 911
aee6bafd 912 if (ua_sess->handle != -1) {
fb45065e 913 pthread_mutex_lock(&app->sock_lock);
ffe60014 914 ret = ustctl_release_handle(sock, ua_sess->handle);
fb45065e 915 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
916 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
917 ERR("UST app sock %d release session handle failed with ret %d",
918 sock, ret);
919 }
10b56aef
MD
920 /* Remove session from application UST object descriptor. */
921 iter.iter.node = &ua_sess->ust_objd_node.node;
922 ret = lttng_ht_del(app->ust_sessions_objd, &iter);
923 assert(!ret);
aee6bafd 924 }
10b56aef 925
1b532a60
DG
926 pthread_mutex_unlock(&ua_sess->lock);
927
6addfa37
MD
928 consumer_output_put(ua_sess->consumer);
929
36b588ed 930 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
d80a6244 931}
91d76f53
DG
932
933/*
284d8f55
DG
934 * Delete a traceable application structure from the global list. Never call
935 * this function outside of a call_rcu call.
36b588ed
MD
936 *
937 * RCU read side lock should _NOT_ be held when calling this function.
91d76f53 938 */
8b366481
DG
939static
940void delete_ust_app(struct ust_app *app)
91d76f53 941{
8b366481 942 int ret, sock;
d42f20df 943 struct ust_app_session *ua_sess, *tmp_ua_sess;
2463b787
JR
944 struct lttng_ht_iter iter;
945 struct ust_app_token_event_rule *token;
44d3bd01 946
82cac6d2
JG
947 /*
948 * The session list lock must be held during this function to guarantee
949 * the existence of ua_sess.
950 */
951 session_lock_list();
d80a6244 952 /* Delete ust app sessions info */
852d0037
DG
953 sock = app->sock;
954 app->sock = -1;
d80a6244 955
8b366481 956 /* Wipe sessions */
d42f20df
DG
957 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
958 teardown_node) {
959 /* Free every object in the session and the session. */
36b588ed 960 rcu_read_lock();
d0b96690 961 delete_ust_app_session(sock, ua_sess, app);
36b588ed 962 rcu_read_unlock();
d80a6244 963 }
36b588ed 964
2463b787
JR
 965 /* Wipe tokens associated with the app */
966 cds_lfht_for_each_entry(app->tokens_ht->ht, &iter.iter, token,
967 node.node) {
968 ret = lttng_ht_del(app->tokens_ht, &iter);
969 assert(!ret);
970 delete_ust_app_token_event_rule(app->sock, token, app);
971 }
972
0b2dc8df 973 ht_cleanup_push(app->sessions);
10b56aef 974 ht_cleanup_push(app->ust_sessions_objd);
0b2dc8df 975 ht_cleanup_push(app->ust_objd);
2463b787
JR
976 ht_cleanup_push(app->tokens_ht);
977
 978 /* This can happen if the trigger setup failed, e.g. the app was killed. */
979 if (app->token_communication.handle) {
980 ustctl_release_object(sock, app->token_communication.handle);
981 free(app->token_communication.handle);
982 }
983
984 lttng_pipe_destroy(app->token_communication.trigger_event_pipe);
d80a6244 985
6414a713 986 /*
852d0037
DG
987 * Wait until we have deleted the application from the sock hash table
988 * before closing this socket, otherwise an application could re-use the
989 * socket ID and race with the teardown, using the same hash table entry.
990 *
991 * It's OK to leave the close in call_rcu. We want it to stay unique for
992 * all RCU readers that could run concurrently with unregister app,
993 * therefore we _need_ to only close that socket after a grace period. So
994 * it should stay in this RCU callback.
995 *
996 * This close() is a very important step of the synchronization model so
997 * every modification to this function must be carefully reviewed.
6414a713 998 */
799e2c4f
MD
999 ret = close(sock);
1000 if (ret) {
1001 PERROR("close");
1002 }
4063050c 1003 lttng_fd_put(LTTNG_FD_APPS, 1);
d80a6244 1004
852d0037 1005 DBG2("UST app pid %d deleted", app->pid);
284d8f55 1006 free(app);
82cac6d2 1007 session_unlock_list();
099e26bd
DG
1008}
1009
1010/*
f6a9efaa 1011 * URCU intermediate call to delete an UST app.
099e26bd 1012 */
8b366481
DG
1013static
1014void delete_ust_app_rcu(struct rcu_head *head)
099e26bd 1015{
bec39940
DG
1016 struct lttng_ht_node_ulong *node =
1017 caa_container_of(head, struct lttng_ht_node_ulong, head);
f6a9efaa 1018 struct ust_app *app =
852d0037 1019 caa_container_of(node, struct ust_app, pid_n);
f6a9efaa 1020
852d0037 1021 DBG3("Call RCU deleting app PID %d", app->pid);
f6a9efaa 1022 delete_ust_app(app);
099e26bd
DG
1023}
1024
ffe60014
DG
1025/*
1026 * Delete the session from the application ht and delete the data structure by
1027 * freeing every object inside and releasing them.
82cac6d2
JG
1028 *
1029 * The session list lock must be held by the caller.
ffe60014 1030 */
d0b96690 1031static void destroy_app_session(struct ust_app *app,
ffe60014
DG
1032 struct ust_app_session *ua_sess)
1033{
1034 int ret;
1035 struct lttng_ht_iter iter;
1036
1037 assert(app);
1038 assert(ua_sess);
1039
1040 iter.iter.node = &ua_sess->node.node;
1041 ret = lttng_ht_del(app->sessions, &iter);
1042 if (ret) {
1043 /* Already scheduled for teardown. */
1044 goto end;
1045 }
1046
1047 /* Once deleted, free the data structure. */
d0b96690 1048 delete_ust_app_session(app->sock, ua_sess, app);
ffe60014
DG
1049
1050end:
1051 return;
1052}
1053
8b366481
DG
1054/*
1055 * Alloc new UST app session.
1056 */
1057static
40bbd087 1058struct ust_app_session *alloc_ust_app_session(void)
8b366481
DG
1059{
1060 struct ust_app_session *ua_sess;
1061
1062 /* Init most of the default value by allocating and zeroing */
1063 ua_sess = zmalloc(sizeof(struct ust_app_session));
1064 if (ua_sess == NULL) {
1065 PERROR("malloc");
ffe60014 1066 goto error_free;
8b366481
DG
1067 }
1068
1069 ua_sess->handle = -1;
bec39940 1070 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
ad7a9107 1071 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
84ad93e8 1072 pthread_mutex_init(&ua_sess->lock, NULL);
ad7a9107 1073
8b366481
DG
1074 return ua_sess;
1075
ffe60014 1076error_free:
8b366481
DG
1077 return NULL;
1078}
1079
1080/*
1081 * Alloc new UST app channel.
1082 */
1083static
b53d4e59 1084struct ust_app_channel *alloc_ust_app_channel(const char *name,
d0b96690 1085 struct ust_app_session *ua_sess,
ffe60014 1086 struct lttng_ust_channel_attr *attr)
8b366481
DG
1087{
1088 struct ust_app_channel *ua_chan;
1089
1090 /* Init most of the default value by allocating and zeroing */
1091 ua_chan = zmalloc(sizeof(struct ust_app_channel));
1092 if (ua_chan == NULL) {
1093 PERROR("malloc");
1094 goto error;
1095 }
1096
1097 /* Setup channel name */
1098 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1099 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1100
1101 ua_chan->enabled = 1;
1102 ua_chan->handle = -1;
45893984 1103 ua_chan->session = ua_sess;
ffe60014 1104 ua_chan->key = get_next_channel_key();
bec39940
DG
1105 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1106 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1107 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
8b366481
DG
1108
1109 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
31746f93 1110 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
8b366481
DG
1111
1112 /* Copy attributes */
1113 if (attr) {
ffe60014 1114 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
2fe6e7f5
DG
1115 ua_chan->attr.subbuf_size = attr->subbuf_size;
1116 ua_chan->attr.num_subbuf = attr->num_subbuf;
1117 ua_chan->attr.overwrite = attr->overwrite;
1118 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1119 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1120 ua_chan->attr.output = attr->output;
491d1539 1121 ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
8b366481 1122 }
ffe60014
DG
1123 /* By default, the channel is a per cpu channel. */
1124 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
8b366481
DG
1125
1126 DBG3("UST app channel %s allocated", ua_chan->name);
1127
1128 return ua_chan;
1129
1130error:
1131 return NULL;
1132}
1133
37f1c236
DG
1134/*
1135 * Allocate and initialize a UST app stream.
1136 *
1137 * Return newly allocated stream pointer or NULL on error.
1138 */
ffe60014 1139struct ust_app_stream *ust_app_alloc_stream(void)
37f1c236
DG
1140{
1141 struct ust_app_stream *stream = NULL;
1142
1143 stream = zmalloc(sizeof(*stream));
1144 if (stream == NULL) {
1145 PERROR("zmalloc ust app stream");
1146 goto error;
1147 }
1148
1149 /* Zero could be a valid value for a handle so flag it to -1. */
1150 stream->handle = -1;
1151
1152error:
1153 return stream;
1154}
1155
8b366481
DG
1156/*
1157 * Alloc new UST app event.
1158 */
1159static
1160struct ust_app_event *alloc_ust_app_event(char *name,
1161 struct lttng_ust_event *attr)
1162{
1163 struct ust_app_event *ua_event;
1164
1165 /* Init most of the default value by allocating and zeroing */
1166 ua_event = zmalloc(sizeof(struct ust_app_event));
1167 if (ua_event == NULL) {
20533947 1168 PERROR("Failed to allocate ust_app_event structure");
8b366481
DG
1169 goto error;
1170 }
1171
1172 ua_event->enabled = 1;
1173 strncpy(ua_event->name, name, sizeof(ua_event->name));
1174 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
bec39940 1175 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
8b366481
DG
1176
1177 /* Copy attributes */
1178 if (attr) {
1179 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1180 }
1181
1182 DBG3("UST app event %s allocated", ua_event->name);
1183
1184 return ua_event;
1185
1186error:
1187 return NULL;
1188}
1189
2463b787
JR
1190/*
1191 * Alloc new UST app token event rule.
1192 */
1193static struct ust_app_token_event_rule *alloc_ust_app_token_event_rule(
1194 struct lttng_trigger *trigger)
1195{
1196 struct ust_app_token_event_rule *ua_token;
1197 struct lttng_condition *condition = NULL;
1198 struct lttng_event_rule *event_rule = NULL;
1199
1200 ua_token = zmalloc(sizeof(struct ust_app_token_event_rule));
1201 if (ua_token == NULL) {
1202 PERROR("Failed to allocate ust_app_token_event_rule structure");
1203 goto error;
1204 }
1205
1206 /* Get reference of the trigger */
1207 /* TODO should this be like lttng_event_rule_get with a returned bool? */
1208 lttng_trigger_get(trigger);
1209
1210 ua_token->enabled = 1;
1211 ua_token->token = lttng_trigger_get_tracer_token(trigger);
1212 lttng_ht_node_init_u64(&ua_token->node, ua_token->token);
1213
1214 condition = lttng_trigger_get_condition(trigger);
1215 assert(condition);
1216 assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT);
1217
1218 assert(LTTNG_CONDITION_STATUS_OK == lttng_condition_event_rule_get_rule_mutable(condition, &event_rule));
1219 assert(event_rule);
1220
1221 ua_token->trigger = trigger;
1222 ua_token->filter = lttng_event_rule_get_filter_bytecode(event_rule);
1223 ua_token->exclusion = lttng_event_rule_generate_exclusions(event_rule);
1224 ua_token->error_counter_index = lttng_trigger_get_error_counter_index(trigger);
1225
1226 /* TODO put capture here? or later*/
1227
1228 DBG3("UST app token event rule %" PRIu64 " allocated", ua_token->token);
1229
1230 return ua_token;
1231
1232error:
1233 return NULL;
1234}
1235
8b366481
DG
1236/*
1237 * Alloc new UST app context.
1238 */
1239static
bdf64013 1240struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
8b366481
DG
1241{
1242 struct ust_app_ctx *ua_ctx;
1243
1244 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
1245 if (ua_ctx == NULL) {
1246 goto error;
1247 }
1248
31746f93
DG
1249 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1250
8b366481
DG
1251 if (uctx) {
1252 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
bdf64013 1253 if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
f3db82be 1254 char *provider_name = NULL, *ctx_name = NULL;
bdf64013
JG
1255
1256 provider_name = strdup(uctx->u.app_ctx.provider_name);
1257 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1258 if (!provider_name || !ctx_name) {
1259 free(provider_name);
1260 free(ctx_name);
1261 goto error;
1262 }
1263
1264 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1265 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1266 }
8b366481
DG
1267 }
1268
1269 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
8b366481 1270 return ua_ctx;
bdf64013
JG
1271error:
1272 free(ua_ctx);
1273 return NULL;
8b366481
DG
1274}
1275
025faf73 1276/*
2463b787 1277 * Create a liblttng-ust filter bytecode from given bytecode.
025faf73
DG
1278 *
1279 * Return allocated filter or NULL on error.
1280 */
2463b787
JR
1281static struct lttng_ust_filter_bytecode *
1282create_ust_filter_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
025faf73 1283{
2463b787 1284 struct lttng_ust_filter_bytecode *filter = NULL;
025faf73
DG
1285
1286 /* Copy filter bytecode */
1287 filter = zmalloc(sizeof(*filter) + orig_f->len);
1288 if (!filter) {
2463b787 1289 PERROR("zmalloc alloc ust filter bytecode");
025faf73
DG
1290 goto error;
1291 }
1292
2463b787
JR
1293 assert(sizeof(struct lttng_bytecode) ==
1294 sizeof(struct lttng_ust_filter_bytecode));
025faf73 1295 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
025faf73
DG
1296error:
1297 return filter;
1298}
1299
51755dc8 1300/*
2463b787 1301 * Create a liblttng-ust capture bytecode from given bytecode.
51755dc8
JG
1302 *
1303 * Return allocated filter or NULL on error.
1304 */
2463b787
JR
1305static struct lttng_ust_capture_bytecode *
1306create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
51755dc8 1307{
2463b787 1308 struct lttng_ust_capture_bytecode *capture = NULL;
51755dc8 1309
2463b787
JR
1310 /* Copy capture bytecode */
1311 capture = zmalloc(sizeof(*capture) + orig_f->len);
1312 if (!capture) {
1313 PERROR("zmalloc alloc ust capture bytecode");
51755dc8
JG
1314 goto error;
1315 }
1316
2463b787
JR
1317 assert(sizeof(struct lttng_bytecode) ==
1318 sizeof(struct lttng_ust_capture_bytecode));
1319 memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
51755dc8 1320error:
2463b787 1321 return capture;
51755dc8
JG
1322}
1323
099e26bd 1324/*
421cb601
DG
1325 * Find an ust_app using the sock and return it. RCU read side lock must be
1326 * held before calling this helper function.
099e26bd 1327 */
f20baf8e 1328struct ust_app *ust_app_find_by_sock(int sock)
099e26bd 1329{
bec39940 1330 struct lttng_ht_node_ulong *node;
bec39940 1331 struct lttng_ht_iter iter;
f6a9efaa 1332
852d0037 1333 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
bec39940 1334 node = lttng_ht_iter_get_node_ulong(&iter);
f6a9efaa
DG
1335 if (node == NULL) {
1336 DBG2("UST app find by sock %d not found", sock);
f6a9efaa
DG
1337 goto error;
1338 }
852d0037
DG
1339
1340 return caa_container_of(node, struct ust_app, sock_n);
f6a9efaa
DG
1341
1342error:
1343 return NULL;
099e26bd
DG
1344}
1345
d0b96690
DG
1346/*
1347 * Find an ust_app using the notify sock and return it. RCU read side lock must
1348 * be held before calling this helper function.
1349 */
1350static struct ust_app *find_app_by_notify_sock(int sock)
1351{
1352 struct lttng_ht_node_ulong *node;
1353 struct lttng_ht_iter iter;
1354
1355 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1356 &iter);
1357 node = lttng_ht_iter_get_node_ulong(&iter);
1358 if (node == NULL) {
1359 DBG2("UST app find by notify sock %d not found", sock);
1360 goto error;
1361 }
1362
1363 return caa_container_of(node, struct ust_app, notify_sock_n);
1364
1365error:
1366 return NULL;
1367}
1368
025faf73
DG
1369/*
 1370 * Look up an ust app event based on the event name, filter bytecode,
 1371 * loglevel and exclusions.
1372 *
1373 * Return an ust_app_event object or NULL on error.
1374 */
18eace3b 1375static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
2463b787 1376 const char *name, const struct lttng_bytecode *filter,
2106efa0 1377 int loglevel_value,
39c5a3a7 1378 const struct lttng_event_exclusion *exclusion)
18eace3b
DG
1379{
1380 struct lttng_ht_iter iter;
1381 struct lttng_ht_node_str *node;
1382 struct ust_app_event *event = NULL;
1383 struct ust_app_ht_key key;
18eace3b
DG
1384
1385 assert(name);
1386 assert(ht);
1387
1388 /* Setup key for event lookup. */
1389 key.name = name;
1390 key.filter = filter;
2106efa0 1391 key.loglevel_type = loglevel_value;
39c5a3a7 1392 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
51755dc8 1393 key.exclusion = exclusion;
18eace3b 1394
025faf73
DG
1395 /* Lookup using the event name as hash and a custom match fct. */
1396 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1397 ht_match_ust_app_event, &key, &iter.iter);
18eace3b
DG
1398 node = lttng_ht_iter_get_node_str(&iter);
1399 if (node == NULL) {
1400 goto end;
1401 }
1402
1403 event = caa_container_of(node, struct ust_app_event, node);
1404
1405end:
18eace3b
DG
1406 return event;
1407}
1408
2463b787
JR
1409/*
 1410 * Look up an ust app token event rule based on a token id.
1411 *
1412 * Return an ust_app_token_event_rule object or NULL on error.
1413 */
1414static struct ust_app_token_event_rule *find_ust_app_token_event_rule(struct lttng_ht *ht,
1415 uint64_t token)
1416{
1417 struct lttng_ht_iter iter;
1418 struct lttng_ht_node_u64 *node;
1419 struct ust_app_token_event_rule *token_event_rule = NULL;
1420
1421 assert(ht);
1422
1423 lttng_ht_lookup(ht, &token, &iter);
1424 node = lttng_ht_iter_get_node_u64(&iter);
1425 if (node == NULL) {
1426 DBG2("UST app token %" PRIu64 " not found", token);
1427 goto end;
1428 }
1429
1430 token_event_rule = caa_container_of(node, struct ust_app_token_event_rule, node);
1431end:
1432 return token_event_rule;
1433}
1434
55cc08a6
DG
1435/*
1436 * Create the channel context on the tracer.
d0b96690
DG
1437 *
1438 * Called with UST app session lock held.
55cc08a6
DG
1439 */
1440static
1441int create_ust_channel_context(struct ust_app_channel *ua_chan,
1442 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1443{
1444 int ret;
1445
840cb59c 1446 health_code_update();
86acf0da 1447
fb45065e 1448 pthread_mutex_lock(&app->sock_lock);
852d0037 1449 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
55cc08a6 1450 ua_chan->obj, &ua_ctx->obj);
fb45065e 1451 pthread_mutex_unlock(&app->sock_lock);
55cc08a6 1452 if (ret < 0) {
ffe60014
DG
1453 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1454 ERR("UST app create channel context failed for app (pid: %d) "
1455 "with ret %d", app->pid, ret);
1456 } else {
3757b385
DG
1457 /*
1458 * This is normal behavior, an application can die during the
1459 * creation process. Don't report an error so the execution can
1460 * continue normally.
1461 */
1462 ret = 0;
88e3c2f5 1463 DBG3("UST app add context failed. Application is dead.");
ffe60014 1464 }
55cc08a6
DG
1465 goto error;
1466 }
1467
1468 ua_ctx->handle = ua_ctx->obj->handle;
1469
d0b96690
DG
1470 DBG2("UST app context handle %d created successfully for channel %s",
1471 ua_ctx->handle, ua_chan->name);
55cc08a6
DG
1472
1473error:
840cb59c 1474 health_code_update();
55cc08a6
DG
1475 return ret;
1476}
1477
53a80697
MD
1478/*
1479 * Set the filter on the tracer.
1480 */
2463b787
JR
1481static int set_ust_filter(struct ust_app *app,
1482 const struct lttng_bytecode *bytecode,
1483 struct lttng_ust_object_data *ust_object)
53a80697
MD
1484{
1485 int ret;
51755dc8 1486 struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
53a80697 1487
840cb59c 1488 health_code_update();
86acf0da 1489
2463b787
JR
1490 ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
1491 if (!ust_bytecode) {
1492 ret = -LTTNG_ERR_NOMEM;
1493 goto error;
1494 }
1495 pthread_mutex_lock(&app->sock_lock);
1496 ret = ustctl_set_filter(app->sock, ust_bytecode,
1497 ust_object);
1498 pthread_mutex_unlock(&app->sock_lock);
1499 if (ret < 0) {
1500 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1501 ERR("UST app set filter failed for object %p of app (pid: %d) "
1502 "with ret %d", ust_object, app->pid, ret);
1503 } else {
1504 /*
1505 * This is normal behavior, an application can die during the
1506 * creation process. Don't report an error so the execution can
1507 * continue normally.
1508 */
1509 ret = 0;
1510 DBG3("UST app set filter. Application is dead.");
1511 }
86acf0da 1512 goto error;
53a80697
MD
1513 }
1514
2463b787
JR
1515 DBG2("UST filter set for object %p successfully", ust_object);
1516
1517error:
1518 health_code_update();
1519 free(ust_bytecode);
1520 return ret;
1521}
1522
1523/*
1524 * Set a capture bytecode for the passed object.
1525 * The seqnum enforce the ordering at runtime and on reception.
1526 */
1527static int set_ust_capture(struct ust_app *app,
1528 const struct lttng_bytecode *bytecode,
1529 unsigned int seqnum,
1530 struct lttng_ust_object_data *ust_object)
1531{
1532 int ret;
1533 struct lttng_ust_capture_bytecode *ust_bytecode = NULL;
1534
1535 health_code_update();
1536
1537 ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
51755dc8
JG
1538 if (!ust_bytecode) {
1539 ret = -LTTNG_ERR_NOMEM;
1540 goto error;
1541 }
2463b787
JR
1542
1543 /* Set the seqnum */
1544 ust_bytecode->seqnum = seqnum;
1545
fb45065e 1546 pthread_mutex_lock(&app->sock_lock);
2463b787
JR
1547 ret = ustctl_set_capture(app->sock, ust_bytecode,
1548 ust_object);
fb45065e 1549 pthread_mutex_unlock(&app->sock_lock);
53a80697 1550 if (ret < 0) {
ffe60014 1551 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2463b787
JR
1552 ERR("UST app set capture failed for object %p of app (pid: %d) "
1553 "with ret %d", ust_object, app->pid, ret);
ffe60014 1554 } else {
3757b385
DG
1555 /*
1556 * This is normal behavior, an application can die during the
1557 * creation process. Don't report an error so the execution can
1558 * continue normally.
1559 */
1560 ret = 0;
2463b787 1561 DBG3("UST app set capture. Application is dead.");
ffe60014 1562 }
53a80697
MD
1563 goto error;
1564 }
1565
2463b787 1566 DBG2("UST capture set for object %p successfully", ust_object);
53a80697
MD
1567
1568error:
840cb59c 1569 health_code_update();
51755dc8 1570 free(ust_bytecode);
53a80697
MD
1571 return ret;
1572}
1573
51755dc8
JG
1574static
1575struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
1576 struct lttng_event_exclusion *exclusion)
1577{
1578 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1579 size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1580 LTTNG_UST_SYM_NAME_LEN * exclusion->count;
1581
1582 ust_exclusion = zmalloc(exclusion_alloc_size);
1583 if (!ust_exclusion) {
1584 PERROR("malloc");
1585 goto end;
1586 }
1587
1588 assert(sizeof(struct lttng_event_exclusion) ==
1589 sizeof(struct lttng_ust_event_exclusion));
1590 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1591end:
1592 return ust_exclusion;
1593}
1594
7cc9a73c
JI
1595/*
1596 * Set event exclusions on the tracer.
1597 */
2463b787
JR
1598static int set_ust_exclusions(struct ust_app *app,
1599 struct lttng_event_exclusion *exclusions,
1600 struct lttng_ust_object_data *ust_object)
7cc9a73c
JI
1601{
1602 int ret;
2463b787 1603 struct lttng_ust_event_exclusion *ust_exclusions = NULL;
7cc9a73c 1604
2463b787 1605 assert(exclusions && exclusions->count > 0);
7cc9a73c 1606
2463b787 1607 health_code_update();
7cc9a73c 1608
2463b787
JR
1609 ust_exclusions = create_ust_exclusion_from_exclusion(
1610 exclusions);
1611 if (!ust_exclusions) {
51755dc8
JG
1612 ret = -LTTNG_ERR_NOMEM;
1613 goto error;
1614 }
fb45065e 1615 pthread_mutex_lock(&app->sock_lock);
2463b787 1616 ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
fb45065e 1617 pthread_mutex_unlock(&app->sock_lock);
7cc9a73c
JI
1618 if (ret < 0) {
1619 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2463b787
JR
1620 ERR("UST app exclusions failed for object %p of app (pid: %d) "
1621 "with ret %d", ust_object, app->pid, ret);
7cc9a73c
JI
1622 } else {
1623 /*
1624 * This is normal behavior, an application can die during the
1625 * creation process. Don't report an error so the execution can
1626 * continue normally.
1627 */
1628 ret = 0;
2463b787 1629 DBG3("UST app set exclusions failed. Application is dead.");
7cc9a73c
JI
1630 }
1631 goto error;
1632 }
1633
2463b787 1634 DBG2("UST exclusions set successfully for object %p", ust_object);
7cc9a73c
JI
1635
1636error:
1637 health_code_update();
2463b787 1638 free(ust_exclusions);
7cc9a73c
JI
1639 return ret;
1640}
1641
9730260e
DG
1642/*
1643 * Disable the specified event on to UST tracer for the UST session.
1644 */
2463b787
JR
1645static int disable_ust_object(struct ust_app *app,
1646 struct lttng_ust_object_data *object)
9730260e
DG
1647{
1648 int ret;
1649
840cb59c 1650 health_code_update();
86acf0da 1651
fb45065e 1652 pthread_mutex_lock(&app->sock_lock);
2463b787 1653 ret = ustctl_disable(app->sock, object);
fb45065e 1654 pthread_mutex_unlock(&app->sock_lock);
9730260e 1655 if (ret < 0) {
ffe60014 1656 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2463b787
JR
1657 ERR("UST app disable failed for object %p app (pid: %d) with ret %d",
1658 object, app->pid, ret);
ffe60014 1659 } else {
3757b385
DG
1660 /*
1661 * This is normal behavior, an application can die during the
1662 * creation process. Don't report an error so the execution can
1663 * continue normally.
1664 */
1665 ret = 0;
ffe60014
DG
1666 DBG3("UST app disable event failed. Application is dead.");
1667 }
9730260e
DG
1668 goto error;
1669 }
1670
2463b787
JR
1671 DBG2("UST app object %p disabled successfully for app (pid: %d)",
1672 object, app->pid);
9730260e
DG
1673
1674error:
840cb59c 1675 health_code_update();
9730260e
DG
1676 return ret;
1677}
1678
78f0bacd
DG
1679/*
1680 * Disable the specified channel on to UST tracer for the UST session.
1681 */
1682static int disable_ust_channel(struct ust_app *app,
1683 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1684{
1685 int ret;
1686
840cb59c 1687 health_code_update();
86acf0da 1688
fb45065e 1689 pthread_mutex_lock(&app->sock_lock);
852d0037 1690 ret = ustctl_disable(app->sock, ua_chan->obj);
fb45065e 1691 pthread_mutex_unlock(&app->sock_lock);
78f0bacd 1692 if (ret < 0) {
ffe60014
DG
1693 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1694 ERR("UST app channel %s disable failed for app (pid: %d) "
1695 "and session handle %d with ret %d",
1696 ua_chan->name, app->pid, ua_sess->handle, ret);
1697 } else {
3757b385
DG
1698 /*
1699 * This is normal behavior, an application can die during the
1700 * creation process. Don't report an error so the execution can
1701 * continue normally.
1702 */
1703 ret = 0;
ffe60014
DG
1704 DBG3("UST app disable channel failed. Application is dead.");
1705 }
78f0bacd
DG
1706 goto error;
1707 }
1708
78f0bacd 1709 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
852d0037 1710 ua_chan->name, app->pid);
78f0bacd
DG
1711
1712error:
840cb59c 1713 health_code_update();
78f0bacd
DG
1714 return ret;
1715}
1716
1717/*
1718 * Enable the specified channel on the UST tracer for the UST session.
1719 */
1720static int enable_ust_channel(struct ust_app *app,
1721 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1722{
1723 int ret;
1724
840cb59c 1725 health_code_update();
86acf0da 1726
fb45065e 1727 pthread_mutex_lock(&app->sock_lock);
852d0037 1728 ret = ustctl_enable(app->sock, ua_chan->obj);
fb45065e 1729 pthread_mutex_unlock(&app->sock_lock);
78f0bacd 1730 if (ret < 0) {
ffe60014
DG
1731 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1732 ERR("UST app channel %s enable failed for app (pid: %d) "
1733 "and session handle %d with ret %d",
1734 ua_chan->name, app->pid, ua_sess->handle, ret);
1735 } else {
3757b385
DG
1736 /*
1737 * This is normal behavior, an application can die during the
1738 * creation process. Don't report an error so the execution can
1739 * continue normally.
1740 */
1741 ret = 0;
ffe60014
DG
1742 DBG3("UST app enable channel failed. Application is dead.");
1743 }
78f0bacd
DG
1744 goto error;
1745 }
1746
1747 ua_chan->enabled = 1;
1748
1749 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
852d0037 1750 ua_chan->name, app->pid);
78f0bacd
DG
1751
1752error:
840cb59c 1753 health_code_update();
78f0bacd
DG
1754 return ret;
1755}
1756
edb67388
DG
1757/*
1758 * Enable the specified object on the UST tracer for the UST session.
1759 */
2463b787 1760static int enable_ust_object(struct ust_app *app, struct lttng_ust_object_data *ust_object)
edb67388
DG
1761{
1762 int ret;
1763
840cb59c 1764 health_code_update();
86acf0da 1765
fb45065e 1766 pthread_mutex_lock(&app->sock_lock);
2463b787 1767 ret = ustctl_enable(app->sock, ust_object);
fb45065e 1768 pthread_mutex_unlock(&app->sock_lock);
edb67388 1769 if (ret < 0) {
ffe60014 1770 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2463b787
JR
1771 ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
1772 ust_object, app->pid, ret);
ffe60014 1773 } else {
3757b385
DG
1774 /*
1775 * This is normal behavior, an application can die during the
1776 * creation process. Don't report an error so the execution can
1777 * continue normally.
1778 */
1779 ret = 0;
2463b787 1780 DBG3("UST app enable failed. Application is dead.");
ffe60014 1781 }
edb67388
DG
1782 goto error;
1783 }
1784
2463b787
JR
1785 DBG2("UST app object %p enabled successfully for app (pid: %d)",
1786 ust_object, app->pid);
edb67388
DG
1787
1788error:
840cb59c 1789 health_code_update();
edb67388
DG
1790 return ret;
1791}
1792
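/*
 * The -EPIPE / -LTTNG_UST_ERR_EXITING handling above is repeated in every
 * ustctl wrapper of this file. A minimal sketch of a shared helper is shown
 * below; the helper name is hypothetical and nothing in this file calls it,
 * it only illustrates the convention of treating an application that died
 * mid-command as a non-error.
 */
static int handle_ustctl_app_ret(struct ust_app *app, int ret, const char *op)
{
	if (ret >= 0) {
		return ret;
	}
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		/* The application died during the command; not an error. */
		DBG3("UST app %s failed. Application is dead.", op);
		return 0;
	}
	ERR("UST app %s failed for app (pid: %d) with ret %d", op, app->pid, ret);
	return ret;
}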
099e26bd 1793/*
7972aab2 1794 * Send the channel and its stream buffers to the application.
4f3ab6ee 1795 *
ffe60014 1796 * Return 0 on success. On error, a negative value is returned.
4f3ab6ee 1797 */
7972aab2
DG
1798static int send_channel_pid_to_ust(struct ust_app *app,
1799 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
4f3ab6ee
DG
1800{
1801 int ret;
ffe60014 1802 struct ust_app_stream *stream, *stmp;
4f3ab6ee
DG
1803
1804 assert(app);
ffe60014 1805 assert(ua_sess);
4f3ab6ee 1806 assert(ua_chan);
4f3ab6ee 1807
840cb59c 1808 health_code_update();
4f3ab6ee 1809
7972aab2
DG
1810 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1811 app->sock);
86acf0da 1812
ffe60014
DG
1813 /* Send channel to the application. */
1814 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
a7169585
MD
1815 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1816 ret = -ENOTCONN; /* Caused by app exiting. */
1817 goto error;
1818 } else if (ret < 0) {
b551a063
DG
1819 goto error;
1820 }
1821
d88aee68
DG
1822 health_code_update();
1823
ffe60014
DG
1824 /* Send all streams to application. */
1825 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1826 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
a7169585
MD
1827 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1828 ret = -ENOTCONN; /* Caused by app exiting. */
1829 goto error;
1830 } else if (ret < 0) {
ffe60014
DG
1831 goto error;
1832 }
1833 /* We don't need the stream anymore once sent to the tracer. */
1834 cds_list_del(&stream->list);
fb45065e 1835 delete_ust_app_stream(-1, stream, app);
ffe60014 1836 }
ffe60014
DG
1837 /* Flag the channel that it is sent to the application. */
1838 ua_chan->is_sent = 1;
ffe60014 1839
b551a063 1840error:
840cb59c 1841 health_code_update();
b551a063
DG
1842 return ret;
1843}
1844
91d76f53 1845/*
5b4a0ec0 1846 * Create the specified event onto the UST tracer for a UST session.
d0b96690
DG
1847 *
1848 * Should be called with session mutex held.
91d76f53 1849 */
edb67388
DG
1850static
1851int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1852 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
91d76f53 1853{
5b4a0ec0 1854 int ret = 0;
284d8f55 1855
840cb59c 1856 health_code_update();
86acf0da 1857
5b4a0ec0 1858 /* Create UST event on tracer */
fb45065e 1859 pthread_mutex_lock(&app->sock_lock);
852d0037 1860 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
5b4a0ec0 1861 &ua_event->obj);
fb45065e 1862 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0 1863 if (ret < 0) {
ffe60014 1864 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
88e3c2f5 1865 abort();
ffe60014
DG
1866 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1867 ua_event->attr.name, app->pid, ret);
1868 } else {
3757b385
DG
1869 /*
1870 * This is normal behavior, an application can die during the
1871 * creation process. Don't report an error so the execution can
1872 * continue normally.
1873 */
1874 ret = 0;
ffe60014
DG
1875 DBG3("UST app create event failed. Application is dead.");
1876 }
5b4a0ec0 1877 goto error;
91d76f53 1878 }
f6a9efaa 1879
5b4a0ec0 1880 ua_event->handle = ua_event->obj->handle;
284d8f55 1881
2463b787
JR
1882 DBG2("UST app event %s created successfully for pid:%d object: %p",
1883 ua_event->attr.name, app->pid, ua_event->obj);
f6a9efaa 1884
840cb59c 1885 health_code_update();
86acf0da 1886
025faf73
DG
1887 /* Set filter if one is present. */
1888 if (ua_event->filter) {
2463b787 1889 ret = set_ust_filter(app, ua_event->filter, ua_event->obj);
025faf73
DG
1890 if (ret < 0) {
1891 goto error;
1892 }
1893 }
1894
7cc9a73c
JI
1895 /* Set exclusions for the event */
1896 if (ua_event->exclusion) {
2463b787 1897 ret = set_ust_exclusions(app, ua_event->exclusion, ua_event->obj);
7cc9a73c
JI
1898 if (ret < 0) {
1899 goto error;
1900 }
1901 }
1902
8535a6d9 1903 /* The event is created disabled; enable it on the tracer if requested. */
40113787
MD
1904 if (ua_event->enabled) {
1905 /*
1906 * We now need to explicitly enable the event, since it
1907 * is now disabled at creation.
1908 */
2463b787 1909 ret = enable_ust_object(app, ua_event->obj);
40113787
MD
1910 if (ret < 0) {
1911 /*
1912 * If we hit an EPERM, something is wrong with our enable call. If
1913 * we get an EEXIST, there is a problem on the tracer side since we
1914 * just created it.
1915 */
1916 switch (ret) {
1917 case -LTTNG_UST_ERR_PERM:
1918 /* Code flow problem */
1919 assert(0);
1920 case -LTTNG_UST_ERR_EXIST:
1921 /* It's OK for our use case. */
1922 ret = 0;
1923 break;
1924 default:
1925 break;
1926 }
1927 goto error;
1928 }
8535a6d9
DG
1929 }
1930
5b4a0ec0 1931error:
840cb59c 1932 health_code_update();
5b4a0ec0 1933 return ret;
91d76f53 1934}
48842b30 1935
2463b787
JR
1936static
1937void init_ust_trigger_from_event_rule(const struct lttng_event_rule *rule, struct lttng_ust_trigger *trigger)
1938{
1939 enum lttng_event_rule_status status;
1940 enum lttng_loglevel_type loglevel_type;
1941 enum lttng_ust_loglevel_type ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1942 int loglevel = -1;
1943 const char *pattern;
1944
1945 /* For now only LTTNG_EVENT_RULE_TYPE_TRACEPOINT are supported */
1946 assert(lttng_event_rule_get_type(rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
1947
1948 memset(trigger, 0, sizeof(*trigger));
1949
1950 if (lttng_event_rule_is_agent(rule)) {
1951 /*
1952 * Special event for agents: the actual meat of the event is in
1953 * the filter that will be attached later on.
1954 *
1955 * Set the default values for the agent event.
1956 */
1957 pattern = event_get_default_agent_ust_name(lttng_event_rule_get_domain_type(rule));
1958 loglevel = 0;
1959 ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1960 } else {
1961 status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
1962 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
1963 /* At this point this is a fatal error */
1964 assert(0);
1965 }
1966
1967 status = lttng_event_rule_tracepoint_get_log_level_type(
1968 rule, &loglevel_type);
1969 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
1970 /* At this point this is a fatal error */
1971 assert(0);
1972 }
1973
1974 switch (loglevel_type) {
1975 case LTTNG_EVENT_LOGLEVEL_ALL:
1976 ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1977 break;
1978 case LTTNG_EVENT_LOGLEVEL_RANGE:
1979 ust_loglevel_type = LTTNG_UST_LOGLEVEL_RANGE;
1980 break;
1981 case LTTNG_EVENT_LOGLEVEL_SINGLE:
1982 ust_loglevel_type = LTTNG_UST_LOGLEVEL_SINGLE;
1983 break;
1984 }
1985
1986 if (loglevel_type != LTTNG_EVENT_LOGLEVEL_ALL) {
1987 status = lttng_event_rule_tracepoint_get_log_level(
1988 rule, &loglevel);
1989 assert(status == LTTNG_EVENT_RULE_STATUS_OK);
1990 }
1991 }
1992
1993 trigger->instrumentation = LTTNG_UST_TRACEPOINT;
1994 strncpy(trigger->name, pattern, LTTNG_UST_SYM_NAME_LEN - 1);
1995 trigger->loglevel_type = ust_loglevel_type;
1996 trigger->loglevel = loglevel;
1997}
1998
1999/*
2000 * Create the specified event rule token onto the UST tracer for a UST app.
2001 */
2002static
2003int create_ust_token_event_rule(struct ust_app *app, struct ust_app_token_event_rule *ua_token)
2004{
2005 int ret = 0;
2006 struct lttng_ust_trigger trigger;
2007 struct lttng_condition *condition = NULL;
2008 struct lttng_event_rule *event_rule = NULL;
2009 unsigned int capture_bytecode_count = 0;
2010
2011 health_code_update();
2012 assert(app->token_communication.handle);
2013
2014 condition = lttng_trigger_get_condition(ua_token->trigger);
2015 assert(condition);
2016 assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT);
2017
2018 lttng_condition_event_rule_get_rule_mutable(condition, &event_rule);
2019 assert(event_rule);
2020 assert(lttng_event_rule_get_type(event_rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
2021 /* Should we also check that the domain is UST at this point, or do
2022 * we trust the upper layers? */
2023
2024 init_ust_trigger_from_event_rule(event_rule, &trigger);
2025
2026 trigger.id = ua_token->token;
2027 trigger.error_counter_index = ua_token->error_counter_index;
2028
2029 /* Create UST trigger on tracer */
2030 pthread_mutex_lock(&app->sock_lock);
2031 ret = ustctl_create_trigger(app->sock, &trigger, app->token_communication.handle, &ua_token->obj);
2032 pthread_mutex_unlock(&app->sock_lock);
2033 if (ret < 0) {
2034 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2035 abort();
2036 ERR("Error ustctl create trigger %s for app pid: %d with ret %d",
2037 trigger.name, app->pid, ret);
2038 } else {
2039 /*
2040 * This is normal behavior, an application can die during the
2041 * creation process. Don't report an error so the execution can
2042 * continue normally.
2043 */
2044 ret = 0;
2045 DBG3("UST app create trigger failed. Application is dead.");
2046 }
2047 goto error;
2048 }
2049
2050 ua_token->handle = ua_token->obj->handle;
2051
2052 DBG2("UST app trigger %s created successfully for pid:%d object: %p",
2053 trigger.name, app->pid, ua_token->obj);
2054
2055 health_code_update();
2056
2057 /* Set filter if one is present. */
2058 if (ua_token->filter) {
2059 ret = set_ust_filter(app, ua_token->filter, ua_token->obj);
2060 if (ret < 0) {
2061 goto error;
2062 }
2063 }
2064
2065 /* Set exclusions for the event */
2066 if (ua_token->exclusion) {
2067 ret = set_ust_exclusions(app, ua_token->exclusion, ua_token->obj);
2068 if (ret < 0) {
2069 goto error;
2070 }
2071 }
2072
2073 /* Set the capture bytecodes.
2074 * TODO: do we want to emulate what is done with exclusions and pass
2075 * in a single object carrying a count of capture bytecodes, instead
2076 * of making one call per bytecode?
2077 */
2078 capture_bytecode_count = lttng_trigger_get_capture_bytecode_count(ua_token->trigger);
2079 for (unsigned int i = 0; i < capture_bytecode_count; i++) {
2080 const struct lttng_bytecode *capture_bytecode = lttng_trigger_get_capture_bytecode_at_index(ua_token->trigger, i);
2081 ret = set_ust_capture(app, capture_bytecode, i, ua_token->obj);
2082 if (ret < 0) {
2083 goto error;
2084 }
2085 }
2086
2087 /*
2088 * We now need to explicitly enable the event, since it
2089 * is disabled at creation.
2090 */
2091 ret = enable_ust_object(app, ua_token->obj);
2092 if (ret < 0) {
2093 /*
2094 * If we hit an EPERM, something is wrong with our enable call. If
2095 * we get an EEXIST, there is a problem on the tracer side since we
2096 * just created it.
2097 */
2098 switch (ret) {
2099 case -LTTNG_UST_ERR_PERM:
2100 /* Code flow problem */
2101 assert(0);
2102 case -LTTNG_UST_ERR_EXIST:
2103 /* It's OK for our use case. */
2104 ret = 0;
2105 break;
2106 default:
2107 break;
2108 }
2109 goto error;
2110 }
2111 ua_token->enabled = true;
2112
2113error:
2114 health_code_update();
2115 return ret;
2116}
2117
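/*
 * Summary of the sequence above for one application (for reference only):
 * build a struct lttng_ust_trigger from the event rule, create the trigger
 * object on the app with ustctl_create_trigger(), attach the optional
 * filter, exclusions and capture bytecodes, then enable the object
 * explicitly since it is created disabled.
 */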
5b4a0ec0
DG
2118/*
2119 * Copy data between a UST app event and an LTT event.
2120 */
421cb601 2121static void shadow_copy_event(struct ust_app_event *ua_event,
48842b30
DG
2122 struct ltt_ust_event *uevent)
2123{
b4ffad32
JI
2124 size_t exclusion_alloc_size;
2125
48842b30
DG
2126 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
2127 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
2128
fc34caaa
DG
2129 ua_event->enabled = uevent->enabled;
2130
5b4a0ec0
DG
2131 /* Copy event attributes */
2132 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
2133
53a80697
MD
2134 /* Copy filter bytecode */
2135 if (uevent->filter) {
2463b787 2136 ua_event->filter = bytecode_copy(uevent->filter);
025faf73 2137 /* Filter might be NULL here in case of ENOMEM. */
53a80697 2138 }
b4ffad32
JI
2139
2140 /* Copy exclusion data */
2141 if (uevent->exclusion) {
51755dc8 2142 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
b4ffad32
JI
2143 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
2144 ua_event->exclusion = zmalloc(exclusion_alloc_size);
5f8df26c
JI
2145 if (ua_event->exclusion == NULL) {
2146 PERROR("malloc");
2147 } else {
2148 memcpy(ua_event->exclusion, uevent->exclusion,
2149 exclusion_alloc_size);
b4ffad32
JI
2150 }
2151 }
48842b30
DG
2152}
2153
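/*
 * Sizing sketch for the exclusion copy above, assuming the usual
 * LTTNG_UST_SYM_NAME_LEN of 256: an exclusion with count == 3 is copied into
 * sizeof(struct lttng_event_exclusion) + 3 * 256 bytes, i.e. the fixed header
 * followed by a flexible array of three fixed-size symbol names.
 */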
5b4a0ec0
DG
2154/*
2155 * Copy data between a UST app channel and an LTT channel.
2156 */
421cb601 2157static void shadow_copy_channel(struct ust_app_channel *ua_chan,
48842b30
DG
2158 struct ltt_ust_channel *uchan)
2159{
fc34caaa 2160 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
48842b30
DG
2161
2162 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2163 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
ffe60014 2164
1624d5b7
JD
2165 ua_chan->tracefile_size = uchan->tracefile_size;
2166 ua_chan->tracefile_count = uchan->tracefile_count;
2167
ffe60014
DG
2168 /* Copy event attributes since the layout is different. */
2169 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2170 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2171 ua_chan->attr.overwrite = uchan->attr.overwrite;
2172 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2173 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
e9404c27 2174 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
ffe60014 2175 ua_chan->attr.output = uchan->attr.output;
491d1539
MD
2176 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2177
ffe60014
DG
2178 /*
2179 * Note that the attribute channel type is not set since the channel on the
2180 * tracing registry side does not have this information.
2181 */
48842b30 2182
fc34caaa 2183 ua_chan->enabled = uchan->enabled;
7972aab2 2184 ua_chan->tracing_channel_id = uchan->id;
fc34caaa 2185
fc34caaa 2186 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
48842b30
DG
2187}
2188
5b4a0ec0
DG
2189/*
2190 * Copy data between a UST app session and a regular LTT session.
2191 */
421cb601 2192static void shadow_copy_session(struct ust_app_session *ua_sess,
bec39940 2193 struct ltt_ust_session *usess, struct ust_app *app)
48842b30 2194{
477d7741
MD
2195 struct tm *timeinfo;
2196 char datetime[16];
2197 int ret;
d7ba1388 2198 char tmp_shm_path[PATH_MAX];
477d7741 2199
940c4592 2200 timeinfo = localtime(&app->registration_time);
477d7741 2201 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
48842b30 2202
421cb601 2203 DBG2("Shadow copy of session handle %d", ua_sess->handle);
48842b30 2204
7972aab2
DG
2205 ua_sess->tracing_id = usess->id;
2206 ua_sess->id = get_next_session_id();
2463b787
JR
2207 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
2208 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
2209 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
2210 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
7972aab2
DG
2211 ua_sess->buffer_type = usess->buffer_type;
2212 ua_sess->bits_per_long = app->bits_per_long;
6addfa37 2213
7972aab2 2214 /* There is only one consumer object per session possible. */
6addfa37 2215 consumer_output_get(usess->consumer);
7972aab2 2216 ua_sess->consumer = usess->consumer;
6addfa37 2217
2bba9e53 2218 ua_sess->output_traces = usess->output_traces;
ecc48a90 2219 ua_sess->live_timer_interval = usess->live_timer_interval;
84ad93e8
DG
2220 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
2221 &usess->metadata_attr);
7972aab2
DG
2222
2223 switch (ua_sess->buffer_type) {
2224 case LTTNG_BUFFER_PER_PID:
2225 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
dec56f6c 2226 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
7972aab2
DG
2227 datetime);
2228 break;
2229 case LTTNG_BUFFER_PER_UID:
2230 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
470cc211 2231 DEFAULT_UST_TRACE_UID_PATH,
2463b787 2232 lttng_credentials_get_uid(&ua_sess->real_credentials),
470cc211 2233 app->bits_per_long);
7972aab2
DG
2234 break;
2235 default:
2236 assert(0);
2237 goto error;
2238 }
477d7741
MD
2239 if (ret < 0) {
2240 PERROR("asprintf UST shadow copy session");
477d7741 2241 assert(0);
7972aab2 2242 goto error;
477d7741
MD
2243 }
2244
3d071855
MD
2245 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
2246 sizeof(ua_sess->root_shm_path));
2247 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
d7ba1388
MD
2248 strncpy(ua_sess->shm_path, usess->shm_path,
2249 sizeof(ua_sess->shm_path));
2250 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2251 if (ua_sess->shm_path[0]) {
2252 switch (ua_sess->buffer_type) {
2253 case LTTNG_BUFFER_PER_PID:
2254 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
5da88b0f 2255 "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
d7ba1388
MD
2256 app->name, app->pid, datetime);
2257 break;
2258 case LTTNG_BUFFER_PER_UID:
2259 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
5da88b0f 2260 "/" DEFAULT_UST_TRACE_UID_PATH,
d7ba1388
MD
2261 app->uid, app->bits_per_long);
2262 break;
2263 default:
2264 assert(0);
2265 goto error;
2266 }
2267 if (ret < 0) {
2268 PERROR("sprintf UST shadow copy session");
2269 assert(0);
2270 goto error;
2271 }
2272 strncat(ua_sess->shm_path, tmp_shm_path,
2273 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
2274 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2275 }
6addfa37 2276 return;
7972aab2
DG
2277
2278error:
6addfa37 2279 consumer_output_put(ua_sess->consumer);
48842b30
DG
2280}
2281
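/*
 * For reference, assuming the stock DEFAULT_UST_TRACE_PID_PATH ("ust/pid")
 * and DEFAULT_UST_TRACE_UID_PATH ("ust/uid/%d/%u-bit") values, the snprintf
 * calls above produce relative paths of the following shape (example values):
 *
 *   per-PID buffers: ust/pid/myapp-1234-20201127-153045
 *   per-UID buffers: ust/uid/1000/64-bit
 */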
78f0bacd
DG
2282/*
2283 * Lookup session wrapper.
2284 */
84cd17c6 2285static
fb9a95c4 2286void __lookup_session_by_app(const struct ltt_ust_session *usess,
bec39940 2287 struct ust_app *app, struct lttng_ht_iter *iter)
84cd17c6
MD
2288{
2289 /* Get right UST app session from app */
d9bf3ca4 2290 lttng_ht_lookup(app->sessions, &usess->id, iter);
84cd17c6
MD
2291}
2292
421cb601
DG
2293/*
2294 * Return ust app session from the app session hashtable using the UST session
a991f516 2295 * id.
421cb601 2296 */
48842b30 2297static struct ust_app_session *lookup_session_by_app(
fb9a95c4 2298 const struct ltt_ust_session *usess, struct ust_app *app)
48842b30 2299{
bec39940 2300 struct lttng_ht_iter iter;
d9bf3ca4 2301 struct lttng_ht_node_u64 *node;
48842b30 2302
84cd17c6 2303 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 2304 node = lttng_ht_iter_get_node_u64(&iter);
48842b30
DG
2305 if (node == NULL) {
2306 goto error;
2307 }
2308
2309 return caa_container_of(node, struct ust_app_session, node);
2310
2311error:
2312 return NULL;
2313}
2314
7972aab2
DG
2315/*
2316 * Setup buffer registry per PID for the given session and application. If none
2317 * is found, a new one is created, added to the global registry and
2318 * initialized. If regp is valid, it's set with the newly created object.
2319 *
2320 * Return 0 on success or else a negative value.
2321 */
2322static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2323 struct ust_app *app, struct buffer_reg_pid **regp)
2324{
2325 int ret = 0;
2326 struct buffer_reg_pid *reg_pid;
2327
2328 assert(ua_sess);
2329 assert(app);
2330
2331 rcu_read_lock();
2332
2333 reg_pid = buffer_reg_pid_find(ua_sess->id);
2334 if (!reg_pid) {
2335 /*
2336 * This is the create channel path meaning that if there is NO
2337 * registry available, we have to create one for this session.
2338 */
d7ba1388 2339 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
3d071855 2340 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
2341 if (ret < 0) {
2342 goto error;
2343 }
7972aab2
DG
2344 } else {
2345 goto end;
2346 }
2347
2348 /* Initialize registry. */
2349 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2350 app->bits_per_long, app->uint8_t_alignment,
2351 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf 2352 app->uint64_t_alignment, app->long_alignment,
470cc211
JG
2353 app->byte_order, app->version.major, app->version.minor,
2354 reg_pid->root_shm_path, reg_pid->shm_path,
2463b787
JR
2355 lttng_credentials_get_uid(&ua_sess->effective_credentials),
2356 lttng_credentials_get_gid(&ua_sess->effective_credentials),
2357 ua_sess->tracing_id,
8de88061 2358 app->uid);
7972aab2 2359 if (ret < 0) {
286c991a
MD
2360 /*
2361 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2362 * destroy the buffer registry, because it is always expected
2363 * that if the buffer registry can be found, its ust registry is
2364 * non-NULL.
2365 */
2366 buffer_reg_pid_destroy(reg_pid);
7972aab2
DG
2367 goto error;
2368 }
2369
286c991a
MD
2370 buffer_reg_pid_add(reg_pid);
2371
7972aab2
DG
2372 DBG3("UST app buffer registry per PID created successfully");
2373
2374end:
2375 if (regp) {
2376 *regp = reg_pid;
2377 }
2378error:
2379 rcu_read_unlock();
2380 return ret;
2381}
2382
2383/*
2384 * Setup buffer registry per UID for the given session and application. If none
2385 * is found, a new one is created, added to the global registry and
2386 * initialized. If regp is valid, it's set with the newly created object.
2387 *
2388 * Return 0 on success or else a negative value.
2389 */
2390static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
d7ba1388 2391 struct ust_app_session *ua_sess,
7972aab2
DG
2392 struct ust_app *app, struct buffer_reg_uid **regp)
2393{
2394 int ret = 0;
2395 struct buffer_reg_uid *reg_uid;
2396
2397 assert(usess);
2398 assert(app);
2399
2400 rcu_read_lock();
2401
2402 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2403 if (!reg_uid) {
2404 /*
2405 * This is the create channel path meaning that if there is NO
2406 * registry available, we have to create one for this session.
2407 */
2408 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
3d071855
MD
2409 LTTNG_DOMAIN_UST, &reg_uid,
2410 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
2411 if (ret < 0) {
2412 goto error;
2413 }
7972aab2
DG
2414 } else {
2415 goto end;
2416 }
2417
2418 /* Initialize registry. */
af6142cf 2419 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
7972aab2
DG
2420 app->bits_per_long, app->uint8_t_alignment,
2421 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf
MD
2422 app->uint64_t_alignment, app->long_alignment,
2423 app->byte_order, app->version.major,
3d071855 2424 app->version.minor, reg_uid->root_shm_path,
8de88061
JR
2425 reg_uid->shm_path, usess->uid, usess->gid,
2426 ua_sess->tracing_id, app->uid);
7972aab2 2427 if (ret < 0) {
286c991a
MD
2428 /*
2429 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2430 * destroy the buffer registry, because it is always expected
2431 * that if the buffer registry can be found, its ust registry is
2432 * non-NULL.
2433 */
2434 buffer_reg_uid_destroy(reg_uid, NULL);
7972aab2
DG
2435 goto error;
2436 }
2437 /* Add node to teardown list of the session. */
2438 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2439
286c991a 2440 buffer_reg_uid_add(reg_uid);
7972aab2 2441
286c991a 2442 DBG3("UST app buffer registry per UID created successfully");
7972aab2
DG
2443end:
2444 if (regp) {
2445 *regp = reg_uid;
2446 }
2447error:
2448 rcu_read_unlock();
2449 return ret;
2450}
2451
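/*
 * Registry scope recap (for reference): a per-PID registry is keyed by the
 * ust_app_session id, so there is one per application session, while a
 * per-UID registry is keyed by (tracing session id, bitness, uid) and is
 * shared by every application of that user running with the same bitness.
 */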
421cb601 2452/*
3d8ca23b 2453 * Create a session on the tracer side for the given app.
421cb601 2454 *
3d8ca23b
DG
2455 * On success, ua_sess_ptr is populated with the session pointer or else left
2456 * untouched. If the session was created, is_created is set to 1. On error,
2457 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2458 * be NULL.
2459 *
2460 * Returns 0 on success or else a negative code which is either -ENOMEM or
2461 * -ENOTCONN which is the default code if the ustctl_create_session fails.
421cb601 2462 */
03f91eaa 2463static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
3d8ca23b
DG
2464 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2465 int *is_created)
421cb601 2466{
3d8ca23b 2467 int ret, created = 0;
421cb601
DG
2468 struct ust_app_session *ua_sess;
2469
3d8ca23b
DG
2470 assert(usess);
2471 assert(app);
2472 assert(ua_sess_ptr);
2473
840cb59c 2474 health_code_update();
86acf0da 2475
421cb601
DG
2476 ua_sess = lookup_session_by_app(usess, app);
2477 if (ua_sess == NULL) {
d9bf3ca4 2478 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
852d0037 2479 app->pid, usess->id);
40bbd087 2480 ua_sess = alloc_ust_app_session();
421cb601
DG
2481 if (ua_sess == NULL) {
2482 /* Only malloc can failed so something is really wrong */
3d8ca23b
DG
2483 ret = -ENOMEM;
2484 goto error;
421cb601 2485 }
477d7741 2486 shadow_copy_session(ua_sess, usess, app);
3d8ca23b 2487 created = 1;
421cb601
DG
2488 }
2489
7972aab2
DG
2490 switch (usess->buffer_type) {
2491 case LTTNG_BUFFER_PER_PID:
2492 /* Init local registry. */
2493 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
421cb601 2494 if (ret < 0) {
e64207cf 2495 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2496 goto error;
2497 }
2498 break;
2499 case LTTNG_BUFFER_PER_UID:
2500 /* Look for a global registry. If none exists, create one. */
d7ba1388 2501 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
7972aab2 2502 if (ret < 0) {
e64207cf 2503 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2504 goto error;
2505 }
2506 break;
2507 default:
2508 assert(0);
2509 ret = -EINVAL;
2510 goto error;
2511 }
2512
2513 health_code_update();
2514
2515 if (ua_sess->handle == -1) {
fb45065e 2516 pthread_mutex_lock(&app->sock_lock);
7972aab2 2517 ret = ustctl_create_session(app->sock);
fb45065e 2518 pthread_mutex_unlock(&app->sock_lock);
7972aab2
DG
2519 if (ret < 0) {
2520 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2521 ERR("Creating session for app pid %d with ret %d",
ffe60014
DG
2522 app->pid, ret);
2523 } else {
2524 DBG("UST app creating session failed. Application is dead");
3757b385
DG
2525 /*
2526 * This is normal behavior, an application can die during the
2527 * creation process. Don't report an error so the execution can
2528 * continue normally. This will get flagged ENOTCONN and the
2529 * caller will handle it.
2530 */
2531 ret = 0;
ffe60014 2532 }
d0b96690 2533 delete_ust_app_session(-1, ua_sess, app);
3d8ca23b
DG
2534 if (ret != -ENOMEM) {
2535 /*
2536 * Tracer is probably gone or got an internal error so let's
2537 * behave like it will soon unregister or not usable.
2538 */
2539 ret = -ENOTCONN;
2540 }
2541 goto error;
421cb601
DG
2542 }
2543
7972aab2
DG
2544 ua_sess->handle = ret;
2545
2546 /* Add ust app session to app's HT */
d9bf3ca4
MD
2547 lttng_ht_node_init_u64(&ua_sess->node,
2548 ua_sess->tracing_id);
2549 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
10b56aef
MD
2550 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2551 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2552 &ua_sess->ust_objd_node);
7972aab2
DG
2553
2554 DBG2("UST app session created successfully with handle %d", ret);
2555 }
2556
2557 *ua_sess_ptr = ua_sess;
2558 if (is_created) {
2559 *is_created = created;
2560 }
2561
2562 /* Everything went well. */
2563 ret = 0;
2564
2565error:
2566 health_code_update();
2567 return ret;
2568}
2569
6a6b2068
JG
2570/*
2571 * Match function for a hash table lookup of ust_app_ctx.
2572 *
2573 * It matches an ust app context based on the context type and, in the case
2574 * of perf counters, their name.
2575 */
2576static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2577{
2578 struct ust_app_ctx *ctx;
bdf64013 2579 const struct lttng_ust_context_attr *key;
6a6b2068
JG
2580
2581 assert(node);
2582 assert(_key);
2583
2584 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2585 key = _key;
2586
2587 /* Context type */
2588 if (ctx->ctx.ctx != key->ctx) {
2589 goto no_match;
2590 }
2591
bdf64013
JG
2592 switch(key->ctx) {
2593 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
6a6b2068 2594 if (strncmp(key->u.perf_counter.name,
bdf64013
JG
2595 ctx->ctx.u.perf_counter.name,
2596 sizeof(key->u.perf_counter.name))) {
2597 goto no_match;
2598 }
2599 break;
2600 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2601 if (strcmp(key->u.app_ctx.provider_name,
2602 ctx->ctx.u.app_ctx.provider_name) ||
2603 strcmp(key->u.app_ctx.ctx_name,
2604 ctx->ctx.u.app_ctx.ctx_name)) {
6a6b2068
JG
2605 goto no_match;
2606 }
bdf64013
JG
2607 break;
2608 default:
2609 break;
6a6b2068
JG
2610 }
2611
2612 /* Match. */
2613 return 1;
2614
2615no_match:
2616 return 0;
2617}
2618
2619/*
2620 * Lookup for an ust app context from an lttng_ust_context.
2621 *
be184a0f 2622 * Must be called while holding RCU read side lock.
6a6b2068
JG
2623 * Return an ust_app_ctx object or NULL on error.
2624 */
2625static
2626struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
bdf64013 2627 struct lttng_ust_context_attr *uctx)
6a6b2068
JG
2628{
2629 struct lttng_ht_iter iter;
2630 struct lttng_ht_node_ulong *node;
2631 struct ust_app_ctx *app_ctx = NULL;
2632
2633 assert(uctx);
2634 assert(ht);
2635
2636 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2637 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2638 ht_match_ust_app_ctx, uctx, &iter.iter);
2639 node = lttng_ht_iter_get_node_ulong(&iter);
2640 if (!node) {
2641 goto end;
2642 }
2643
2644 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2645
2646end:
2647 return app_ctx;
2648}
2649
7972aab2
DG
2650/*
2651 * Create a context for the channel on the tracer.
2652 *
2653 * Called with UST app session lock held and a RCU read side lock.
2654 */
2655static
c9edf082 2656int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
f3db82be 2657 struct lttng_ust_context_attr *uctx,
7972aab2
DG
2658 struct ust_app *app)
2659{
2660 int ret = 0;
7972aab2
DG
2661 struct ust_app_ctx *ua_ctx;
2662
2663 DBG2("UST app adding context to channel %s", ua_chan->name);
2664
6a6b2068
JG
2665 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2666 if (ua_ctx) {
7972aab2
DG
2667 ret = -EEXIST;
2668 goto error;
2669 }
2670
2671 ua_ctx = alloc_ust_app_ctx(uctx);
2672 if (ua_ctx == NULL) {
2673 /* malloc failed */
7682f304 2674 ret = -ENOMEM;
7972aab2
DG
2675 goto error;
2676 }
2677
2678 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
aa3514e9 2679 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
31746f93 2680 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
7972aab2
DG
2681
2682 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2683 if (ret < 0) {
2684 goto error;
2685 }
2686
2687error:
2688 return ret;
2689}
2690
2691/*
2692 * Enable on the tracer side a ust app event for the session and channel.
2693 *
2694 * Called with UST app session lock held.
2695 */
2696static
2697int enable_ust_app_event(struct ust_app_session *ua_sess,
2698 struct ust_app_event *ua_event, struct ust_app *app)
2699{
2700 int ret;
2701
2463b787 2702 ret = enable_ust_object(app, ua_event->obj);
7972aab2
DG
2703 if (ret < 0) {
2704 goto error;
2705 }
2706
2707 ua_event->enabled = 1;
2708
2709error:
2710 return ret;
2711}
2712
2713/*
2714 * Disable on the tracer side a ust app event for the session and channel.
2715 */
2716static int disable_ust_app_event(struct ust_app_session *ua_sess,
2717 struct ust_app_event *ua_event, struct ust_app *app)
2718{
2719 int ret;
2720
2463b787 2721 ret = disable_ust_object(app, ua_event->obj);
7972aab2
DG
2722 if (ret < 0) {
2723 goto error;
2724 }
2725
2726 ua_event->enabled = 0;
2727
2728error:
2729 return ret;
2730}
2731
2732/*
2733 * Lookup ust app channel for session and disable it on the tracer side.
2734 */
2735static
2736int disable_ust_app_channel(struct ust_app_session *ua_sess,
2737 struct ust_app_channel *ua_chan, struct ust_app *app)
2738{
2739 int ret;
2740
2741 ret = disable_ust_channel(app, ua_sess, ua_chan);
2742 if (ret < 0) {
2743 goto error;
2744 }
2745
2746 ua_chan->enabled = 0;
2747
2748error:
2749 return ret;
2750}
2751
2752/*
2753 * Lookup ust app channel for session and enable it on the tracer side. This
2754 * MUST be called with a RCU read side lock acquired.
2755 */
2756static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2757 struct ltt_ust_channel *uchan, struct ust_app *app)
2758{
2759 int ret = 0;
2760 struct lttng_ht_iter iter;
2761 struct lttng_ht_node_str *ua_chan_node;
2762 struct ust_app_channel *ua_chan;
2763
2764 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2765 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2766 if (ua_chan_node == NULL) {
d9bf3ca4 2767 DBG2("Unable to find channel %s in ust session id %" PRIu64,
7972aab2
DG
2768 uchan->name, ua_sess->tracing_id);
2769 goto error;
2770 }
2771
2772 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2773
2774 ret = enable_ust_channel(app, ua_sess, ua_chan);
2775 if (ret < 0) {
2776 goto error;
2777 }
2778
2779error:
2780 return ret;
2781}
2782
2783/*
2784 * Ask the consumer to create a channel and get it if successful.
2785 *
fad1ed2f
JR
2786 * Called with UST app session lock held.
2787 *
7972aab2
DG
2788 * Return 0 on success or else a negative value.
2789 */
2790static int do_consumer_create_channel(struct ltt_ust_session *usess,
2791 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
e098433c
JG
2792 int bitness, struct ust_registry_session *registry,
2793 uint64_t trace_archive_id)
7972aab2
DG
2794{
2795 int ret;
2796 unsigned int nb_fd = 0;
2797 struct consumer_socket *socket;
2798
2799 assert(usess);
2800 assert(ua_sess);
2801 assert(ua_chan);
2802 assert(registry);
2803
2804 rcu_read_lock();
2805 health_code_update();
2806
2807 /* Get the right consumer socket for the application. */
2808 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2809 if (!socket) {
2810 ret = -EINVAL;
2811 goto error;
2812 }
2813
2814 health_code_update();
2815
2816 /* Need one fd for the channel. */
2817 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2818 if (ret < 0) {
2819 ERR("Exhausted number of available FD upon create channel");
2820 goto error;
2821 }
2822
2823 /*
2824 * Ask consumer to create channel. The consumer will return the number of
2825 * streams we have to expect.
2826 */
2827 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
d2956687 2828 registry, usess->current_trace_chunk);
7972aab2
DG
2829 if (ret < 0) {
2830 goto error_ask;
2831 }
2832
2833 /*
2834 * Compute the number of fds needed before receiving them. It must be 2 per
2835 * stream (2 being the default value here).
2836 */
2837 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2838
2839 /* Reserve the amount of file descriptor we need. */
2840 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2841 if (ret < 0) {
2842 ERR("Exhausted number of available FD upon create channel");
2843 goto error_fd_get_stream;
2844 }
2845
2846 health_code_update();
2847
2848 /*
2849 * Now get the channel from the consumer. This call will populate the stream
2850 * list of that channel and set the ust objects.
2851 */
d9078d0c
DG
2852 if (usess->consumer->enabled) {
2853 ret = ust_consumer_get_channel(socket, ua_chan);
2854 if (ret < 0) {
2855 goto error_destroy;
2856 }
7972aab2
DG
2857 }
2858
2859 rcu_read_unlock();
2860 return 0;
2861
2862error_destroy:
2863 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2864error_fd_get_stream:
2865 /*
2866 * Initiate a destroy channel on the consumer since we had an error
2867 * handling it on our side. The return value is of no importance since we
2868 * already have a ret value set by the previous error that we need to
2869 * return.
2870 */
2871 (void) ust_consumer_destroy_channel(socket, ua_chan);
2872error_ask:
2873 lttng_fd_put(LTTNG_FD_APPS, 1);
2874error:
2875 health_code_update();
2876 rcu_read_unlock();
2877 return ret;
2878}
2879
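/*
 * Worked example of the FD accounting above, with the default of 2 FDs per
 * stream: for a channel whose consumer reports 4 expected streams, the
 * session daemon reserves 1 FD for the channel plus 2 * 4 = 8 FDs for the
 * streams, i.e. 9 FDs counted against the LTTNG_FD_APPS limit.
 */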
2880/*
2881 * Duplicate the ust data object of the ust app stream and save it in the
2882 * buffer registry stream.
2883 *
2884 * Return 0 on success or else a negative value.
2885 */
2886static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2887 struct ust_app_stream *stream)
2888{
2889 int ret;
2890
2891 assert(reg_stream);
2892 assert(stream);
2893
2894 /* Reserve the amount of file descriptor we need. */
2895 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2896 if (ret < 0) {
2897 ERR("Exhausted number of available FD upon duplicate stream");
2898 goto error;
2899 }
2900
2901 /* Duplicate object for stream once the original is in the registry. */
2902 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2903 reg_stream->obj.ust);
2904 if (ret < 0) {
2905 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2906 reg_stream->obj.ust, stream->obj, ret);
2907 lttng_fd_put(LTTNG_FD_APPS, 2);
2908 goto error;
2909 }
2910 stream->handle = stream->obj->handle;
2911
2912error:
2913 return ret;
2914}
2915
2916/*
2917 * Duplicate the ust data object of the ust app channel and save it in the
2918 * buffer registry channel.
2919 *
2920 * Return 0 on success or else a negative value.
2921 */
2922static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2923 struct ust_app_channel *ua_chan)
2924{
2925 int ret;
2926
2927 assert(reg_chan);
2928 assert(ua_chan);
2929
2930 /* Need one fd for the channel. */
2931 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2932 if (ret < 0) {
2933 ERR("Exhausted number of available FD upon duplicate channel");
2934 goto error_fd_get;
2935 }
2936
2937 /* Duplicate object for stream once the original is in the registry. */
2938 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2939 if (ret < 0) {
2940 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2941 reg_chan->obj.ust, ua_chan->obj, ret);
2942 goto error;
2943 }
2944 ua_chan->handle = ua_chan->obj->handle;
2945
2946 return 0;
2947
2948error:
2949 lttng_fd_put(LTTNG_FD_APPS, 1);
2950error_fd_get:
2951 return ret;
2952}
2953
2954/*
2955 * For a given channel buffer registry, setup all streams of the given ust
2956 * application channel.
2957 *
2958 * Return 0 on success or else a negative value.
2959 */
2960static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
fb45065e
MD
2961 struct ust_app_channel *ua_chan,
2962 struct ust_app *app)
7972aab2
DG
2963{
2964 int ret = 0;
2965 struct ust_app_stream *stream, *stmp;
2966
2967 assert(reg_chan);
2968 assert(ua_chan);
2969
2970 DBG2("UST app setup buffer registry stream");
2971
2972 /* Transfer all streams to the buffer registry. */
2973 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2974 struct buffer_reg_stream *reg_stream;
2975
2976 ret = buffer_reg_stream_create(&reg_stream);
2977 if (ret < 0) {
2978 goto error;
2979 }
2980
2981 /*
2982 * Keep original pointer and nullify it in the stream so the delete
2983 * stream call does not release the object.
2984 */
2985 reg_stream->obj.ust = stream->obj;
2986 stream->obj = NULL;
2987 buffer_reg_stream_add(reg_stream, reg_chan);
421cb601 2988
7972aab2
DG
2989 /* We don't need the streams anymore. */
2990 cds_list_del(&stream->list);
fb45065e 2991 delete_ust_app_stream(-1, stream, app);
7972aab2 2992 }
421cb601 2993
7972aab2
DG
2994error:
2995 return ret;
2996}
2997
2998/*
2999 * Create a buffer registry channel for the given session registry and
3000 * application channel object. If regp pointer is valid, it's set with the
3001 * created object. Important, the created object is NOT added to the session
3002 * registry hash table.
3003 *
3004 * Return 0 on success else a negative value.
3005 */
3006static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3007 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
3008{
3009 int ret;
3010 struct buffer_reg_channel *reg_chan = NULL;
3011
3012 assert(reg_sess);
3013 assert(ua_chan);
3014
3015 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
3016
3017 /* Create buffer registry channel. */
3018 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
3019 if (ret < 0) {
3020 goto error_create;
421cb601 3021 }
7972aab2
DG
3022 assert(reg_chan);
3023 reg_chan->consumer_key = ua_chan->key;
8c924c7b 3024 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
d07ceecd 3025 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
421cb601 3026
7972aab2
DG
3027 /* Create and add a channel registry to session. */
3028 ret = ust_registry_channel_add(reg_sess->reg.ust,
3029 ua_chan->tracing_channel_id);
3030 if (ret < 0) {
3031 goto error;
d88aee68 3032 }
7972aab2 3033 buffer_reg_channel_add(reg_sess, reg_chan);
d88aee68 3034
7972aab2
DG
3035 if (regp) {
3036 *regp = reg_chan;
3d8ca23b 3037 }
d88aee68 3038
7972aab2 3039 return 0;
3d8ca23b
DG
3040
3041error:
7972aab2
DG
3042 /* Safe because the registry channel object was not added to any HT. */
3043 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
3044error_create:
3d8ca23b 3045 return ret;
421cb601
DG
3046}
3047
55cc08a6 3048/*
7972aab2
DG
3049 * Setup buffer registry channel for the given session registry and application
3050 * channel object. If regp pointer is valid, it's set with the created object.
d0b96690 3051 *
7972aab2 3052 * Return 0 on success else a negative value.
55cc08a6 3053 */
7972aab2 3054static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
fb45065e
MD
3055 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
3056 struct ust_app *app)
55cc08a6 3057{
7972aab2 3058 int ret;
55cc08a6 3059
7972aab2
DG
3060 assert(reg_sess);
3061 assert(reg_chan);
3062 assert(ua_chan);
3063 assert(ua_chan->obj);
55cc08a6 3064
7972aab2 3065 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
55cc08a6 3066
7972aab2 3067 /* Setup all streams for the registry. */
fb45065e 3068 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
7972aab2 3069 if (ret < 0) {
55cc08a6
DG
3070 goto error;
3071 }
3072
7972aab2
DG
3073 reg_chan->obj.ust = ua_chan->obj;
3074 ua_chan->obj = NULL;
55cc08a6 3075
7972aab2 3076 return 0;
55cc08a6
DG
3077
3078error:
7972aab2
DG
3079 buffer_reg_channel_remove(reg_sess, reg_chan);
3080 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
55cc08a6
DG
3081 return ret;
3082}
3083
edb67388 3084/*
7972aab2 3085 * Send buffer registry channel to the application.
d0b96690 3086 *
7972aab2 3087 * Return 0 on success else a negative value.
edb67388 3088 */
7972aab2
DG
3089static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
3090 struct ust_app *app, struct ust_app_session *ua_sess,
3091 struct ust_app_channel *ua_chan)
edb67388
DG
3092{
3093 int ret;
7972aab2 3094 struct buffer_reg_stream *reg_stream;
edb67388 3095
7972aab2
DG
3096 assert(reg_chan);
3097 assert(app);
3098 assert(ua_sess);
3099 assert(ua_chan);
3100
3101 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
3102
3103 ret = duplicate_channel_object(reg_chan, ua_chan);
edb67388
DG
3104 if (ret < 0) {
3105 goto error;
3106 }
3107
7972aab2
DG
3108 /* Send channel to the application. */
3109 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
a7169585
MD
3110 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3111 ret = -ENOTCONN; /* Caused by app exiting. */
3112 goto error;
3113 } else if (ret < 0) {
7972aab2
DG
3114 goto error;
3115 }
3116
3117 health_code_update();
3118
3119 /* Send all streams to application. */
3120 pthread_mutex_lock(&reg_chan->stream_list_lock);
3121 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
3122 struct ust_app_stream stream;
3123
3124 ret = duplicate_stream_object(reg_stream, &stream);
3125 if (ret < 0) {
3126 goto error_stream_unlock;
3127 }
3128
3129 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
3130 if (ret < 0) {
fb45065e 3131 (void) release_ust_app_stream(-1, &stream, app);
a7169585
MD
3132 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3133 ret = -ENOTCONN; /* Caused by app exiting. */
a7169585 3134 }
7972aab2
DG
3135 goto error_stream_unlock;
3136 }
edb67388 3137
7972aab2
DG
3138 /*
3139 * The return value is not important here. This function will output an
3140 * error if needed.
3141 */
fb45065e 3142 (void) release_ust_app_stream(-1, &stream, app);
7972aab2
DG
3143 }
3144 ua_chan->is_sent = 1;
3145
3146error_stream_unlock:
3147 pthread_mutex_unlock(&reg_chan->stream_list_lock);
edb67388
DG
3148error:
3149 return ret;
3150}
3151
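/*
 * Contrast with send_channel_pid_to_ust() above: in the per-PID case the
 * streams belong to the ust_app_channel and are deleted once sent, whereas
 * here the streams stay owned by the buffer registry and only short-lived
 * duplicates of their ust objects are sent to the application and released
 * right after.
 */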
9730260e 3152/*
7972aab2
DG
3153 * Create the channel with per-UID buffers and send it to the application.
3154 *
9acdc1d6 3155 * This MUST be called with a RCU read side lock acquired.
71e0a100 3156 * The session list lock and the session's lock must be acquired.
9acdc1d6 3157 *
7972aab2 3158 * Return 0 on success else a negative value.
9730260e 3159 */
7972aab2
DG
3160static int create_channel_per_uid(struct ust_app *app,
3161 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3162 struct ust_app_channel *ua_chan)
9730260e
DG
3163{
3164 int ret;
7972aab2
DG
3165 struct buffer_reg_uid *reg_uid;
3166 struct buffer_reg_channel *reg_chan;
e32d7f27 3167 struct ltt_session *session = NULL;
e098433c
JG
3168 enum lttng_error_code notification_ret;
3169 struct ust_registry_channel *chan_reg;
9730260e 3170
7972aab2
DG
3171 assert(app);
3172 assert(usess);
3173 assert(ua_sess);
3174 assert(ua_chan);
3175
3176 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
3177
3178 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
3179 /*
3180 * The session creation handles the creation of this global registry
3181 * object. If none can be find, there is a code flow problem or a
3182 * teardown race.
3183 */
3184 assert(reg_uid);
3185
3186 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
3187 reg_uid);
2721f7ea
JG
3188 if (reg_chan) {
3189 goto send_channel;
3190 }
7972aab2 3191
2721f7ea
JG
3192 /* Create the buffer registry channel object. */
3193 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
3194 if (ret < 0) {
3195 ERR("Error creating the UST channel \"%s\" registry instance",
f14256d6 3196 ua_chan->name);
2721f7ea
JG
3197 goto error;
3198 }
f14256d6 3199
e098433c
JG
3200 session = session_find_by_id(ua_sess->tracing_id);
3201 assert(session);
3202 assert(pthread_mutex_trylock(&session->lock));
3203 assert(session_trylock_list());
3204
2721f7ea
JG
3205 /*
3206 * Create the buffers on the consumer side. This call populates the
3207 * ust app channel object with all streams and data object.
3208 */
3209 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
e098433c 3210 app->bits_per_long, reg_uid->registry->reg.ust,
d2956687 3211 session->most_recent_chunk_id.value);
2721f7ea
JG
3212 if (ret < 0) {
3213 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3214 ua_chan->name);
7972aab2
DG
3215
3216 /*
2721f7ea
JG
3217 * Let's remove the previously created buffer registry channel so
3218 * it's not visible anymore in the session registry.
7972aab2 3219 */
2721f7ea
JG
3220 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
3221 ua_chan->tracing_channel_id, false);
3222 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
3223 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
3224 goto error;
7972aab2
DG
3225 }
3226
2721f7ea
JG
3227 /*
3228 * Setup the streams and add it to the session registry.
3229 */
3230 ret = setup_buffer_reg_channel(reg_uid->registry,
3231 ua_chan, reg_chan, app);
3232 if (ret < 0) {
3233 ERR("Error setting up UST channel \"%s\"", ua_chan->name);
3234 goto error;
3235 }
3236
e098433c
JG
3237 /* Notify the notification subsystem of the channel's creation. */
3238 pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
3239 chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
3240 ua_chan->tracing_channel_id);
3241 assert(chan_reg);
3242 chan_reg->consumer_key = ua_chan->key;
3243 chan_reg = NULL;
3244 pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
e9404c27 3245
e098433c
JG
3246 notification_ret = notification_thread_command_add_channel(
3247 notification_thread_handle, session->name,
2463b787
JR
3248 lttng_credentials_get_uid(&ua_sess->effective_credentials),
3249 lttng_credentials_get_gid(&ua_sess->effective_credentials),
3250 ua_chan->name,
470cc211 3251 ua_chan->key, LTTNG_DOMAIN_UST,
e098433c
JG
3252 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3253 if (notification_ret != LTTNG_OK) {
3254 ret = - (int) notification_ret;
3255 ERR("Failed to add channel to notification thread");
3256 goto error;
e9404c27
JG
3257 }
3258
2721f7ea 3259send_channel:
66ff8e3f
JG
3260 /* Send buffers to the application. */
3261 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
3262 if (ret < 0) {
3263 if (ret != -ENOTCONN) {
3264 ERR("Error sending channel to application");
3265 }
3266 goto error;
3267 }