SoW-2020-0002: Trace Hit Counters: trigger error reporting integration
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
CommitLineData
91d76f53 1/*
ab5be9fa
MJ
2 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
3 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
91d76f53 4 *
ab5be9fa 5 * SPDX-License-Identifier: GPL-2.0-only
91d76f53 6 *
91d76f53
DG
7 */
8
6c1c0768 9#define _LGPL_SOURCE
91d76f53 10#include <errno.h>
2463b787 11#include <fcntl.h>
7972aab2 12#include <inttypes.h>
91d76f53
DG
13#include <pthread.h>
14#include <stdio.h>
15#include <stdlib.h>
099e26bd 16#include <string.h>
2463b787 17#include <sys/mman.h>
aba8e916
DG
18#include <sys/stat.h>
19#include <sys/types.h>
099e26bd 20#include <unistd.h>
0df502fd 21#include <urcu/compiler.h>
331744e3 22#include <signal.h>
bec39940 23
2463b787 24#include <common/bytecode/bytecode.h>
990570ed 25#include <common/common.h>
2463b787
JR
26#include <common/hashtable/utils.h>
27#include <lttng/event-rule/event-rule.h>
28#include <lttng/event-rule/event-rule-internal.h>
29#include <lttng/event-rule/tracepoint.h>
30#include <lttng/condition/condition.h>
31#include <lttng/condition/event-rule-internal.h>
32#include <lttng/condition/event-rule.h>
33#include <lttng/trigger/trigger-internal.h>
86acf0da 34#include <common/sessiond-comm/sessiond-comm.h>
1e307fab 35
7972aab2 36#include "buffer-registry.h"
2463b787 37#include "condition-internal.h"
86acf0da 38#include "fd-limit.h"
8782cc74 39#include "health-sessiond.h"
56fff090 40#include "ust-app.h"
48842b30 41#include "ust-consumer.h"
75018ab6
JG
42#include "lttng-ust-ctl.h"
43#include "lttng-ust-error.h"
0b2dc8df 44#include "utils.h"
fb83fe64 45#include "session.h"
e9404c27
JG
46#include "lttng-sessiond.h"
47#include "notification-thread-commands.h"
5c408ad8 48#include "rotate.h"
2463b787
JR
49#include "event.h"
50#include "trigger-error-accounting.h"
51
d80a6244 52
44cdb3a2
MJ
/* Application registries, indexed by PID, command socket and notify socket. */
struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;

/* Forward declaration; defined later in this file. */
static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
ffe60014
DG
67
68/*
d9bf3ca4 69 * Return the incremented value of next_channel_key.
ffe60014 70 */
d9bf3ca4 71static uint64_t get_next_channel_key(void)
ffe60014 72{
d9bf3ca4
MD
73 uint64_t ret;
74
75 pthread_mutex_lock(&next_channel_key_lock);
76 ret = ++_next_channel_key;
77 pthread_mutex_unlock(&next_channel_key_lock);
78 return ret;
ffe60014
DG
79}
80
81/*
7972aab2 82 * Return the atomically incremented value of next_session_id.
ffe60014 83 */
d9bf3ca4 84static uint64_t get_next_session_id(void)
ffe60014 85{
d9bf3ca4
MD
86 uint64_t ret;
87
88 pthread_mutex_lock(&next_session_id_lock);
89 ret = ++_next_session_id;
90 pthread_mutex_unlock(&next_session_id_lock);
91 return ret;
ffe60014
DG
92}
93
d65d2de8
DG
94static void copy_channel_attr_to_ustctl(
95 struct ustctl_consumer_channel_attr *attr,
96 struct lttng_ust_channel_attr *uattr)
97{
98 /* Copy event attributes since the layout is different. */
99 attr->subbuf_size = uattr->subbuf_size;
100 attr->num_subbuf = uattr->num_subbuf;
101 attr->overwrite = uattr->overwrite;
102 attr->switch_timer_interval = uattr->switch_timer_interval;
103 attr->read_timer_interval = uattr->read_timer_interval;
104 attr->output = uattr->output;
491d1539 105 attr->blocking_timeout = uattr->u.s.blocking_timeout;
d65d2de8
DG
106}
107
025faf73
DG
108/*
109 * Match function for the hash table lookup.
110 *
111 * It matches an ust app event based on three attributes which are the event
112 * name, the filter bytecode and the loglevel.
113 */
18eace3b
DG
114static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
115{
116 struct ust_app_event *event;
117 const struct ust_app_ht_key *key;
2106efa0 118 int ev_loglevel_value;
18eace3b
DG
119
120 assert(node);
121 assert(_key);
122
123 event = caa_container_of(node, struct ust_app_event, node.node);
124 key = _key;
2106efa0 125 ev_loglevel_value = event->attr.loglevel;
18eace3b 126
1af53eb5 127 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
18eace3b
DG
128
129 /* Event name */
130 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
131 goto no_match;
132 }
133
134 /* Event loglevel. */
2106efa0 135 if (ev_loglevel_value != key->loglevel_type) {
025faf73 136 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
2106efa0
PP
137 && key->loglevel_type == 0 &&
138 ev_loglevel_value == -1) {
025faf73
DG
139 /*
140 * Match is accepted. This is because on event creation, the
141 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
142 * -1 are accepted for this loglevel type since 0 is the one set by
143 * the API when receiving an enable event.
144 */
145 } else {
146 goto no_match;
147 }
18eace3b
DG
148 }
149
150 /* One of the filters is NULL, fail. */
151 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
152 goto no_match;
153 }
154
025faf73
DG
155 if (key->filter && event->filter) {
156 /* Both filters exists, check length followed by the bytecode. */
157 if (event->filter->len != key->filter->len ||
158 memcmp(event->filter->data, key->filter->data,
159 event->filter->len) != 0) {
160 goto no_match;
161 }
18eace3b
DG
162 }
163
1af53eb5
JI
164 /* One of the exclusions is NULL, fail. */
165 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
166 goto no_match;
167 }
168
169 if (key->exclusion && event->exclusion) {
170 /* Both exclusions exists, check count followed by the names. */
171 if (event->exclusion->count != key->exclusion->count ||
172 memcmp(event->exclusion->names, key->exclusion->names,
173 event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
174 goto no_match;
175 }
176 }
177
178
025faf73 179 /* Match. */
18eace3b
DG
180 return 1;
181
182no_match:
183 return 0;
18eace3b
DG
184}
185
025faf73
DG
186/*
187 * Unique add of an ust app event in the given ht. This uses the custom
188 * ht_match_ust_app_event match function and the event name as hash.
189 */
d0b96690 190static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
18eace3b
DG
191 struct ust_app_event *event)
192{
193 struct cds_lfht_node *node_ptr;
194 struct ust_app_ht_key key;
d0b96690 195 struct lttng_ht *ht;
18eace3b 196
d0b96690
DG
197 assert(ua_chan);
198 assert(ua_chan->events);
18eace3b
DG
199 assert(event);
200
d0b96690 201 ht = ua_chan->events;
18eace3b
DG
202 key.name = event->attr.name;
203 key.filter = event->filter;
2106efa0 204 key.loglevel_type = event->attr.loglevel;
91c89f23 205 key.exclusion = event->exclusion;
18eace3b
DG
206
207 node_ptr = cds_lfht_add_unique(ht->ht,
208 ht->hash_fct(event->node.key, lttng_ht_seed),
209 ht_match_ust_app_event, &key, &event->node.node);
210 assert(node_ptr == &event->node.node);
211}
212
d88aee68
DG
213/*
214 * Close the notify socket from the given RCU head object. This MUST be called
215 * through a call_rcu().
216 */
217static void close_notify_sock_rcu(struct rcu_head *head)
218{
219 int ret;
220 struct ust_app_notify_sock_obj *obj =
221 caa_container_of(head, struct ust_app_notify_sock_obj, head);
222
223 /* Must have a valid fd here. */
224 assert(obj->fd >= 0);
225
226 ret = close(obj->fd);
227 if (ret) {
228 ERR("close notify sock %d RCU", obj->fd);
229 }
230 lttng_fd_put(LTTNG_FD_APPS, 1);
231
232 free(obj);
233}
234
7972aab2
DG
235/*
236 * Return the session registry according to the buffer type of the given
237 * session.
238 *
239 * A registry per UID object MUST exists before calling this function or else
240 * it assert() if not found. RCU read side lock must be acquired.
241 */
242static struct ust_registry_session *get_session_registry(
243 struct ust_app_session *ua_sess)
244{
245 struct ust_registry_session *registry = NULL;
246
247 assert(ua_sess);
248
249 switch (ua_sess->buffer_type) {
250 case LTTNG_BUFFER_PER_PID:
251 {
252 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
253 if (!reg_pid) {
254 goto error;
255 }
256 registry = reg_pid->registry->reg.ust;
257 break;
258 }
259 case LTTNG_BUFFER_PER_UID:
260 {
261 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
470cc211 262 ua_sess->tracing_id, ua_sess->bits_per_long,
2463b787 263 lttng_credentials_get_uid(&ua_sess->real_credentials));
7972aab2
DG
264 if (!reg_uid) {
265 goto error;
266 }
267 registry = reg_uid->registry->reg.ust;
268 break;
269 }
270 default:
271 assert(0);
272 };
273
274error:
275 return registry;
276}
277
55cc08a6
DG
278/*
279 * Delete ust context safely. RCU read lock must be held before calling
280 * this function.
281 */
282static
fb45065e
MD
283void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
284 struct ust_app *app)
55cc08a6 285{
ffe60014
DG
286 int ret;
287
288 assert(ua_ctx);
289
55cc08a6 290 if (ua_ctx->obj) {
fb45065e 291 pthread_mutex_lock(&app->sock_lock);
ffe60014 292 ret = ustctl_release_object(sock, ua_ctx->obj);
fb45065e 293 pthread_mutex_unlock(&app->sock_lock);
d0b96690
DG
294 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
295 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
296 sock, ua_ctx->obj->handle, ret);
ffe60014 297 }
55cc08a6
DG
298 free(ua_ctx->obj);
299 }
300 free(ua_ctx);
301}
302
d80a6244
DG
303/*
304 * Delete ust app event safely. RCU read lock must be held before calling
305 * this function.
306 */
8b366481 307static
fb45065e
MD
308void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
309 struct ust_app *app)
d80a6244 310{
ffe60014
DG
311 int ret;
312
313 assert(ua_event);
314
53a80697 315 free(ua_event->filter);
951f0b71
JI
316 if (ua_event->exclusion != NULL)
317 free(ua_event->exclusion);
edb67388 318 if (ua_event->obj != NULL) {
fb45065e 319 pthread_mutex_lock(&app->sock_lock);
ffe60014 320 ret = ustctl_release_object(sock, ua_event->obj);
fb45065e 321 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
322 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
323 ERR("UST app sock %d release event obj failed with ret %d",
324 sock, ret);
325 }
edb67388
DG
326 free(ua_event->obj);
327 }
d80a6244
DG
328 free(ua_event);
329}
330
2463b787
JR
331/*
332 * Delete ust app token event_rule safely. RCU read lock must be held before calling
333 * this function. TODO: or does it????
334 */
335static
336void delete_ust_app_token_event_rule(int sock, struct ust_app_token_event_rule *ua_token,
337 struct ust_app *app)
338{
339 int ret;
340
341 assert(ua_token);
342
343 if (ua_token->exclusion != NULL)
344 free(ua_token->exclusion);
345 if (ua_token->obj != NULL) {
346 pthread_mutex_lock(&app->sock_lock);
347 ret = ustctl_release_object(sock, ua_token->obj);
348 pthread_mutex_unlock(&app->sock_lock);
349 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
350 ERR("UST app sock %d release event obj failed with ret %d",
351 sock, ret);
352 }
353 free(ua_token->obj);
354 }
355 lttng_trigger_put(ua_token->trigger);
356 free(ua_token);
357}
358
d80a6244 359/*
7972aab2
DG
360 * Release ust data object of the given stream.
361 *
362 * Return 0 on success or else a negative value.
d80a6244 363 */
fb45065e
MD
364static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
365 struct ust_app *app)
d80a6244 366{
7972aab2 367 int ret = 0;
ffe60014
DG
368
369 assert(stream);
370
8b366481 371 if (stream->obj) {
fb45065e 372 pthread_mutex_lock(&app->sock_lock);
ffe60014 373 ret = ustctl_release_object(sock, stream->obj);
fb45065e 374 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
375 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
376 ERR("UST app sock %d release stream obj failed with ret %d",
377 sock, ret);
378 }
4063050c 379 lttng_fd_put(LTTNG_FD_APPS, 2);
8b366481
DG
380 free(stream->obj);
381 }
7972aab2
DG
382
383 return ret;
384}
385
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	/* Best effort: a release failure has already been logged. */
	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}
399
36b588ed
MD
400/*
401 * We need to execute ht_destroy outside of RCU read-side critical
0b2dc8df
MD
402 * section and outside of call_rcu thread, so we postpone its execution
403 * using ht_cleanup_push. It is simpler than to change the semantic of
404 * the many callers of delete_ust_app_session().
36b588ed
MD
405 */
406static
407void delete_ust_app_channel_rcu(struct rcu_head *head)
408{
409 struct ust_app_channel *ua_chan =
410 caa_container_of(head, struct ust_app_channel, rcu_head);
411
0b2dc8df
MD
412 ht_cleanup_push(ua_chan->ctx);
413 ht_cleanup_push(ua_chan->events);
36b588ed
MD
414 free(ua_chan);
415}
416
fb83fe64
JD
417/*
418 * Extract the lost packet or discarded events counter when the channel is
419 * being deleted and store the value in the parent channel so we can
420 * access it from lttng list and at stop/destroy.
82cac6d2
JG
421 *
422 * The session list lock must be held by the caller.
fb83fe64
JD
423 */
424static
425void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
426{
427 uint64_t discarded = 0, lost = 0;
428 struct ltt_session *session;
429 struct ltt_ust_channel *uchan;
430
431 if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
432 return;
433 }
434
435 rcu_read_lock();
436 session = session_find_by_id(ua_chan->session->tracing_id);
d68ec974
JG
437 if (!session || !session->ust_session) {
438 /*
439 * Not finding the session is not an error because there are
440 * multiple ways the channels can be torn down.
441 *
442 * 1) The session daemon can initiate the destruction of the
443 * ust app session after receiving a destroy command or
444 * during its shutdown/teardown.
445 * 2) The application, since we are in per-pid tracing, is
446 * unregistering and tearing down its ust app session.
447 *
448 * Both paths are protected by the session list lock which
449 * ensures that the accounting of lost packets and discarded
450 * events is done exactly once. The session is then unpublished
451 * from the session list, resulting in this condition.
452 */
fb83fe64
JD
453 goto end;
454 }
455
456 if (ua_chan->attr.overwrite) {
457 consumer_get_lost_packets(ua_chan->session->tracing_id,
458 ua_chan->key, session->ust_session->consumer,
459 &lost);
460 } else {
461 consumer_get_discarded_events(ua_chan->session->tracing_id,
462 ua_chan->key, session->ust_session->consumer,
463 &discarded);
464 }
465 uchan = trace_ust_find_channel_by_name(
466 session->ust_session->domain_global.channels,
467 ua_chan->name);
468 if (!uchan) {
469 ERR("Missing UST channel to store discarded counters");
470 goto end;
471 }
472
473 uchan->per_pid_closed_app_discarded += discarded;
474 uchan->per_pid_closed_app_lost += lost;
475
476end:
477 rcu_read_unlock();
e32d7f27
JG
478 if (session) {
479 session_put(session);
480 }
fb83fe64
JD
481}
482
d80a6244
DG
483/*
484 * Delete ust app channel safely. RCU read lock must be held before calling
485 * this function.
82cac6d2
JG
486 *
487 * The session list lock must be held by the caller.
d80a6244 488 */
8b366481 489static
d0b96690
DG
490void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
491 struct ust_app *app)
d80a6244
DG
492{
493 int ret;
bec39940 494 struct lttng_ht_iter iter;
d80a6244 495 struct ust_app_event *ua_event;
55cc08a6 496 struct ust_app_ctx *ua_ctx;
030a66fa 497 struct ust_app_stream *stream, *stmp;
7972aab2 498 struct ust_registry_session *registry;
d80a6244 499
ffe60014
DG
500 assert(ua_chan);
501
502 DBG3("UST app deleting channel %s", ua_chan->name);
503
55cc08a6 504 /* Wipe stream */
d80a6244 505 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
84cd17c6 506 cds_list_del(&stream->list);
fb45065e 507 delete_ust_app_stream(sock, stream, app);
d80a6244
DG
508 }
509
55cc08a6 510 /* Wipe context */
bec39940 511 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
31746f93 512 cds_list_del(&ua_ctx->list);
bec39940 513 ret = lttng_ht_del(ua_chan->ctx, &iter);
55cc08a6 514 assert(!ret);
fb45065e 515 delete_ust_app_ctx(sock, ua_ctx, app);
55cc08a6 516 }
d80a6244 517
55cc08a6 518 /* Wipe events */
bec39940
DG
519 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
520 node.node) {
521 ret = lttng_ht_del(ua_chan->events, &iter);
525b0740 522 assert(!ret);
fb45065e 523 delete_ust_app_event(sock, ua_event, app);
d80a6244 524 }
edb67388 525
c8335706
MD
526 if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
527 /* Wipe and free registry from session registry. */
528 registry = get_session_registry(ua_chan->session);
529 if (registry) {
e9404c27 530 ust_registry_channel_del_free(registry, ua_chan->key,
e38d96f9
MD
531 sock >= 0);
532 }
45798a31
JG
533 /*
534 * A negative socket can be used by the caller when
535 * cleaning-up a ua_chan in an error path. Skip the
536 * accounting in this case.
537 */
e38d96f9
MD
538 if (sock >= 0) {
539 save_per_pid_lost_discarded_counters(ua_chan);
c8335706 540 }
7972aab2 541 }
d0b96690 542
edb67388 543 if (ua_chan->obj != NULL) {
d0b96690
DG
544 /* Remove channel from application UST object descriptor. */
545 iter.iter.node = &ua_chan->ust_objd_node.node;
c6e62271
DG
546 ret = lttng_ht_del(app->ust_objd, &iter);
547 assert(!ret);
fb45065e 548 pthread_mutex_lock(&app->sock_lock);
ffe60014 549 ret = ustctl_release_object(sock, ua_chan->obj);
fb45065e 550 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
551 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
552 ERR("UST app sock %d release channel obj failed with ret %d",
553 sock, ret);
554 }
7972aab2 555 lttng_fd_put(LTTNG_FD_APPS, 1);
edb67388
DG
556 free(ua_chan->obj);
557 }
36b588ed 558 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
d80a6244
DG
559}
560
fb45065e
MD
561int ust_app_register_done(struct ust_app *app)
562{
563 int ret;
564
565 pthread_mutex_lock(&app->sock_lock);
566 ret = ustctl_register_done(app->sock);
567 pthread_mutex_unlock(&app->sock_lock);
568 return ret;
569}
570
571int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
572{
573 int ret, sock;
574
575 if (app) {
576 pthread_mutex_lock(&app->sock_lock);
577 sock = app->sock;
578 } else {
579 sock = -1;
580 }
581 ret = ustctl_release_object(sock, data);
582 if (app) {
583 pthread_mutex_unlock(&app->sock_lock);
584 }
585 return ret;
586}
587
331744e3 588/*
1b532a60
DG
589 * Push metadata to consumer socket.
590 *
dc2bbdae
MD
591 * RCU read-side lock must be held to guarantee existance of socket.
592 * Must be called with the ust app session lock held.
593 * Must be called with the registry lock held.
331744e3
JD
594 *
595 * On success, return the len of metadata pushed or else a negative value.
2c57e06d
MD
596 * Returning a -EPIPE return value means we could not send the metadata,
597 * but it can be caused by recoverable errors (e.g. the application has
598 * terminated concurrently).
331744e3
JD
599 */
600ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
601 struct consumer_socket *socket, int send_zero_data)
602{
603 int ret;
604 char *metadata_str = NULL;
c585821b 605 size_t len, offset, new_metadata_len_sent;
331744e3 606 ssize_t ret_val;
93ec662e 607 uint64_t metadata_key, metadata_version;
331744e3
JD
608
609 assert(registry);
610 assert(socket);
1b532a60 611
c585821b
MD
612 metadata_key = registry->metadata_key;
613
ce34fcd0 614 /*
dc2bbdae
MD
615 * Means that no metadata was assigned to the session. This can
616 * happens if no start has been done previously.
ce34fcd0 617 */
c585821b 618 if (!metadata_key) {
ce34fcd0
MD
619 return 0;
620 }
621
331744e3
JD
622 offset = registry->metadata_len_sent;
623 len = registry->metadata_len - registry->metadata_len_sent;
c585821b 624 new_metadata_len_sent = registry->metadata_len;
93ec662e 625 metadata_version = registry->metadata_version;
331744e3
JD
626 if (len == 0) {
627 DBG3("No metadata to push for metadata key %" PRIu64,
628 registry->metadata_key);
629 ret_val = len;
630 if (send_zero_data) {
631 DBG("No metadata to push");
632 goto push_data;
633 }
634 goto end;
635 }
636
637 /* Allocate only what we have to send. */
638 metadata_str = zmalloc(len);
639 if (!metadata_str) {
640 PERROR("zmalloc ust app metadata string");
641 ret_val = -ENOMEM;
642 goto error;
643 }
c585821b 644 /* Copy what we haven't sent out. */
331744e3 645 memcpy(metadata_str, registry->metadata + offset, len);
331744e3
JD
646
647push_data:
c585821b
MD
648 pthread_mutex_unlock(&registry->lock);
649 /*
650 * We need to unlock the registry while we push metadata to
651 * break a circular dependency between the consumerd metadata
652 * lock and the sessiond registry lock. Indeed, pushing metadata
653 * to the consumerd awaits that it gets pushed all the way to
654 * relayd, but doing so requires grabbing the metadata lock. If
655 * a concurrent metadata request is being performed by
656 * consumerd, this can try to grab the registry lock on the
657 * sessiond while holding the metadata lock on the consumer
658 * daemon. Those push and pull schemes are performed on two
659 * different bidirectionnal communication sockets.
660 */
661 ret = consumer_push_metadata(socket, metadata_key,
93ec662e 662 metadata_str, len, offset, metadata_version);
c585821b 663 pthread_mutex_lock(&registry->lock);
331744e3 664 if (ret < 0) {
000baf6a 665 /*
dc2bbdae
MD
666 * There is an acceptable race here between the registry
667 * metadata key assignment and the creation on the
668 * consumer. The session daemon can concurrently push
669 * metadata for this registry while being created on the
670 * consumer since the metadata key of the registry is
671 * assigned *before* it is setup to avoid the consumer
672 * to ask for metadata that could possibly be not found
673 * in the session daemon.
000baf6a 674 *
dc2bbdae
MD
675 * The metadata will get pushed either by the session
676 * being stopped or the consumer requesting metadata if
677 * that race is triggered.
000baf6a
DG
678 */
679 if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
680 ret = 0;
c585821b
MD
681 } else {
682 ERR("Error pushing metadata to consumer");
000baf6a 683 }
331744e3
JD
684 ret_val = ret;
685 goto error_push;
c585821b
MD
686 } else {
687 /*
688 * Metadata may have been concurrently pushed, since
689 * we're not holding the registry lock while pushing to
690 * consumer. This is handled by the fact that we send
691 * the metadata content, size, and the offset at which
692 * that metadata belongs. This may arrive out of order
693 * on the consumer side, and the consumer is able to
694 * deal with overlapping fragments. The consumer
695 * supports overlapping fragments, which must be
696 * contiguous starting from offset 0. We keep the
697 * largest metadata_len_sent value of the concurrent
698 * send.
699 */
700 registry->metadata_len_sent =
701 max_t(size_t, registry->metadata_len_sent,
702 new_metadata_len_sent);
331744e3 703 }
331744e3
JD
704 free(metadata_str);
705 return len;
706
707end:
708error:
ce34fcd0
MD
709 if (ret_val) {
710 /*
dc2bbdae
MD
711 * On error, flag the registry that the metadata is
712 * closed. We were unable to push anything and this
713 * means that either the consumer is not responding or
714 * the metadata cache has been destroyed on the
715 * consumer.
ce34fcd0
MD
716 */
717 registry->metadata_closed = 1;
718 }
331744e3
JD
719error_push:
720 free(metadata_str);
721 return ret_val;
722}
723
d88aee68 724/*
ce34fcd0 725 * For a given application and session, push metadata to consumer.
331744e3
JD
726 * Either sock or consumer is required : if sock is NULL, the default
727 * socket to send the metadata is retrieved from consumer, if sock
728 * is not NULL we use it to send the metadata.
ce34fcd0 729 * RCU read-side lock must be held while calling this function,
dc2bbdae
MD
730 * therefore ensuring existance of registry. It also ensures existance
731 * of socket throughout this function.
d88aee68
DG
732 *
733 * Return 0 on success else a negative error.
2c57e06d
MD
734 * Returning a -EPIPE return value means we could not send the metadata,
735 * but it can be caused by recoverable errors (e.g. the application has
736 * terminated concurrently).
d88aee68 737 */
7972aab2
DG
738static int push_metadata(struct ust_registry_session *registry,
739 struct consumer_output *consumer)
d88aee68 740{
331744e3
JD
741 int ret_val;
742 ssize_t ret;
d88aee68
DG
743 struct consumer_socket *socket;
744
7972aab2
DG
745 assert(registry);
746 assert(consumer);
747
ce34fcd0 748 pthread_mutex_lock(&registry->lock);
ce34fcd0 749 if (registry->metadata_closed) {
dc2bbdae
MD
750 ret_val = -EPIPE;
751 goto error;
d88aee68
DG
752 }
753
d88aee68 754 /* Get consumer socket to use to push the metadata.*/
7972aab2
DG
755 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
756 consumer);
d88aee68 757 if (!socket) {
331744e3 758 ret_val = -1;
ce34fcd0 759 goto error;
d88aee68
DG
760 }
761
331744e3 762 ret = ust_app_push_metadata(registry, socket, 0);
d88aee68 763 if (ret < 0) {
331744e3 764 ret_val = ret;
ce34fcd0 765 goto error;
d88aee68 766 }
dc2bbdae 767 pthread_mutex_unlock(&registry->lock);
d88aee68
DG
768 return 0;
769
ce34fcd0 770error:
dc2bbdae 771 pthread_mutex_unlock(&registry->lock);
331744e3 772 return ret_val;
d88aee68
DG
773}
774
775/*
776 * Send to the consumer a close metadata command for the given session. Once
777 * done, the metadata channel is deleted and the session metadata pointer is
dc2bbdae 778 * nullified. The session lock MUST be held unless the application is
d88aee68
DG
779 * in the destroy path.
780 *
a70ac2f4
MD
781 * Do not hold the registry lock while communicating with the consumerd, because
782 * doing so causes inter-process deadlocks between consumerd and sessiond with
783 * the metadata request notification.
784 *
d88aee68
DG
785 * Return 0 on success else a negative value.
786 */
7972aab2
DG
787static int close_metadata(struct ust_registry_session *registry,
788 struct consumer_output *consumer)
d88aee68
DG
789{
790 int ret;
791 struct consumer_socket *socket;
a70ac2f4
MD
792 uint64_t metadata_key;
793 bool registry_was_already_closed;
d88aee68 794
7972aab2
DG
795 assert(registry);
796 assert(consumer);
d88aee68 797
7972aab2
DG
798 rcu_read_lock();
799
ce34fcd0 800 pthread_mutex_lock(&registry->lock);
a70ac2f4
MD
801 metadata_key = registry->metadata_key;
802 registry_was_already_closed = registry->metadata_closed;
803 if (metadata_key != 0) {
804 /*
805 * Metadata closed. Even on error this means that the consumer
806 * is not responding or not found so either way a second close
807 * should NOT be emit for this registry.
808 */
809 registry->metadata_closed = 1;
810 }
811 pthread_mutex_unlock(&registry->lock);
ce34fcd0 812
a70ac2f4 813 if (metadata_key == 0 || registry_was_already_closed) {
d88aee68 814 ret = 0;
1b532a60 815 goto end;
d88aee68
DG
816 }
817
d88aee68 818 /* Get consumer socket to use to push the metadata.*/
7972aab2
DG
819 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
820 consumer);
d88aee68
DG
821 if (!socket) {
822 ret = -1;
a70ac2f4 823 goto end;
d88aee68
DG
824 }
825
a70ac2f4 826 ret = consumer_close_metadata(socket, metadata_key);
d88aee68 827 if (ret < 0) {
a70ac2f4 828 goto end;
d88aee68
DG
829 }
830
1b532a60 831end:
7972aab2 832 rcu_read_unlock();
d88aee68
DG
833 return ret;
834}
835
36b588ed
MD
836/*
837 * We need to execute ht_destroy outside of RCU read-side critical
0b2dc8df
MD
838 * section and outside of call_rcu thread, so we postpone its execution
839 * using ht_cleanup_push. It is simpler than to change the semantic of
840 * the many callers of delete_ust_app_session().
36b588ed
MD
841 */
842static
843void delete_ust_app_session_rcu(struct rcu_head *head)
844{
845 struct ust_app_session *ua_sess =
846 caa_container_of(head, struct ust_app_session, rcu_head);
847
0b2dc8df 848 ht_cleanup_push(ua_sess->channels);
36b588ed
MD
849 free(ua_sess);
850}
851
d80a6244
DG
852/*
853 * Delete ust app session safely. RCU read lock must be held before calling
854 * this function.
82cac6d2
JG
855 *
856 * The session list lock must be held by the caller.
d80a6244 857 */
8b366481 858static
d0b96690
DG
859void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
860 struct ust_app *app)
d80a6244
DG
861{
862 int ret;
bec39940 863 struct lttng_ht_iter iter;
d80a6244 864 struct ust_app_channel *ua_chan;
7972aab2 865 struct ust_registry_session *registry;
d80a6244 866
d88aee68
DG
867 assert(ua_sess);
868
1b532a60
DG
869 pthread_mutex_lock(&ua_sess->lock);
870
b161602a
MD
871 assert(!ua_sess->deleted);
872 ua_sess->deleted = true;
873
7972aab2 874 registry = get_session_registry(ua_sess);
fad1ed2f 875 /* Registry can be null on error path during initialization. */
ce34fcd0 876 if (registry) {
d88aee68 877 /* Push metadata for application before freeing the application. */
7972aab2 878 (void) push_metadata(registry, ua_sess->consumer);
d88aee68 879
7972aab2
DG
880 /*
881 * Don't ask to close metadata for global per UID buffers. Close
1b532a60
DG
882 * metadata only on destroy trace session in this case. Also, the
883 * previous push metadata could have flag the metadata registry to
884 * close so don't send a close command if closed.
7972aab2 885 */
ce34fcd0 886 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
7972aab2
DG
887 /* And ask to close it for this session registry. */
888 (void) close_metadata(registry, ua_sess->consumer);
889 }
d80a6244
DG
890 }
891
bec39940
DG
892 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
893 node.node) {
894 ret = lttng_ht_del(ua_sess->channels, &iter);
525b0740 895 assert(!ret);
d0b96690 896 delete_ust_app_channel(sock, ua_chan, app);
d80a6244 897 }
d80a6244 898
7972aab2
DG
899 /* In case of per PID, the registry is kept in the session. */
900 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
901 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
902 if (reg_pid) {
fad1ed2f
JR
903 /*
904 * Registry can be null on error path during
905 * initialization.
906 */
7972aab2
DG
907 buffer_reg_pid_remove(reg_pid);
908 buffer_reg_pid_destroy(reg_pid);
909 }
910 }
d0b96690 911
aee6bafd 912 if (ua_sess->handle != -1) {
fb45065e 913 pthread_mutex_lock(&app->sock_lock);
ffe60014 914 ret = ustctl_release_handle(sock, ua_sess->handle);
fb45065e 915 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
916 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
917 ERR("UST app sock %d release session handle failed with ret %d",
918 sock, ret);
919 }
10b56aef
MD
920 /* Remove session from application UST object descriptor. */
921 iter.iter.node = &ua_sess->ust_objd_node.node;
922 ret = lttng_ht_del(app->ust_sessions_objd, &iter);
923 assert(!ret);
aee6bafd 924 }
10b56aef 925
1b532a60
DG
926 pthread_mutex_unlock(&ua_sess->lock);
927
6addfa37
MD
928 consumer_output_put(ua_sess->consumer);
929
36b588ed 930 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
d80a6244 931}
91d76f53
DG
932
933/*
284d8f55
DG
934 * Delete a traceable application structure from the global list. Never call
935 * this function outside of a call_rcu call.
36b588ed
MD
936 *
937 * RCU read side lock should _NOT_ be held when calling this function.
91d76f53 938 */
8b366481
DG
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;
	struct lttng_ht_iter iter;
	struct ust_app_token_event_rule *token;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	/*
	 * Invalidate app->sock first so no new command can be issued on this
	 * application while it is being torn down; keep a local copy for the
	 * teardown calls below.
	 */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Wipe token associated with the app */
	cds_lfht_for_each_entry(app->tokens_ht->ht, &iter.iter, token,
			node.node) {
		ret = lttng_ht_del(app->tokens_ht, &iter);
		assert(!ret);
		delete_ust_app_token_event_rule(app->sock, token, app);
	}

	/*
	 * Hash table destruction is deferred to the ht-cleanup thread; the
	 * tables must be empty (or contain only reclaimable nodes) by now.
	 */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);
	ht_cleanup_push(app->tokens_ht);

	/* This can happen if trigger setup failed. e.g killed app */
	if (app->token_communication.handle) {
		ustctl_release_object(sock, app->token_communication.handle);
		free(app->token_communication.handle);
	}

	lttng_pipe_destroy(app->token_communication.trigger_event_pipe);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	/* Release the file-descriptor slot accounted for this app socket. */
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}
1009
1010/*
f6a9efaa 1011 * URCU intermediate call to delete an UST app.
099e26bd 1012 */
8b366481
DG
1013static
1014void delete_ust_app_rcu(struct rcu_head *head)
099e26bd 1015{
bec39940
DG
1016 struct lttng_ht_node_ulong *node =
1017 caa_container_of(head, struct lttng_ht_node_ulong, head);
f6a9efaa 1018 struct ust_app *app =
852d0037 1019 caa_container_of(node, struct ust_app, pid_n);
f6a9efaa 1020
852d0037 1021 DBG3("Call RCU deleting app PID %d", app->pid);
f6a9efaa 1022 delete_ust_app(app);
099e26bd
DG
1023}
1024
ffe60014
DG
1025/*
1026 * Delete the session from the application ht and delete the data structure by
1027 * freeing every object inside and releasing them.
82cac6d2
JG
1028 *
1029 * The session list lock must be held by the caller.
ffe60014 1030 */
d0b96690 1031static void destroy_app_session(struct ust_app *app,
ffe60014
DG
1032 struct ust_app_session *ua_sess)
1033{
1034 int ret;
1035 struct lttng_ht_iter iter;
1036
1037 assert(app);
1038 assert(ua_sess);
1039
1040 iter.iter.node = &ua_sess->node.node;
1041 ret = lttng_ht_del(app->sessions, &iter);
1042 if (ret) {
1043 /* Already scheduled for teardown. */
1044 goto end;
1045 }
1046
1047 /* Once deleted, free the data structure. */
d0b96690 1048 delete_ust_app_session(app->sock, ua_sess, app);
ffe60014
DG
1049
1050end:
1051 return;
1052}
1053
8b366481
DG
1054/*
1055 * Alloc new UST app session.
1056 */
1057static
40bbd087 1058struct ust_app_session *alloc_ust_app_session(void)
8b366481
DG
1059{
1060 struct ust_app_session *ua_sess;
1061
1062 /* Init most of the default value by allocating and zeroing */
1063 ua_sess = zmalloc(sizeof(struct ust_app_session));
1064 if (ua_sess == NULL) {
1065 PERROR("malloc");
ffe60014 1066 goto error_free;
8b366481
DG
1067 }
1068
1069 ua_sess->handle = -1;
bec39940 1070 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
ad7a9107 1071 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
84ad93e8 1072 pthread_mutex_init(&ua_sess->lock, NULL);
ad7a9107 1073
8b366481
DG
1074 return ua_sess;
1075
ffe60014 1076error_free:
8b366481
DG
1077 return NULL;
1078}
1079
1080/*
1081 * Alloc new UST app channel.
1082 */
1083static
b53d4e59 1084struct ust_app_channel *alloc_ust_app_channel(const char *name,
d0b96690 1085 struct ust_app_session *ua_sess,
ffe60014 1086 struct lttng_ust_channel_attr *attr)
8b366481
DG
1087{
1088 struct ust_app_channel *ua_chan;
1089
1090 /* Init most of the default value by allocating and zeroing */
1091 ua_chan = zmalloc(sizeof(struct ust_app_channel));
1092 if (ua_chan == NULL) {
1093 PERROR("malloc");
1094 goto error;
1095 }
1096
1097 /* Setup channel name */
1098 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1099 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1100
1101 ua_chan->enabled = 1;
1102 ua_chan->handle = -1;
45893984 1103 ua_chan->session = ua_sess;
ffe60014 1104 ua_chan->key = get_next_channel_key();
bec39940
DG
1105 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1106 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1107 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
8b366481
DG
1108
1109 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
31746f93 1110 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
8b366481
DG
1111
1112 /* Copy attributes */
1113 if (attr) {
ffe60014 1114 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
2fe6e7f5
DG
1115 ua_chan->attr.subbuf_size = attr->subbuf_size;
1116 ua_chan->attr.num_subbuf = attr->num_subbuf;
1117 ua_chan->attr.overwrite = attr->overwrite;
1118 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1119 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1120 ua_chan->attr.output = attr->output;
491d1539 1121 ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
8b366481 1122 }
ffe60014
DG
1123 /* By default, the channel is a per cpu channel. */
1124 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
8b366481
DG
1125
1126 DBG3("UST app channel %s allocated", ua_chan->name);
1127
1128 return ua_chan;
1129
1130error:
1131 return NULL;
1132}
1133
37f1c236
DG
1134/*
1135 * Allocate and initialize a UST app stream.
1136 *
1137 * Return newly allocated stream pointer or NULL on error.
1138 */
ffe60014 1139struct ust_app_stream *ust_app_alloc_stream(void)
37f1c236
DG
1140{
1141 struct ust_app_stream *stream = NULL;
1142
1143 stream = zmalloc(sizeof(*stream));
1144 if (stream == NULL) {
1145 PERROR("zmalloc ust app stream");
1146 goto error;
1147 }
1148
1149 /* Zero could be a valid value for a handle so flag it to -1. */
1150 stream->handle = -1;
1151
1152error:
1153 return stream;
1154}
1155
8b366481
DG
1156/*
1157 * Alloc new UST app event.
1158 */
1159static
1160struct ust_app_event *alloc_ust_app_event(char *name,
1161 struct lttng_ust_event *attr)
1162{
1163 struct ust_app_event *ua_event;
1164
1165 /* Init most of the default value by allocating and zeroing */
1166 ua_event = zmalloc(sizeof(struct ust_app_event));
1167 if (ua_event == NULL) {
20533947 1168 PERROR("Failed to allocate ust_app_event structure");
8b366481
DG
1169 goto error;
1170 }
1171
1172 ua_event->enabled = 1;
1173 strncpy(ua_event->name, name, sizeof(ua_event->name));
1174 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
bec39940 1175 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
8b366481
DG
1176
1177 /* Copy attributes */
1178 if (attr) {
1179 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1180 }
1181
1182 DBG3("UST app event %s allocated", ua_event->name);
1183
1184 return ua_event;
1185
1186error:
1187 return NULL;
1188}
1189
2463b787
JR
1190/*
1191 * Alloc new UST app token event rule.
1192 */
1193static struct ust_app_token_event_rule *alloc_ust_app_token_event_rule(
1194 struct lttng_trigger *trigger)
1195{
1196 struct ust_app_token_event_rule *ua_token;
1197 struct lttng_condition *condition = NULL;
1198 struct lttng_event_rule *event_rule = NULL;
1199
1200 ua_token = zmalloc(sizeof(struct ust_app_token_event_rule));
1201 if (ua_token == NULL) {
1202 PERROR("Failed to allocate ust_app_token_event_rule structure");
1203 goto error;
1204 }
1205
1206 /* Get reference of the trigger */
1207 /* TODO should this be like lttng_event_rule_get with a returned bool? */
1208 lttng_trigger_get(trigger);
1209
1210 ua_token->enabled = 1;
1211 ua_token->token = lttng_trigger_get_tracer_token(trigger);
1212 lttng_ht_node_init_u64(&ua_token->node, ua_token->token);
1213
1214 condition = lttng_trigger_get_condition(trigger);
1215 assert(condition);
1216 assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT);
1217
1218 assert(LTTNG_CONDITION_STATUS_OK == lttng_condition_event_rule_get_rule_mutable(condition, &event_rule));
1219 assert(event_rule);
1220
1221 ua_token->trigger = trigger;
1222 ua_token->filter = lttng_event_rule_get_filter_bytecode(event_rule);
1223 ua_token->exclusion = lttng_event_rule_generate_exclusions(event_rule);
1224 ua_token->error_counter_index = lttng_trigger_get_error_counter_index(trigger);
1225
1226 /* TODO put capture here? or later*/
1227
1228 DBG3("UST app token event rule %" PRIu64 " allocated", ua_token->token);
1229
1230 return ua_token;
1231
1232error:
1233 return NULL;
1234}
1235
8b366481
DG
1236/*
1237 * Alloc new UST app context.
1238 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		/* Shallow copy first; app-context strings are deep-copied below. */
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
		if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *ctx_name = NULL;

			/*
			 * Duplicate the caller-owned name strings so this
			 * context owns independent copies; both must succeed
			 * or the whole allocation is rolled back.
			 */
			provider_name = strdup(uctx->u.app_ctx.provider_name);
			ctx_name = strdup(uctx->u.app_ctx.ctx_name);
			if (!provider_name || !ctx_name) {
				free(provider_name);
				free(ctx_name);
				goto error;
			}

			ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
			ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
		}
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
	return ua_ctx;
error:
	/* free(NULL) is a no-op, so this covers the zmalloc failure too. */
	free(ua_ctx);
	return NULL;
}
1275
025faf73 1276/*
2463b787 1277 * Create a liblttng-ust filter bytecode from given bytecode.
025faf73
DG
1278 *
1279 * Return allocated filter or NULL on error.
1280 */
2463b787
JR
1281static struct lttng_ust_filter_bytecode *
1282create_ust_filter_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
025faf73 1283{
2463b787 1284 struct lttng_ust_filter_bytecode *filter = NULL;
025faf73
DG
1285
1286 /* Copy filter bytecode */
1287 filter = zmalloc(sizeof(*filter) + orig_f->len);
1288 if (!filter) {
2463b787 1289 PERROR("zmalloc alloc ust filter bytecode");
025faf73
DG
1290 goto error;
1291 }
1292
2463b787
JR
1293 assert(sizeof(struct lttng_bytecode) ==
1294 sizeof(struct lttng_ust_filter_bytecode));
025faf73 1295 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
025faf73
DG
1296error:
1297 return filter;
1298}
1299
51755dc8 1300/*
2463b787 1301 * Create a liblttng-ust capture bytecode from given bytecode.
51755dc8
JG
1302 *
1303 * Return allocated filter or NULL on error.
1304 */
2463b787
JR
1305static struct lttng_ust_capture_bytecode *
1306create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
51755dc8 1307{
2463b787 1308 struct lttng_ust_capture_bytecode *capture = NULL;
51755dc8 1309
2463b787
JR
1310 /* Copy capture bytecode */
1311 capture = zmalloc(sizeof(*capture) + orig_f->len);
1312 if (!capture) {
1313 PERROR("zmalloc alloc ust capture bytecode");
51755dc8
JG
1314 goto error;
1315 }
1316
2463b787
JR
1317 assert(sizeof(struct lttng_bytecode) ==
1318 sizeof(struct lttng_ust_capture_bytecode));
1319 memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
51755dc8 1320error:
2463b787 1321 return capture;
51755dc8
JG
1322}
1323
099e26bd 1324/*
421cb601
DG
1325 * Find an ust_app using the sock and return it. RCU read side lock must be
1326 * held before calling this helper function.
099e26bd 1327 */
f20baf8e 1328struct ust_app *ust_app_find_by_sock(int sock)
099e26bd 1329{
bec39940 1330 struct lttng_ht_node_ulong *node;
bec39940 1331 struct lttng_ht_iter iter;
f6a9efaa 1332
852d0037 1333 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
bec39940 1334 node = lttng_ht_iter_get_node_ulong(&iter);
f6a9efaa
DG
1335 if (node == NULL) {
1336 DBG2("UST app find by sock %d not found", sock);
f6a9efaa
DG
1337 goto error;
1338 }
852d0037
DG
1339
1340 return caa_container_of(node, struct ust_app, sock_n);
f6a9efaa
DG
1341
1342error:
1343 return NULL;
099e26bd
DG
1344}
1345
d0b96690
DG
1346/*
1347 * Find an ust_app using the notify sock and return it. RCU read side lock must
1348 * be held before calling this helper function.
1349 */
1350static struct ust_app *find_app_by_notify_sock(int sock)
1351{
1352 struct lttng_ht_node_ulong *node;
1353 struct lttng_ht_iter iter;
1354
1355 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1356 &iter);
1357 node = lttng_ht_iter_get_node_ulong(&iter);
1358 if (node == NULL) {
1359 DBG2("UST app find by notify sock %d not found", sock);
1360 goto error;
1361 }
1362
1363 return caa_container_of(node, struct ust_app, notify_sock_n);
1364
1365error:
1366 return NULL;
1367}
1368
025faf73
DG
1369/*
1370 * Lookup for an ust app event based on event name, filter bytecode and the
1371 * event loglevel.
1372 *
1373 * Return an ust_app_event object or NULL on error.
1374 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		const char *name, const struct lttng_bytecode *filter,
		int loglevel_value,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel_type = loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/*
	 * Lookup using the event name as hash and a custom match fct.
	 * ht_match_ust_app_event compares the full key (name, filter,
	 * loglevel and exclusions) to disambiguate same-name entries.
	 */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}
1408
2463b787
JR
1409/*
1410 * Lookup for an ust app tokens based on a token id.
1411 *
1412 * Return an ust_app_token_event_rule object or NULL on error.
1413 */
1414static struct ust_app_token_event_rule *find_ust_app_token_event_rule(struct lttng_ht *ht,
1415 uint64_t token)
1416{
1417 struct lttng_ht_iter iter;
1418 struct lttng_ht_node_u64 *node;
1419 struct ust_app_token_event_rule *token_event_rule = NULL;
1420
1421 assert(ht);
1422
1423 lttng_ht_lookup(ht, &token, &iter);
1424 node = lttng_ht_iter_get_node_u64(&iter);
1425 if (node == NULL) {
1426 DBG2("UST app token %" PRIu64 " not found", token);
1427 goto end;
1428 }
1429
1430 token_event_rule = caa_container_of(node, struct ust_app_token_event_rule, node);
1431end:
1432 return token_event_rule;
1433}
1434
55cc08a6
DG
1435/*
1436 * Create the channel context on the tracer.
d0b96690
DG
1437 *
1438 * Called with UST app session lock held.
55cc08a6
DG
1439 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	/* Serialize all ustctl commands issued on this application socket. */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app create channel context failed for app (pid: %d) "
					"with ret %d", app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app add context failed. Application is dead.");
		}
		goto error;
	}

	/* Keep the tracer-assigned handle for later commands on this context. */
	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}
1477
53a80697
MD
1478/*
1479 * Set the filter on the tracer.
1480 */
2463b787
JR
static int set_ust_filter(struct ust_app *app,
		const struct lttng_bytecode *bytecode,
		struct lttng_ust_object_data *ust_object)
{
	int ret;
	struct lttng_ust_filter_bytecode *ust_bytecode = NULL;

	health_code_update();

	/* Convert the sessiond-side bytecode to the UST ABI representation. */
	ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	/* Serialize all ustctl commands issued on this application socket. */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_filter(app->sock, ust_bytecode,
			ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app set filter failed for object %p of app (pid: %d) "
					"with ret %d", ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app set filter. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter set for object %p successfully", ust_object);

error:
	health_code_update();
	/* The temporary UST-ABI copy is no longer needed, in all cases. */
	free(ust_bytecode);
	return ret;
}
1522
1523/*
1524 * Set a capture bytecode for the passed object.
1525 * The seqnum enforce the ordering at runtime and on reception.
1526 */
static int set_ust_capture(struct ust_app *app,
		const struct lttng_bytecode *bytecode,
		unsigned int seqnum,
		struct lttng_ust_object_data *ust_object)
{
	int ret;
	struct lttng_ust_capture_bytecode *ust_bytecode = NULL;

	health_code_update();

	/* Convert the sessiond-side bytecode to the UST ABI representation. */
	ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}

	/* Set the seqnum */
	ust_bytecode->seqnum = seqnum;

	/* Serialize all ustctl commands issued on this application socket. */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_capture(app->sock, ust_bytecode,
			ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app set capture failed for object %p of app (pid: %d) "
					"with ret %d", ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app set capture. Application is dead.");
		}
		goto error;
	}

	DBG2("UST capture set for object %p successfully", ust_object);

error:
	health_code_update();
	/* The temporary UST-ABI copy is no longer needed, in all cases. */
	free(ust_bytecode);
	return ret;
}
1573
51755dc8
JG
1574static
1575struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
1576 struct lttng_event_exclusion *exclusion)
1577{
1578 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1579 size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1580 LTTNG_UST_SYM_NAME_LEN * exclusion->count;
1581
1582 ust_exclusion = zmalloc(exclusion_alloc_size);
1583 if (!ust_exclusion) {
1584 PERROR("malloc");
1585 goto end;
1586 }
1587
1588 assert(sizeof(struct lttng_event_exclusion) ==
1589 sizeof(struct lttng_ust_event_exclusion));
1590 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1591end:
1592 return ust_exclusion;
1593}
1594
7cc9a73c
JI
1595/*
1596 * Set event exclusions on the tracer.
1597 */
2463b787
JR
static int set_ust_exclusions(struct ust_app *app,
		struct lttng_event_exclusion *exclusions,
		struct lttng_ust_object_data *ust_object)
{
	int ret;
	struct lttng_ust_event_exclusion *ust_exclusions = NULL;

	assert(exclusions && exclusions->count > 0);

	health_code_update();

	/* Convert the sessiond-side exclusions to the UST ABI representation. */
	ust_exclusions = create_ust_exclusion_from_exclusion(
			exclusions);
	if (!ust_exclusions) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	/* Serialize all ustctl commands issued on this application socket. */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app exclusions failed for object %p of app (pid: %d) "
					"with ret %d", ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app set exclusions failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST exclusions set successfully for object %p", ust_object);

error:
	health_code_update();
	/* The temporary UST-ABI copy is no longer needed, in all cases. */
	free(ust_exclusions);
	return ret;
}
1641
9730260e
DG
1642/*
1643 * Disable the specified event on to UST tracer for the UST session.
1644 */
2463b787
JR
static int disable_ust_object(struct ust_app *app,
		struct lttng_ust_object_data *object)
{
	int ret;

	health_code_update();

	/* Serialize all ustctl commands issued on this application socket. */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app disable failed for object %p app (pid: %d) with ret %d",
					object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p disabled successfully for app (pid: %d)",
			object, app->pid);

error:
	health_code_update();
	return ret;
}
1678
78f0bacd
DG
1679/*
1680 * Disable the specified channel on to UST tracer for the UST session.
1681 */
static int disable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	/* Serialize all ustctl commands issued on this application socket. */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable channel failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app channel %s disabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}
1716
1717/*
1718 * Enable the specified channel on to UST tracer for the UST session.
1719 */
static int enable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	/* Serialize all ustctl commands issued on this application socket. */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable channel failed. Application is dead.");
		}
		goto error;
	}

	/* Only mark the channel enabled once the tracer acknowledged it. */
	ua_chan->enabled = 1;

	DBG2("UST app channel %s enabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}
1756
edb67388
DG
1757/*
1758 * Enable the specified event on to UST tracer for the UST session.
1759 */
static int enable_ust_object(struct ust_app *app, struct lttng_ust_object_data *ust_object)
{
	int ret;

	health_code_update();

	/* Serialize all ustctl commands issued on this application socket. */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
					ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p enabled successfully for app (pid: %d)",
			ust_object, app->pid);

error:
	health_code_update();
	return ret;
}
1792
099e26bd 1793/*
7972aab2 1794 * Send channel and stream buffer to application.
4f3ab6ee 1795 *
ffe60014 1796 * Return 0 on success. On error, a negative value is returned.
4f3ab6ee 1797 */
7972aab2
DG
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN; /* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
1844
91d76f53 1845/*
5b4a0ec0 1846 * Create the specified event onto the UST tracer for a UST session.
d0b96690
DG
1847 *
1848 * Should be called with session mutex held.
91d76f53 1849 */
edb67388
DG
1850static
1851int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1852 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
91d76f53 1853{
5b4a0ec0 1854 int ret = 0;
284d8f55 1855
840cb59c 1856 health_code_update();
86acf0da 1857
5b4a0ec0 1858 /* Create UST event on tracer */
fb45065e 1859 pthread_mutex_lock(&app->sock_lock);
852d0037 1860 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
5b4a0ec0 1861 &ua_event->obj);
fb45065e 1862 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0 1863 if (ret < 0) {
ffe60014 1864 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
88e3c2f5 1865 abort();
ffe60014
DG
1866 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1867 ua_event->attr.name, app->pid, ret);
1868 } else {
3757b385
DG
1869 /*
1870 * This is normal behavior, an application can die during the
1871 * creation process. Don't report an error so the execution can
1872 * continue normally.
1873 */
1874 ret = 0;
ffe60014
DG
1875 DBG3("UST app create event failed. Application is dead.");
1876 }
5b4a0ec0 1877 goto error;
91d76f53 1878 }
f6a9efaa 1879
5b4a0ec0 1880 ua_event->handle = ua_event->obj->handle;
284d8f55 1881
2463b787
JR
1882 DBG2("UST app event %s created successfully for pid:%d object: %p",
1883 ua_event->attr.name, app->pid, ua_event->obj);
f6a9efaa 1884
840cb59c 1885 health_code_update();
86acf0da 1886
025faf73
DG
1887 /* Set filter if one is present. */
1888 if (ua_event->filter) {
2463b787 1889 ret = set_ust_filter(app, ua_event->filter, ua_event->obj);
025faf73
DG
1890 if (ret < 0) {
1891 goto error;
1892 }
1893 }
1894
7cc9a73c
JI
1895 /* Set exclusions for the event */
1896 if (ua_event->exclusion) {
2463b787 1897 ret = set_ust_exclusions(app, ua_event->exclusion, ua_event->obj);
7cc9a73c
JI
1898 if (ret < 0) {
1899 goto error;
1900 }
1901 }
1902
8535a6d9 1903 /* If event not enabled, disable it on the tracer */
40113787
MD
1904 if (ua_event->enabled) {
1905 /*
1906 * We now need to explicitly enable the event, since it
1907 * is now disabled at creation.
1908 */
2463b787 1909 ret = enable_ust_object(app, ua_event->obj);
40113787
MD
1910 if (ret < 0) {
1911 /*
1912 * If we hit an EPERM, something is wrong with our enable call. If
1913 * we get an EEXIST, there is a problem on the tracer side since we
1914 * just created it.
1915 */
1916 switch (ret) {
1917 case -LTTNG_UST_ERR_PERM:
1918 /* Code flow problem */
1919 assert(0);
1920 case -LTTNG_UST_ERR_EXIST:
1921 /* It's OK for our use case. */
1922 ret = 0;
1923 break;
1924 default:
1925 break;
1926 }
1927 goto error;
1928 }
8535a6d9
DG
1929 }
1930
5b4a0ec0 1931error:
840cb59c 1932 health_code_update();
5b4a0ec0 1933 return ret;
91d76f53 1934}
48842b30 1935
2463b787
JR
1936static
1937void init_ust_trigger_from_event_rule(const struct lttng_event_rule *rule, struct lttng_ust_trigger *trigger)
1938{
1939 enum lttng_event_rule_status status;
1940 enum lttng_loglevel_type loglevel_type;
1941 enum lttng_ust_loglevel_type ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1942 int loglevel = -1;
1943 const char *pattern;
1944
1945 /* For now only LTTNG_EVENT_RULE_TYPE_TRACEPOINT are supported */
1946 assert(lttng_event_rule_get_type(rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
1947
1948 memset(trigger, 0, sizeof(*trigger));
1949
1950 if (lttng_event_rule_is_agent(rule)) {
1951 /*
1952 * Special event for agents
1953 * The actual meat of the event is in the filter that will be
1954 * attached later on.
1955 * Set the default values for the agent event.
1956 */
1957 pattern = event_get_default_agent_ust_name(lttng_event_rule_get_domain_type(rule));
1958 loglevel = 0;
1959 ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1960 } else {
1961 status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
1962 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
1963 /* At this point this is a fatal error */
1964 assert(0);
1965 }
1966
1967 status = lttng_event_rule_tracepoint_get_log_level_type(
1968 rule, &loglevel_type);
1969 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
1970 /* At this point this is a fatal error */
1971 assert(0);
1972 }
1973
1974 switch (loglevel_type) {
1975 case LTTNG_EVENT_LOGLEVEL_ALL:
1976 ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1977 break;
1978 case LTTNG_EVENT_LOGLEVEL_RANGE:
1979 ust_loglevel_type = LTTNG_UST_LOGLEVEL_RANGE;
1980 break;
1981 case LTTNG_EVENT_LOGLEVEL_SINGLE:
1982 ust_loglevel_type = LTTNG_UST_LOGLEVEL_SINGLE;
1983 break;
1984 }
1985
1986 if (loglevel_type != LTTNG_EVENT_LOGLEVEL_ALL) {
1987 status = lttng_event_rule_tracepoint_get_log_level(
1988 rule, &loglevel);
1989 assert(status == LTTNG_EVENT_RULE_STATUS_OK);
1990 }
1991 }
1992
1993 trigger->instrumentation = LTTNG_UST_TRACEPOINT;
1994 strncpy(trigger->name, pattern, LTTNG_UST_SYM_NAME_LEN - 1);
1995 trigger->loglevel_type = ust_loglevel_type;
1996 trigger->loglevel = loglevel;
1997}
1998
1999/*
2000 * Create the specified event rule token onto the UST tracer for a UST app.
2001 */
2002static
2003int create_ust_token_event_rule(struct ust_app *app, struct ust_app_token_event_rule *ua_token)
2004{
2005 int ret = 0;
2006 struct lttng_ust_trigger trigger;
2007 struct lttng_condition *condition = NULL;
2008 struct lttng_event_rule *event_rule = NULL;
2009 unsigned int capture_bytecode_count = 0;
2010
2011 health_code_update();
2012 assert(app->token_communication.handle);
2013
2014 condition = lttng_trigger_get_condition(ua_token->trigger);
2015 assert(condition);
2016 assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT);
2017
2018 lttng_condition_event_rule_get_rule_mutable(condition, &event_rule);
2019 assert(event_rule);
2020 assert(lttng_event_rule_get_type(event_rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
2021 /* Should we also test for UST at this point, or do we trust all the
2022 * upper level? */
2023
2024 init_ust_trigger_from_event_rule(event_rule, &trigger);
2025
2026 trigger.id = ua_token->token;
2027 trigger.error_counter_index = ua_token->error_counter_index;
2028
2029 /* Create UST trigger on tracer */
2030 pthread_mutex_lock(&app->sock_lock);
2031 ret = ustctl_create_trigger(app->sock, &trigger, app->token_communication.handle, &ua_token->obj);
2032 pthread_mutex_unlock(&app->sock_lock);
2033 if (ret < 0) {
2034 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2035 abort();
2036 ERR("Error ustctl create trigger %s for app pid: %d with ret %d",
2037 trigger.name, app->pid, ret);
2038 } else {
2039 /*
2040 * This is normal behavior, an application can die during the
2041 * creation process. Don't report an error so the execution can
2042 * continue normally.
2043 */
2044 ret = 0;
2045 DBG3("UST app create event failed. Application is dead.");
2046 }
2047 goto error;
2048 }
2049
2050 ua_token->handle = ua_token->obj->handle;
2051
2052 DBG2("UST app event %s created successfully for pid:%d object: %p",
2053 trigger.name, app->pid, ua_token->obj);
2054
2055 health_code_update();
2056
2057 /* Set filter if one is present. */
2058 if (ua_token->filter) {
2059 ret = set_ust_filter(app, ua_token->filter, ua_token->obj);
2060 if (ret < 0) {
2061 goto error;
2062 }
2063 }
2064
2065 /* Set exclusions for the event */
2066 if (ua_token->exclusion) {
2067 ret = set_ust_exclusions(app, ua_token->exclusion, ua_token->obj);
2068 if (ret < 0) {
2069 goto error;
2070 }
2071 }
2072
2073 /* Set the capture bytecode
2074 * TODO: do we want to emulate what is done with exclusion and provide
2075 * and object with a count of capture bytecode? instead of multiple
2076 * call?
2077 * */
2078 capture_bytecode_count = lttng_trigger_get_capture_bytecode_count(ua_token->trigger);
2079 for (unsigned int i = 0; i < capture_bytecode_count; i++) {
2080 const struct lttng_bytecode *capture_bytecode = lttng_trigger_get_capture_bytecode_at_index(ua_token->trigger, i);
2081 ret = set_ust_capture(app, capture_bytecode, i, ua_token->obj);
2082 if (ret < 0) {
2083 goto error;
2084 }
2085 }
2086
2087 /*
2088 * We now need to explicitly enable the event, since it
2089 * is disabled at creation.
2090 */
2091 ret = enable_ust_object(app, ua_token->obj);
2092 if (ret < 0) {
2093 /*
2094 * If we hit an EPERM, something is wrong with our enable call. If
2095 * we get an EEXIST, there is a problem on the tracer side since we
2096 * just created it.
2097 */
2098 switch (ret) {
2099 case -LTTNG_UST_ERR_PERM:
2100 /* Code flow problem */
2101 assert(0);
2102 case -LTTNG_UST_ERR_EXIST:
2103 /* It's OK for our use case. */
2104 ret = 0;
2105 break;
2106 default:
2107 break;
2108 }
2109 goto error;
2110 }
2111 ua_token->enabled = true;
2112
2113error:
2114 health_code_update();
2115 return ret;
2116}
2117
5b4a0ec0
DG
2118/*
2119 * Copy data between an UST app event and a LTT event.
2120 */
421cb601 2121static void shadow_copy_event(struct ust_app_event *ua_event,
48842b30
DG
2122 struct ltt_ust_event *uevent)
2123{
b4ffad32
JI
2124 size_t exclusion_alloc_size;
2125
48842b30
DG
2126 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
2127 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
2128
fc34caaa
DG
2129 ua_event->enabled = uevent->enabled;
2130
5b4a0ec0
DG
2131 /* Copy event attributes */
2132 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
2133
53a80697
MD
2134 /* Copy filter bytecode */
2135 if (uevent->filter) {
2463b787 2136 ua_event->filter = bytecode_copy(uevent->filter);
025faf73 2137 /* Filter might be NULL here in case of ENONEM. */
53a80697 2138 }
b4ffad32
JI
2139
2140 /* Copy exclusion data */
2141 if (uevent->exclusion) {
51755dc8 2142 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
b4ffad32
JI
2143 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
2144 ua_event->exclusion = zmalloc(exclusion_alloc_size);
5f8df26c
JI
2145 if (ua_event->exclusion == NULL) {
2146 PERROR("malloc");
2147 } else {
2148 memcpy(ua_event->exclusion, uevent->exclusion,
2149 exclusion_alloc_size);
b4ffad32
JI
2150 }
2151 }
48842b30
DG
2152}
2153
5b4a0ec0
DG
2154/*
2155 * Copy data between an UST app channel and a LTT channel.
2156 */
421cb601 2157static void shadow_copy_channel(struct ust_app_channel *ua_chan,
48842b30
DG
2158 struct ltt_ust_channel *uchan)
2159{
fc34caaa 2160 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
48842b30
DG
2161
2162 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2163 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
ffe60014 2164
1624d5b7
JD
2165 ua_chan->tracefile_size = uchan->tracefile_size;
2166 ua_chan->tracefile_count = uchan->tracefile_count;
2167
ffe60014
DG
2168 /* Copy event attributes since the layout is different. */
2169 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2170 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2171 ua_chan->attr.overwrite = uchan->attr.overwrite;
2172 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2173 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
e9404c27 2174 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
ffe60014 2175 ua_chan->attr.output = uchan->attr.output;
491d1539
MD
2176 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2177
ffe60014
DG
2178 /*
2179 * Note that the attribute channel type is not set since the channel on the
2180 * tracing registry side does not have this information.
2181 */
48842b30 2182
fc34caaa 2183 ua_chan->enabled = uchan->enabled;
7972aab2 2184 ua_chan->tracing_channel_id = uchan->id;
fc34caaa 2185
fc34caaa 2186 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
48842b30
DG
2187}
2188
5b4a0ec0
DG
2189/*
2190 * Copy data between a UST app session and a regular LTT session.
2191 */
421cb601 2192static void shadow_copy_session(struct ust_app_session *ua_sess,
bec39940 2193 struct ltt_ust_session *usess, struct ust_app *app)
48842b30 2194{
477d7741
MD
2195 struct tm *timeinfo;
2196 char datetime[16];
2197 int ret;
d7ba1388 2198 char tmp_shm_path[PATH_MAX];
477d7741 2199
940c4592 2200 timeinfo = localtime(&app->registration_time);
477d7741 2201 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
48842b30 2202
421cb601 2203 DBG2("Shadow copy of session handle %d", ua_sess->handle);
48842b30 2204
7972aab2
DG
2205 ua_sess->tracing_id = usess->id;
2206 ua_sess->id = get_next_session_id();
2463b787
JR
2207 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
2208 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
2209 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
2210 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
7972aab2
DG
2211 ua_sess->buffer_type = usess->buffer_type;
2212 ua_sess->bits_per_long = app->bits_per_long;
6addfa37 2213
7972aab2 2214 /* There is only one consumer object per session possible. */
6addfa37 2215 consumer_output_get(usess->consumer);
7972aab2 2216 ua_sess->consumer = usess->consumer;
6addfa37 2217
2bba9e53 2218 ua_sess->output_traces = usess->output_traces;
ecc48a90 2219 ua_sess->live_timer_interval = usess->live_timer_interval;
84ad93e8
DG
2220 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
2221 &usess->metadata_attr);
7972aab2
DG
2222
2223 switch (ua_sess->buffer_type) {
2224 case LTTNG_BUFFER_PER_PID:
2225 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
dec56f6c 2226 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
7972aab2
DG
2227 datetime);
2228 break;
2229 case LTTNG_BUFFER_PER_UID:
2230 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
470cc211 2231 DEFAULT_UST_TRACE_UID_PATH,
2463b787 2232 lttng_credentials_get_uid(&ua_sess->real_credentials),
470cc211 2233 app->bits_per_long);
7972aab2
DG
2234 break;
2235 default:
2236 assert(0);
2237 goto error;
2238 }
477d7741
MD
2239 if (ret < 0) {
2240 PERROR("asprintf UST shadow copy session");
477d7741 2241 assert(0);
7972aab2 2242 goto error;
477d7741
MD
2243 }
2244
3d071855
MD
2245 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
2246 sizeof(ua_sess->root_shm_path));
2247 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
d7ba1388
MD
2248 strncpy(ua_sess->shm_path, usess->shm_path,
2249 sizeof(ua_sess->shm_path));
2250 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2251 if (ua_sess->shm_path[0]) {
2252 switch (ua_sess->buffer_type) {
2253 case LTTNG_BUFFER_PER_PID:
2254 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
5da88b0f 2255 "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
d7ba1388
MD
2256 app->name, app->pid, datetime);
2257 break;
2258 case LTTNG_BUFFER_PER_UID:
2259 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
5da88b0f 2260 "/" DEFAULT_UST_TRACE_UID_PATH,
d7ba1388
MD
2261 app->uid, app->bits_per_long);
2262 break;
2263 default:
2264 assert(0);
2265 goto error;
2266 }
2267 if (ret < 0) {
2268 PERROR("sprintf UST shadow copy session");
2269 assert(0);
2270 goto error;
2271 }
2272 strncat(ua_sess->shm_path, tmp_shm_path,
2273 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
2274 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2275 }
6addfa37 2276 return;
7972aab2
DG
2277
2278error:
6addfa37 2279 consumer_output_put(ua_sess->consumer);
48842b30
DG
2280}
2281
78f0bacd
DG
2282/*
2283 * Lookup sesison wrapper.
2284 */
84cd17c6 2285static
fb9a95c4 2286void __lookup_session_by_app(const struct ltt_ust_session *usess,
bec39940 2287 struct ust_app *app, struct lttng_ht_iter *iter)
84cd17c6
MD
2288{
2289 /* Get right UST app session from app */
d9bf3ca4 2290 lttng_ht_lookup(app->sessions, &usess->id, iter);
84cd17c6
MD
2291}
2292
421cb601
DG
2293/*
2294 * Return ust app session from the app session hashtable using the UST session
a991f516 2295 * id.
421cb601 2296 */
48842b30 2297static struct ust_app_session *lookup_session_by_app(
fb9a95c4 2298 const struct ltt_ust_session *usess, struct ust_app *app)
48842b30 2299{
bec39940 2300 struct lttng_ht_iter iter;
d9bf3ca4 2301 struct lttng_ht_node_u64 *node;
48842b30 2302
84cd17c6 2303 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 2304 node = lttng_ht_iter_get_node_u64(&iter);
48842b30
DG
2305 if (node == NULL) {
2306 goto error;
2307 }
2308
2309 return caa_container_of(node, struct ust_app_session, node);
2310
2311error:
2312 return NULL;
2313}
2314
7972aab2
DG
2315/*
2316 * Setup buffer registry per PID for the given session and application. If none
2317 * is found, a new one is created, added to the global registry and
2318 * initialized. If regp is valid, it's set with the newly created object.
2319 *
2320 * Return 0 on success or else a negative value.
2321 */
2322static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2323 struct ust_app *app, struct buffer_reg_pid **regp)
2324{
2325 int ret = 0;
2326 struct buffer_reg_pid *reg_pid;
2327
2328 assert(ua_sess);
2329 assert(app);
2330
2331 rcu_read_lock();
2332
2333 reg_pid = buffer_reg_pid_find(ua_sess->id);
2334 if (!reg_pid) {
2335 /*
2336 * This is the create channel path meaning that if there is NO
2337 * registry available, we have to create one for this session.
2338 */
d7ba1388 2339 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
3d071855 2340 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
2341 if (ret < 0) {
2342 goto error;
2343 }
7972aab2
DG
2344 } else {
2345 goto end;
2346 }
2347
2348 /* Initialize registry. */
2349 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2350 app->bits_per_long, app->uint8_t_alignment,
2351 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf 2352 app->uint64_t_alignment, app->long_alignment,
470cc211
JG
2353 app->byte_order, app->version.major, app->version.minor,
2354 reg_pid->root_shm_path, reg_pid->shm_path,
2463b787
JR
2355 lttng_credentials_get_uid(&ua_sess->effective_credentials),
2356 lttng_credentials_get_gid(&ua_sess->effective_credentials),
2357 ua_sess->tracing_id,
8de88061 2358 app->uid);
7972aab2 2359 if (ret < 0) {
286c991a
MD
2360 /*
2361 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2362 * destroy the buffer registry, because it is always expected
2363 * that if the buffer registry can be found, its ust registry is
2364 * non-NULL.
2365 */
2366 buffer_reg_pid_destroy(reg_pid);
7972aab2
DG
2367 goto error;
2368 }
2369
286c991a
MD
2370 buffer_reg_pid_add(reg_pid);
2371
7972aab2
DG
2372 DBG3("UST app buffer registry per PID created successfully");
2373
2374end:
2375 if (regp) {
2376 *regp = reg_pid;
2377 }
2378error:
2379 rcu_read_unlock();
2380 return ret;
2381}
2382
2383/*
2384 * Setup buffer registry per UID for the given session and application. If none
2385 * is found, a new one is created, added to the global registry and
2386 * initialized. If regp is valid, it's set with the newly created object.
2387 *
2388 * Return 0 on success or else a negative value.
2389 */
2390static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
d7ba1388 2391 struct ust_app_session *ua_sess,
7972aab2
DG
2392 struct ust_app *app, struct buffer_reg_uid **regp)
2393{
2394 int ret = 0;
2395 struct buffer_reg_uid *reg_uid;
2396
2397 assert(usess);
2398 assert(app);
2399
2400 rcu_read_lock();
2401
2402 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2403 if (!reg_uid) {
2404 /*
2405 * This is the create channel path meaning that if there is NO
2406 * registry available, we have to create one for this session.
2407 */
2408 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
3d071855
MD
2409 LTTNG_DOMAIN_UST, &reg_uid,
2410 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
2411 if (ret < 0) {
2412 goto error;
2413 }
7972aab2
DG
2414 } else {
2415 goto end;
2416 }
2417
2418 /* Initialize registry. */
af6142cf 2419 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
7972aab2
DG
2420 app->bits_per_long, app->uint8_t_alignment,
2421 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf
MD
2422 app->uint64_t_alignment, app->long_alignment,
2423 app->byte_order, app->version.major,
3d071855 2424 app->version.minor, reg_uid->root_shm_path,
8de88061
JR
2425 reg_uid->shm_path, usess->uid, usess->gid,
2426 ua_sess->tracing_id, app->uid);
7972aab2 2427 if (ret < 0) {
286c991a
MD
2428 /*
2429 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2430 * destroy the buffer registry, because it is always expected
2431 * that if the buffer registry can be found, its ust registry is
2432 * non-NULL.
2433 */
2434 buffer_reg_uid_destroy(reg_uid, NULL);
7972aab2
DG
2435 goto error;
2436 }
2437 /* Add node to teardown list of the session. */
2438 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2439
286c991a 2440 buffer_reg_uid_add(reg_uid);
7972aab2 2441
286c991a 2442 DBG3("UST app buffer registry per UID created successfully");
7972aab2
DG
2443end:
2444 if (regp) {
2445 *regp = reg_uid;
2446 }
2447error:
2448 rcu_read_unlock();
2449 return ret;
2450}
2451
421cb601 2452/*
3d8ca23b 2453 * Create a session on the tracer side for the given app.
421cb601 2454 *
3d8ca23b
DG
2455 * On success, ua_sess_ptr is populated with the session pointer or else left
2456 * untouched. If the session was created, is_created is set to 1. On error,
2457 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2458 * be NULL.
2459 *
2460 * Returns 0 on success or else a negative code which is either -ENOMEM or
2461 * -ENOTCONN which is the default code if the ustctl_create_session fails.
421cb601 2462 */
03f91eaa 2463static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
3d8ca23b
DG
2464 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2465 int *is_created)
421cb601 2466{
3d8ca23b 2467 int ret, created = 0;
421cb601
DG
2468 struct ust_app_session *ua_sess;
2469
3d8ca23b
DG
2470 assert(usess);
2471 assert(app);
2472 assert(ua_sess_ptr);
2473
840cb59c 2474 health_code_update();
86acf0da 2475
421cb601
DG
2476 ua_sess = lookup_session_by_app(usess, app);
2477 if (ua_sess == NULL) {
d9bf3ca4 2478 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
852d0037 2479 app->pid, usess->id);
40bbd087 2480 ua_sess = alloc_ust_app_session();
421cb601
DG
2481 if (ua_sess == NULL) {
2482 /* Only malloc can failed so something is really wrong */
3d8ca23b
DG
2483 ret = -ENOMEM;
2484 goto error;
421cb601 2485 }
477d7741 2486 shadow_copy_session(ua_sess, usess, app);
3d8ca23b 2487 created = 1;
421cb601
DG
2488 }
2489
7972aab2
DG
2490 switch (usess->buffer_type) {
2491 case LTTNG_BUFFER_PER_PID:
2492 /* Init local registry. */
2493 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
421cb601 2494 if (ret < 0) {
e64207cf 2495 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2496 goto error;
2497 }
2498 break;
2499 case LTTNG_BUFFER_PER_UID:
2500 /* Look for a global registry. If none exists, create one. */
d7ba1388 2501 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
7972aab2 2502 if (ret < 0) {
e64207cf 2503 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2504 goto error;
2505 }
2506 break;
2507 default:
2508 assert(0);
2509 ret = -EINVAL;
2510 goto error;
2511 }
2512
2513 health_code_update();
2514
2515 if (ua_sess->handle == -1) {
fb45065e 2516 pthread_mutex_lock(&app->sock_lock);
7972aab2 2517 ret = ustctl_create_session(app->sock);
fb45065e 2518 pthread_mutex_unlock(&app->sock_lock);
7972aab2
DG
2519 if (ret < 0) {
2520 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2521 ERR("Creating session for app pid %d with ret %d",
ffe60014
DG
2522 app->pid, ret);
2523 } else {
2524 DBG("UST app creating session failed. Application is dead");
3757b385
DG
2525 /*
2526 * This is normal behavior, an application can die during the
2527 * creation process. Don't report an error so the execution can
2528 * continue normally. This will get flagged ENOTCONN and the
2529 * caller will handle it.
2530 */
2531 ret = 0;
ffe60014 2532 }
d0b96690 2533 delete_ust_app_session(-1, ua_sess, app);
3d8ca23b
DG
2534 if (ret != -ENOMEM) {
2535 /*
2536 * Tracer is probably gone or got an internal error so let's
2537 * behave like it will soon unregister or not usable.
2538 */
2539 ret = -ENOTCONN;
2540 }
2541 goto error;
421cb601
DG
2542 }
2543
7972aab2
DG
2544 ua_sess->handle = ret;
2545
2546 /* Add ust app session to app's HT */
d9bf3ca4
MD
2547 lttng_ht_node_init_u64(&ua_sess->node,
2548 ua_sess->tracing_id);
2549 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
10b56aef
MD
2550 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2551 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2552 &ua_sess->ust_objd_node);
7972aab2
DG
2553
2554 DBG2("UST app session created successfully with handle %d", ret);
2555 }
2556
2557 *ua_sess_ptr = ua_sess;
2558 if (is_created) {
2559 *is_created = created;
2560 }
2561
2562 /* Everything went well. */
2563 ret = 0;
2564
2565error:
2566 health_code_update();
2567 return ret;
2568}
2569
6a6b2068
JG
2570/*
2571 * Match function for a hash table lookup of ust_app_ctx.
2572 *
2573 * It matches an ust app context based on the context type and, in the case
2574 * of perf counters, their name.
2575 */
2576static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2577{
2578 struct ust_app_ctx *ctx;
bdf64013 2579 const struct lttng_ust_context_attr *key;
6a6b2068
JG
2580
2581 assert(node);
2582 assert(_key);
2583
2584 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2585 key = _key;
2586
2587 /* Context type */
2588 if (ctx->ctx.ctx != key->ctx) {
2589 goto no_match;
2590 }
2591
bdf64013
JG
2592 switch(key->ctx) {
2593 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
6a6b2068 2594 if (strncmp(key->u.perf_counter.name,
bdf64013
JG
2595 ctx->ctx.u.perf_counter.name,
2596 sizeof(key->u.perf_counter.name))) {
2597 goto no_match;
2598 }
2599 break;
2600 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2601 if (strcmp(key->u.app_ctx.provider_name,
2602 ctx->ctx.u.app_ctx.provider_name) ||
2603 strcmp(key->u.app_ctx.ctx_name,
2604 ctx->ctx.u.app_ctx.ctx_name)) {
6a6b2068
JG
2605 goto no_match;
2606 }
bdf64013
JG
2607 break;
2608 default:
2609 break;
6a6b2068
JG
2610 }
2611
2612 /* Match. */
2613 return 1;
2614
2615no_match:
2616 return 0;
2617}
2618
2619/*
2620 * Lookup for an ust app context from an lttng_ust_context.
2621 *
be184a0f 2622 * Must be called while holding RCU read side lock.
6a6b2068
JG
2623 * Return an ust_app_ctx object or NULL on error.
2624 */
2625static
2626struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
bdf64013 2627 struct lttng_ust_context_attr *uctx)
6a6b2068
JG
2628{
2629 struct lttng_ht_iter iter;
2630 struct lttng_ht_node_ulong *node;
2631 struct ust_app_ctx *app_ctx = NULL;
2632
2633 assert(uctx);
2634 assert(ht);
2635
2636 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2637 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2638 ht_match_ust_app_ctx, uctx, &iter.iter);
2639 node = lttng_ht_iter_get_node_ulong(&iter);
2640 if (!node) {
2641 goto end;
2642 }
2643
2644 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2645
2646end:
2647 return app_ctx;
2648}
2649
7972aab2
DG
2650/*
2651 * Create a context for the channel on the tracer.
2652 *
2653 * Called with UST app session lock held and a RCU read side lock.
2654 */
2655static
c9edf082 2656int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
f3db82be 2657 struct lttng_ust_context_attr *uctx,
7972aab2
DG
2658 struct ust_app *app)
2659{
2660 int ret = 0;
7972aab2
DG
2661 struct ust_app_ctx *ua_ctx;
2662
2663 DBG2("UST app adding context to channel %s", ua_chan->name);
2664
6a6b2068
JG
2665 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2666 if (ua_ctx) {
7972aab2
DG
2667 ret = -EEXIST;
2668 goto error;
2669 }
2670
2671 ua_ctx = alloc_ust_app_ctx(uctx);
2672 if (ua_ctx == NULL) {
2673 /* malloc failed */
7682f304 2674 ret = -ENOMEM;
7972aab2
DG
2675 goto error;
2676 }
2677
2678 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
aa3514e9 2679 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
31746f93 2680 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
7972aab2
DG
2681
2682 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2683 if (ret < 0) {
2684 goto error;
2685 }
2686
2687error:
2688 return ret;
2689}
2690
2691/*
2692 * Enable on the tracer side a ust app event for the session and channel.
2693 *
2694 * Called with UST app session lock held.
2695 */
2696static
2697int enable_ust_app_event(struct ust_app_session *ua_sess,
2698 struct ust_app_event *ua_event, struct ust_app *app)
2699{
2700 int ret;
2701
2463b787 2702 ret = enable_ust_object(app, ua_event->obj);
7972aab2
DG
2703 if (ret < 0) {
2704 goto error;
2705 }
2706
2707 ua_event->enabled = 1;
2708
2709error:
2710 return ret;
2711}
2712
2713/*
2714 * Disable on the tracer side a ust app event for the session and channel.
2715 */
2716static int disable_ust_app_event(struct ust_app_session *ua_sess,
2717 struct ust_app_event *ua_event, struct ust_app *app)
2718{
2719 int ret;
2720
2463b787 2721 ret = disable_ust_object(app, ua_event->obj);
7972aab2
DG
2722 if (ret < 0) {
2723 goto error;
2724 }
2725
2726 ua_event->enabled = 0;
2727
2728error:
2729 return ret;
2730}
2731
2732/*
2733 * Lookup ust app channel for session and disable it on the tracer side.
2734 */
2735static
2736int disable_ust_app_channel(struct ust_app_session *ua_sess,
2737 struct ust_app_channel *ua_chan, struct ust_app *app)
2738{
2739 int ret;
2740
2741 ret = disable_ust_channel(app, ua_sess, ua_chan);
2742 if (ret < 0) {
2743 goto error;
2744 }
2745
2746 ua_chan->enabled = 0;
2747
2748error:
2749 return ret;
2750}
2751
2752/*
2753 * Lookup ust app channel for session and enable it on the tracer side. This
2754 * MUST be called with a RCU read side lock acquired.
2755 */
2756static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2757 struct ltt_ust_channel *uchan, struct ust_app *app)
2758{
2759 int ret = 0;
2760 struct lttng_ht_iter iter;
2761 struct lttng_ht_node_str *ua_chan_node;
2762 struct ust_app_channel *ua_chan;
2763
2764 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2765 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2766 if (ua_chan_node == NULL) {
d9bf3ca4 2767 DBG2("Unable to find channel %s in ust session id %" PRIu64,
7972aab2
DG
2768 uchan->name, ua_sess->tracing_id);
2769 goto error;
2770 }
2771
2772 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2773
2774 ret = enable_ust_channel(app, ua_sess, ua_chan);
2775 if (ret < 0) {
2776 goto error;
2777 }
2778
2779error:
2780 return ret;
2781}
2782
2783/*
2784 * Ask the consumer to create a channel and get it if successful.
2785 *
fad1ed2f
JR
2786 * Called with UST app session lock held.
2787 *
7972aab2
DG
2788 * Return 0 on success or else a negative value.
2789 */
2790static int do_consumer_create_channel(struct ltt_ust_session *usess,
2791 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
e098433c
JG
2792 int bitness, struct ust_registry_session *registry,
2793 uint64_t trace_archive_id)
7972aab2
DG
2794{
2795 int ret;
2796 unsigned int nb_fd = 0;
2797 struct consumer_socket *socket;
2798
2799 assert(usess);
2800 assert(ua_sess);
2801 assert(ua_chan);
2802 assert(registry);
2803
2804 rcu_read_lock();
2805 health_code_update();
2806
2807 /* Get the right consumer socket for the application. */
2808 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2809 if (!socket) {
2810 ret = -EINVAL;
2811 goto error;
2812 }
2813
2814 health_code_update();
2815
2816 /* Need one fd for the channel. */
2817 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2818 if (ret < 0) {
2819 ERR("Exhausted number of available FD upon create channel");
2820 goto error;
2821 }
2822
2823 /*
2824 * Ask consumer to create channel. The consumer will return the number of
2825 * stream we have to expect.
2826 */
2827 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
d2956687 2828 registry, usess->current_trace_chunk);
7972aab2
DG
2829 if (ret < 0) {
2830 goto error_ask;
2831 }
2832
2833 /*
2834 * Compute the number of fd needed before receiving them. It must be 2 per
2835 * stream (2 being the default value here).
2836 */
2837 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2838
2839 /* Reserve the amount of file descriptor we need. */
2840 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2841 if (ret < 0) {
2842 ERR("Exhausted number of available FD upon create channel");
2843 goto error_fd_get_stream;
2844 }
2845
2846 health_code_update();
2847
2848 /*
2849 * Now get the channel from the consumer. This call wil populate the stream
2850 * list of that channel and set the ust objects.
2851 */
d9078d0c
DG
2852 if (usess->consumer->enabled) {
2853 ret = ust_consumer_get_channel(socket, ua_chan);
2854 if (ret < 0) {
2855 goto error_destroy;
2856 }
7972aab2
DG
2857 }
2858
2859 rcu_read_unlock();
2860 return 0;
2861
2862error_destroy:
2863 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2864error_fd_get_stream:
2865 /*
2866 * Initiate a destroy channel on the consumer since we had an error
2867 * handling it on our side. The return value is of no importance since we
2868 * already have a ret value set by the previous error that we need to
2869 * return.
2870 */
2871 (void) ust_consumer_destroy_channel(socket, ua_chan);
2872error_ask:
2873 lttng_fd_put(LTTNG_FD_APPS, 1);
2874error:
2875 health_code_update();
2876 rcu_read_unlock();
2877 return ret;
2878}
2879
2880/*
2881 * Duplicate the ust data object of the ust app stream and save it in the
2882 * buffer registry stream.
2883 *
2884 * Return 0 on success or else a negative value.
2885 */
2886static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2887 struct ust_app_stream *stream)
2888{
2889 int ret;
2890
2891 assert(reg_stream);
2892 assert(stream);
2893
2894 /* Reserve the amount of file descriptor we need. */
2895 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2896 if (ret < 0) {
2897 ERR("Exhausted number of available FD upon duplicate stream");
2898 goto error;
2899 }
2900
2901 /* Duplicate object for stream once the original is in the registry. */
2902 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2903 reg_stream->obj.ust);
2904 if (ret < 0) {
2905 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2906 reg_stream->obj.ust, stream->obj, ret);
2907 lttng_fd_put(LTTNG_FD_APPS, 2);
2908 goto error;
2909 }
2910 stream->handle = stream->obj->handle;
2911
2912error:
2913 return ret;
2914}
2915
2916/*
2917 * Duplicate the ust data object of the ust app. channel and save it in the
2918 * buffer registry channel.
2919 *
2920 * Return 0 on success or else a negative value.
2921 */
2922static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2923 struct ust_app_channel *ua_chan)
2924{
2925 int ret;
2926
2927 assert(reg_chan);
2928 assert(ua_chan);
2929
2930 /* Need two fds for the channel. */
2931 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2932 if (ret < 0) {
2933 ERR("Exhausted number of available FD upon duplicate channel");
2934 goto error_fd_get;
2935 }
2936
2937 /* Duplicate object for stream once the original is in the registry. */
2938 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2939 if (ret < 0) {
2940 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2941 reg_chan->obj.ust, ua_chan->obj, ret);
2942 goto error;
2943 }
2944 ua_chan->handle = ua_chan->obj->handle;
2945
2946 return 0;
2947
2948error:
2949 lttng_fd_put(LTTNG_FD_APPS, 1);
2950error_fd_get:
2951 return ret;
2952}
2953
2954/*
2955 * For a given channel buffer registry, setup all streams of the given ust
2956 * application channel.
2957 *
2958 * Return 0 on success or else a negative value.
2959 */
2960static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
fb45065e
MD
2961 struct ust_app_channel *ua_chan,
2962 struct ust_app *app)
7972aab2
DG
2963{
2964 int ret = 0;
2965 struct ust_app_stream *stream, *stmp;
2966
2967 assert(reg_chan);
2968 assert(ua_chan);
2969
2970 DBG2("UST app setup buffer registry stream");
2971
2972 /* Send all streams to application. */
2973 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2974 struct buffer_reg_stream *reg_stream;
2975
2976 ret = buffer_reg_stream_create(&reg_stream);
2977 if (ret < 0) {
2978 goto error;
2979 }
2980
2981 /*
2982 * Keep original pointer and nullify it in the stream so the delete
2983 * stream call does not release the object.
2984 */
2985 reg_stream->obj.ust = stream->obj;
2986 stream->obj = NULL;
2987 buffer_reg_stream_add(reg_stream, reg_chan);
421cb601 2988
7972aab2
DG
2989 /* We don't need the streams anymore. */
2990 cds_list_del(&stream->list);
fb45065e 2991 delete_ust_app_stream(-1, stream, app);
7972aab2 2992 }
421cb601 2993
7972aab2
DG
2994error:
2995 return ret;
2996}
2997
2998/*
2999 * Create a buffer registry channel for the given session registry and
3000 * application channel object. If regp pointer is valid, it's set with the
3001 * created object. Important, the created object is NOT added to the session
3002 * registry hash table.
3003 *
3004 * Return 0 on success else a negative value.
3005 */
3006static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3007 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
3008{
3009 int ret;
3010 struct buffer_reg_channel *reg_chan = NULL;
3011
3012 assert(reg_sess);
3013 assert(ua_chan);
3014
3015 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
3016
3017 /* Create buffer registry channel. */
3018 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
3019 if (ret < 0) {
3020 goto error_create;
421cb601 3021 }
7972aab2
DG
3022 assert(reg_chan);
3023 reg_chan->consumer_key = ua_chan->key;
8c924c7b 3024 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
d07ceecd 3025 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
421cb601 3026
7972aab2
DG
3027 /* Create and add a channel registry to session. */
3028 ret = ust_registry_channel_add(reg_sess->reg.ust,
3029 ua_chan->tracing_channel_id);
3030 if (ret < 0) {
3031 goto error;
d88aee68 3032 }
7972aab2 3033 buffer_reg_channel_add(reg_sess, reg_chan);
d88aee68 3034
7972aab2
DG
3035 if (regp) {
3036 *regp = reg_chan;
3d8ca23b 3037 }
d88aee68 3038
7972aab2 3039 return 0;
3d8ca23b
DG
3040
3041error:
7972aab2
DG
3042 /* Safe because the registry channel object was not added to any HT. */
3043 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
3044error_create:
3d8ca23b 3045 return ret;
421cb601
DG
3046}
3047
55cc08a6 3048/*
7972aab2
DG
3049 * Setup buffer registry channel for the given session registry and application
3050 * channel object. If regp pointer is valid, it's set with the created object.
d0b96690 3051 *
7972aab2 3052 * Return 0 on success else a negative value.
55cc08a6 3053 */
7972aab2 3054static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
fb45065e
MD
3055 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
3056 struct ust_app *app)
55cc08a6 3057{
7972aab2 3058 int ret;
55cc08a6 3059
7972aab2
DG
3060 assert(reg_sess);
3061 assert(reg_chan);
3062 assert(ua_chan);
3063 assert(ua_chan->obj);
55cc08a6 3064
7972aab2 3065 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
55cc08a6 3066
7972aab2 3067 /* Setup all streams for the registry. */
fb45065e 3068 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
7972aab2 3069 if (ret < 0) {
55cc08a6
DG
3070 goto error;
3071 }
3072
7972aab2
DG
3073 reg_chan->obj.ust = ua_chan->obj;
3074 ua_chan->obj = NULL;
55cc08a6 3075
7972aab2 3076 return 0;
55cc08a6
DG
3077
3078error:
7972aab2
DG
3079 buffer_reg_channel_remove(reg_sess, reg_chan);
3080 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
55cc08a6
DG
3081 return ret;
3082}
3083
/*
 * Send buffer registry channel to the application.
 *
 * Duplicates the registry channel's UST object into the application channel,
 * sends the channel to the application, then duplicates and sends every
 * registry stream while holding the registry's stream list lock.
 *
 * Return 0 on success else a negative value. Returns -ENOTCONN when the
 * failure was caused by the application exiting (dead app).
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	/* Clone the registry channel object (reserves its own FDs). */
	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN;	/* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		/*
		 * Stack-allocated wrapper: the duplicated UST object it holds is
		 * released after each send via release_ust_app_stream().
		 */
		struct ust_app_stream stream;

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream, app);
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				ret = -ENOTCONN; /* Caused by app exiting. */
			}
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream, app);
	}
	/* Mark the channel as fully sent to this application. */
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
3151
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * This MUST be called with a RCU read side lock acquired.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;
	struct ltt_session *session = NULL;
	enum lttng_error_code notification_ret;
	struct ust_registry_channel *chan_reg;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be find, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (reg_chan) {
		/* Buffers already exist for this UID; only send them to the app. */
		goto send_channel;
	}

	/* Create the buffer registry channel object. */
	ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
			ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);
	/* Both locks must already be held by the caller (see header comment). */
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/*
	 * Create the buffers on the consumer side. This call populates the
	 * ust app channel object with all streams and data object.
	 */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, reg_uid->registry->reg.ust,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

		/*
		 * Let's remove the previously created buffer registry channel so
		 * it's not visible anymore in the session registry.
		 */
		ust_registry_channel_del_free(reg_uid->registry->reg.ust,
				ua_chan->tracing_channel_id, false);
		buffer_reg_channel_remove(reg_uid->registry, reg_chan);
		buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
		goto error;
	}

	/*
	 * Setup the streams and add it to the session registry.
	 */
	ret = setup_buffer_reg_channel(reg_uid->registry,
			ua_chan, reg_chan, app);
	if (ret < 0) {
		ERR("Error setting up UST channel \"%s\"", ua_chan->name);
		goto error;
	}

	/* Notify the notification subsystem of the channel's creation. */
	pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
	chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
			ua_chan->tracing_channel_id);
	assert(chan_reg);
	chan_reg->consumer_key = ua_chan->key;
	chan_reg = NULL;
	pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);

	notification_ret = notification_thread_command_add_channel(
			notification_thread_handle, session->name,
			lttng_credentials_get_uid(&ua_sess->effective_credentials),
			lttng_credentials_get_gid(&ua_sess->effective_credentials),
			ua_chan->name,
			ua_chan->key, LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (notification_ret != LTTNG_OK) {
		ret = - (int) notification_ret;
		ERR("Failed to add channel to notification thread");
		goto error;
	}

send_channel:
	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	if (session) {
		session_put(session);
	}
	return ret;
}
3275
/*
 * Create and send to the application the created buffers with per PID buffers.
 *
 * Called with UST app session lock held.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;
	enum lttng_error_code cmd_ret;
	struct ltt_session *session = NULL;
	uint64_t chan_reg_key;
	struct ust_registry_channel *chan_reg;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	/* The UST app session lock is held, registry shall not be null. */
	assert(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
			ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);

	/* Both locks must already be held by the caller (see header comment). */
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
			ua_chan->name);
		goto error_remove_from_registry;
	}

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error_remove_from_registry;
	}

	/* Publish the consumer key in the channel registry. */
	chan_reg_key = ua_chan->key;
	pthread_mutex_lock(&registry->lock);
	chan_reg = ust_registry_channel_find(registry, chan_reg_key);
	assert(chan_reg);
	chan_reg->consumer_key = ua_chan->key;
	pthread_mutex_unlock(&registry->lock);

	cmd_ret = notification_thread_command_add_channel(
			notification_thread_handle, session->name,
			lttng_credentials_get_uid(&ua_sess->effective_credentials),
			lttng_credentials_get_gid(&ua_sess->effective_credentials),
			ua_chan->name,
			ua_chan->key, LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (cmd_ret != LTTNG_OK) {
		ret = - (int) cmd_ret;
		ERR("Failed to add channel to notification thread");
		goto error_remove_from_registry;
	}

error_remove_from_registry:
	/* Roll back the registry entry on any failure after it was added. */
	if (ret) {
		ust_registry_channel_del_free(registry, ua_chan->key, false);
	}
error:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
	return ret;
}
3371
3372/*
7972aab2 3373 * From an already allocated ust app channel, create the channel buffers if
88e3c2f5 3374 * needed and send them to the application. This MUST be called with a RCU read
7972aab2
DG
3375 * side lock acquired.
3376 *
fad1ed2f
JR
3377 * Called with UST app session lock held.
3378 *
a7169585
MD
3379 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3380 * the application exited concurrently.
78f0bacd 3381 */
88e3c2f5 3382static int ust_app_channel_send(struct ust_app *app,
7972aab2
DG
3383 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3384 struct ust_app_channel *ua_chan)
78f0bacd 3385{
7972aab2 3386 int ret;
78f0bacd 3387
7972aab2
DG
3388 assert(app);
3389 assert(usess);
88e3c2f5 3390 assert(usess->active);
7972aab2
DG
3391 assert(ua_sess);
3392 assert(ua_chan);
3393
3394 /* Handle buffer type before sending the channel to the application. */
3395 switch (usess->buffer_type) {
3396 case LTTNG_BUFFER_PER_UID:
3397 {
3398 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3399 if (ret < 0) {
3400 goto error;
3401 }
3402 break;
3403 }
3404 case LTTNG_BUFFER_PER_PID:
3405 {
3406 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3407 if (ret < 0) {
3408 goto error;
3409 }
3410 break;
3411 }
3412 default:
3413 assert(0);
3414 ret = -EINVAL;
78f0bacd
DG
3415 goto error;
3416 }
3417
7972aab2
DG
3418 /* Initialize ust objd object using the received handle and add it. */
3419 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3420 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
78f0bacd 3421
7972aab2
DG
3422 /* If channel is not enabled, disable it on the tracer */
3423 if (!ua_chan->enabled) {
3424 ret = disable_ust_channel(app, ua_sess, ua_chan);
3425 if (ret < 0) {
3426 goto error;
3427 }
78f0bacd
DG
3428 }
3429
3430error:
3431 return ret;
3432}
3433
284d8f55 3434/*
88e3c2f5 3435 * Create UST app channel and return it through ua_chanp if not NULL.
d0b96690 3436 *
36b588ed 3437 * Called with UST app session lock and RCU read-side lock held.
7972aab2 3438 *
88e3c2f5 3439 * Return 0 on success or else a negative value.
284d8f55 3440 */
88e3c2f5
JG
3441static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
3442 struct ltt_ust_channel *uchan,
7972aab2 3443 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
4d710ac2 3444 struct ust_app_channel **ua_chanp)
5b4a0ec0
DG
3445{
3446 int ret = 0;
bec39940
DG
3447 struct lttng_ht_iter iter;
3448 struct lttng_ht_node_str *ua_chan_node;
5b4a0ec0
DG
3449 struct ust_app_channel *ua_chan;
3450
3451 /* Lookup channel in the ust app session */
bec39940
DG
3452 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3453 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
fc34caaa 3454 if (ua_chan_node != NULL) {
5b4a0ec0 3455 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
fc34caaa 3456 goto end;
5b4a0ec0
DG
3457 }
3458
d0b96690 3459 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
fc34caaa
DG
3460 if (ua_chan == NULL) {
3461 /* Only malloc can fail here */
4d710ac2 3462 ret = -ENOMEM;
88e3c2f5 3463 goto error;
fc34caaa
DG
3464 }
3465 shadow_copy_channel(ua_chan, uchan);
3466
ffe60014
DG
3467 /* Set channel type. */
3468 ua_chan->attr.type = type;
3469
d0b96690
DG
3470 /* Only add the channel if successful on the tracer side. */
3471 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
fc34caaa 3472end:
4d710ac2
DG
3473 if (ua_chanp) {
3474 *ua_chanp = ua_chan;
3475 }
3476
3477 /* Everything went well. */
3478 return 0;
5b4a0ec0
DG
3479
3480error:
4d710ac2 3481 return ret;
5b4a0ec0
DG
3482}
3483
3484/*
3485 * Create UST app event and create it on the tracer side.
d0b96690
DG
3486 *
3487 * Called with ust app session mutex held.
5b4a0ec0 3488 */
edb67388
DG
3489static
3490int create_ust_app_event(struct ust_app_session *ua_sess,
3491 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
3492 struct ust_app *app)
284d8f55 3493{
edb67388 3494 int ret = 0;
5b4a0ec0 3495 struct ust_app_event *ua_event;
284d8f55 3496
edb67388
DG
3497 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3498 if (ua_event == NULL) {
20533947 3499 /* Only failure mode of alloc_ust_app_event(). */
edb67388 3500 ret = -ENOMEM;
fc34caaa 3501 goto end;
5b4a0ec0 3502 }
edb67388 3503 shadow_copy_event(ua_event, uevent);
5b4a0ec0 3504
edb67388 3505 /* Create it on the tracer side */
5b4a0ec0 3506 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
284d8f55 3507 if (ret < 0) {
e9f11505
JG
3508 /*
3509 * Not found previously means that it does not exist on the
3510 * tracer. If the application reports that the event existed,
3511 * it means there is a bug in the sessiond or lttng-ust
3512 * (or corruption, etc.)
3513 */
3514 if (ret == -LTTNG_UST_ERR_EXIST) {
3515 ERR("Tracer for application reported that an event being created already existed: "
3516 "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
3517 uevent->attr.name,
3518 app->pid, app->ppid, app->uid,
3519 app->gid);
3520 }
284d8f55
DG
3521 goto error;
3522 }
3523
d0b96690 3524 add_unique_ust_app_event(ua_chan, ua_event);
284d8f55 3525
fc34caaa 3526 DBG2("UST app create event %s for PID %d completed", ua_event->name,
852d0037 3527 app->pid);
7f79d3a1 3528
edb67388 3529end:
fc34caaa
DG
3530 return ret;
3531
5b4a0ec0 3532error:
fc34caaa 3533 /* Valid. Calling here is already in a read side lock */
fb45065e 3534 delete_ust_app_event(-1, ua_event, app);
edb67388 3535 return ret;
5b4a0ec0
DG
3536}
3537
2463b787
JR
3538/*
3539 * Create UST app event and create it on the tracer side.
3540 *
3541 * Called with ust app session mutex held.
3542 */
3543static
3544int create_ust_app_token_event_rule(struct lttng_trigger *trigger,
3545 struct ust_app *app)
3546{
3547 int ret = 0;
3548 struct ust_app_token_event_rule *ua_token;
3549
3550 ua_token = alloc_ust_app_token_event_rule(trigger);
3551 if (ua_token == NULL) {
3552 ret = -ENOMEM;
3553 goto end;
3554 }
3555
3556 /* Create it on the tracer side */
3557 ret = create_ust_token_event_rule(app, ua_token);
3558 if (ret < 0) {
3559 /*
3560 * Not found previously means that it does not exist on the
3561 * tracer. If the application reports that the event existed,
3562 * it means there is a bug in the sessiond or lttng-ust
3563 * (or corruption, etc.)
3564 */
3565 if (ret == -LTTNG_UST_ERR_EXIST) {
3566 ERR("Tracer for application reported that a token event rule being created already existed: "
3567 "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
3568 lttng_trigger_get_tracer_token(trigger),
3569 app->pid, app->ppid, app->uid,
3570 app->gid);
3571 }
3572 goto error;
3573 }
3574
3575 lttng_ht_add_unique_u64(app->tokens_ht, &ua_token->node);
3576
3577 DBG2("UST app create token event rule %" PRIu64 " for PID %d completed", lttng_trigger_get_tracer_token(trigger),
3578 app->pid);
3579
3580 goto end;
3581
3582error:
3583 /* Valid. Calling here is already in a read side lock */
3584 delete_ust_app_token_event_rule(-1, ua_token, app);
3585end:
3586 return ret;
3587}
3588
/*
 * Create UST metadata and open it on the tracer side.
 *
 * Called with UST app session lock held and RCU read side lock.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;
	struct ltt_session *session = NULL;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	/* The UST app session is held registry shall not be null. */
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		/*
		 * NOTE(review): jumping to "error" here appears to leak the
		 * "metadata" channel allocated above since delete_ust_app_channel()
		 * is only reached through error_consumer -- confirm and fix
		 * separately if so.
		 */
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);

	/* Caller must hold both the session list lock and the session's lock. */
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept their. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry, session->current_trace_chunk);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/*
	 * Reached on both success and failure: the local channel object and its
	 * FD reservation are released; the consumer now owns the metadata.
	 */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
	if (session) {
		session_put(session);
	}
	return ret;
}
3697
5b4a0ec0 3698/*
d88aee68
DG
3699 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3700 * acquired before calling this function.
5b4a0ec0
DG
3701 */
3702struct ust_app *ust_app_find_by_pid(pid_t pid)
3703{
d88aee68 3704 struct ust_app *app = NULL;
bec39940
DG
3705 struct lttng_ht_node_ulong *node;
3706 struct lttng_ht_iter iter;
5b4a0ec0 3707
bec39940
DG
3708 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3709 node = lttng_ht_iter_get_node_ulong(&iter);
5b4a0ec0
DG
3710 if (node == NULL) {
3711 DBG2("UST app no found with pid %d", pid);
3712 goto error;
3713 }
5b4a0ec0
DG
3714
3715 DBG2("Found UST app by pid %d", pid);
3716
d88aee68 3717 app = caa_container_of(node, struct ust_app, pid_n);
5b4a0ec0
DG
3718
3719error:
d88aee68 3720 return app;
5b4a0ec0
DG
3721}
3722
d88aee68
DG
3723/*
3724 * Allocate and init an UST app object using the registration information and
3725 * the command socket. This is called when the command socket connects to the
3726 * session daemon.
3727 *
3728 * The object is returned on success or else NULL.
3729 */
d0b96690 3730struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
5b4a0ec0 3731{
d0b96690 3732 struct ust_app *lta = NULL;
2463b787 3733 struct lttng_pipe *trigger_event_source_pipe = NULL;
d0b96690
DG
3734
3735 assert(msg);
3736 assert(sock >= 0);
3737
3738 DBG3("UST app creating application for socket %d", sock);
5b4a0ec0 3739
173af62f
DG
3740 if ((msg->bits_per_long == 64 &&
3741 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3742 || (msg->bits_per_long == 32 &&
3743 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
f943b0fb 3744 ERR("Registration failed: application \"%s\" (pid: %d) has "
d0b96690
DG
3745 "%d-bit long, but no consumerd for this size is available.\n",
3746 msg->name, msg->pid, msg->bits_per_long);
3747 goto error;
3f2c5fcc 3748 }
d0b96690 3749
2463b787
JR
3750 trigger_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
3751 if (!trigger_event_source_pipe) {
3752 PERROR("Open trigger pipe");
3753 goto error;
3754 }
3755
5b4a0ec0
DG
3756 lta = zmalloc(sizeof(struct ust_app));
3757 if (lta == NULL) {
3758 PERROR("malloc");
d0b96690 3759 goto error;
5b4a0ec0
DG
3760 }
3761
2463b787
JR
3762 lta->token_communication.trigger_event_pipe = trigger_event_source_pipe;
3763
5b4a0ec0
DG
3764 lta->ppid = msg->ppid;
3765 lta->uid = msg->uid;
3766 lta->gid = msg->gid;
d0b96690 3767
7753dea8 3768 lta->bits_per_long = msg->bits_per_long;
d0b96690
DG
3769 lta->uint8_t_alignment = msg->uint8_t_alignment;
3770 lta->uint16_t_alignment = msg->uint16_t_alignment;
3771 lta->uint32_t_alignment = msg->uint32_t_alignment;
3772 lta->uint64_t_alignment = msg->uint64_t_alignment;
3773 lta->long_alignment = msg->long_alignment;
3774 lta->byte_order = msg->byte_order;
3775
5b4a0ec0
DG
3776 lta->v_major = msg->major;
3777 lta->v_minor = msg->minor;
d9bf3ca4 3778 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
d0b96690 3779 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
10b56aef 3780 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
d0b96690 3781 lta->notify_sock = -1;
2463b787 3782 lta->tokens_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
d88aee68
DG
3783
3784 /* Copy name and make sure it's NULL terminated. */
3785 strncpy(lta->name, msg->name, sizeof(lta->name));
3786 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3787
3788 /*
3789 * Before this can be called, when receiving the registration information,
3790 * the application compatibility is checked. So, at this point, the
3791 * application can work with this session daemon.
3792 */
d0b96690 3793 lta->compatible = 1;
5b4a0ec0 3794
852d0037 3795 lta->pid = msg->pid;
d0b96690 3796 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
852d0037 3797 lta->sock = sock;
fb45065e 3798 pthread_mutex_init(&lta->sock_lock, NULL);
d0b96690 3799 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
5b4a0ec0 3800
d42f20df 3801 CDS_INIT_LIST_HEAD(&lta->teardown_head);
d0b96690
DG
3802error:
3803 return lta;
3804}
3805
/*
 * For a given application object, add it to every hash table.
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	/* The notify socket must have been received before this point. */
	assert(app->notify_sock >= 0);

	/* Record the moment the application registered. */
	app->registration_time = time(NULL);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
3842
d88aee68
DG
3843/*
3844 * Set the application version into the object.
3845 *
3846 * Return 0 on success else a negative value either an errno code or a
3847 * LTTng-UST error code.
3848 */
d0b96690
DG
3849int ust_app_version(struct ust_app *app)
3850{
d88aee68
DG
3851 int ret;
3852
d0b96690 3853 assert(app);
d88aee68 3854
fb45065e 3855 pthread_mutex_lock(&app->sock_lock);
d88aee68 3856 ret = ustctl_tracer_version(app->sock, &app->version);
fb45065e 3857 pthread_mutex_unlock(&app->sock_lock);
d88aee68
DG
3858 if (ret < 0) {
3859 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
5368d366 3860 ERR("UST app %d version failed with ret %d", app->sock, ret);
d88aee68 3861 } else {
5368d366 3862 DBG3("UST app %d version failed. Application is dead", app->sock);
d88aee68
DG
3863 }
3864 }
3865
3866 return ret;
5b4a0ec0
DG
3867}
3868
2463b787
JR
3869/*
3870 * Setup the base trigger group.
3871 *
3872 * Return 0 on success else a negative value either an errno code or a
3873 * LTTng-UST error code.
3874 */
3875int ust_app_setup_trigger_group(struct ust_app *app)
3876{
3877 int ret;
3878 int writefd;
3879 struct lttng_ust_object_data *group = NULL;
3880 enum lttng_error_code lttng_ret;
3881 enum trigger_error_accounting_status trigger_error_accounting_status;
3882
3883 assert(app);
3884
3885 /* Get the write side of the pipe */
3886 writefd = lttng_pipe_get_writefd(app->token_communication.trigger_event_pipe);
3887
3888 pthread_mutex_lock(&app->sock_lock);
3889 ret = ustctl_create_trigger_group(app->sock, writefd, &group);
3890 pthread_mutex_unlock(&app->sock_lock);
3891 if (ret < 0) {
3892 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3893 ERR("UST app %d create_trigger_group failed with ret %d, trigger pipe %d", app->sock, ret, writefd);
3894 } else {
3895 DBG("UST app %d create trigger group failed. Application is dead", app->sock);
3896 }
3897 goto end;
3898 }
3899
3900 lttng_ret = notification_thread_command_add_application(
3901 notification_thread_handle, lttng_pipe_get_readfd(app->token_communication.trigger_event_pipe), LTTNG_DOMAIN_UST);
3902 if (lttng_ret != LTTNG_OK) {
3903 /* TODO: error */
3904 ret = - 1;
3905 ERR("Failed to add channel to notification thread");
3906 goto end;
3907 }
3908
3909 /* Assign handle only when the complete setup is valid */
3910 app->token_communication.handle = group;
3911
3912 trigger_error_accounting_status = trigger_error_accounting_register_app(app);
3913 if (trigger_error_accounting_status != TRIGGER_ERROR_ACCOUNTING_STATUS_OK) {
3914 ERR("Failed to setup trigger error accouting for app");
3915 ret = -1;
3916 goto end;
3917 }
3918
3919
3920end:
3921 return ret;
3922}
3923
5b4a0ec0
DG
3924/*
3925 * Unregister app by removing it from the global traceable app list and freeing
3926 * the data struct.
3927 *
3928 * The socket is already closed at this point so no close to sock.
3929 */
3930void ust_app_unregister(int sock)
3931{
2463b787 3932 enum lttng_error_code ret_code;
5b4a0ec0 3933 struct ust_app *lta;
bec39940 3934 struct lttng_ht_node_ulong *node;
c4b88406 3935 struct lttng_ht_iter ust_app_sock_iter;
bec39940 3936 struct lttng_ht_iter iter;
d42f20df 3937 struct ust_app_session *ua_sess;
525b0740 3938 int ret;
5b4a0ec0
DG
3939
3940 rcu_read_lock();
886459c6 3941
5b4a0ec0 3942 /* Get the node reference for a call_rcu */
c4b88406
MD
3943 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
3944 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
d0b96690 3945 assert(node);
284d8f55 3946
852d0037 3947 lta = caa_container_of(node, struct ust_app, sock_n);
852d0037
DG
3948 DBG("PID %d unregistering with sock %d", lta->pid, sock);
3949
d88aee68 3950 /*
ce34fcd0
MD
3951 * For per-PID buffers, perform "push metadata" and flush all
3952 * application streams before removing app from hash tables,
3953 * ensuring proper behavior of data_pending check.
c4b88406 3954 * Remove sessions so they are not visible during deletion.
d88aee68 3955 */
d42f20df
DG
3956 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
3957 node.node) {
7972aab2
DG
3958 struct ust_registry_session *registry;
3959
d42f20df
DG
3960 ret = lttng_ht_del(lta->sessions, &iter);
3961 if (ret) {
3962 /* The session was already removed so scheduled for teardown. */
3963 continue;
3964 }
3965
ce34fcd0
MD
3966 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
3967 (void) ust_app_flush_app_session(lta, ua_sess);
3968 }
c4b88406 3969
d42f20df
DG
3970 /*
3971 * Add session to list for teardown. This is safe since at this point we
3972 * are the only one using this list.
3973 */
d88aee68
DG
3974 pthread_mutex_lock(&ua_sess->lock);
3975
b161602a
MD
3976 if (ua_sess->deleted) {
3977 pthread_mutex_unlock(&ua_sess->lock);
3978 continue;
3979 }
3980
d88aee68
DG
3981 /*
3982 * Normally, this is done in the delete session process which is
3983 * executed in the call rcu below. However, upon registration we can't
3984 * afford to wait for the grace period before pushing data or else the
3985 * data pending feature can race between the unregistration and stop
3986 * command where the data pending command is sent *before* the grace
3987 * period ended.
3988 *
3989 * The close metadata below nullifies the metadata pointer in the
3990 * session so the delete session will NOT push/close a second time.
3991 */
7972aab2 3992 registry = get_session_registry(ua_sess);
ce34fcd0 3993 if (registry) {
7972aab2
DG
3994 /* Push metadata for application before freeing the application. */
3995 (void) push_metadata(registry, ua_sess->consumer);
3996
3997 /*
3998 * Don't ask to close metadata for global per UID buffers. Close
1b532a60
DG
3999 * metadata only on destroy trace session in this case. Also, the
4000 * previous push metadata could have flag the metadata registry to
4001 * close so don't send a close command if closed.
7972aab2 4002 */
ce34fcd0 4003 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
7972aab2
DG
4004 /* And ask to close it for this session registry. */
4005 (void) close_metadata(registry, ua_sess->consumer);
4006 }
4007 }
d42f20df 4008 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
c4b88406 4009
d88aee68 4010 pthread_mutex_unlock(&ua_sess->lock);
d42f20df
DG
4011 }
4012
c4b88406
MD
4013 /* Remove application from PID hash table */
4014 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
4015 assert(!ret);
4016
4017 /*
4018 * Remove application from notify hash table. The thread handling the
4019 * notify socket could have deleted the node so ignore on error because
c48239ca
JG
4020 * either way it's valid. The close of that socket is handled by the
4021 * apps_notify_thread.
c4b88406
MD
4022 */
4023 iter.iter.node = &lta->notify_sock_n.node;
4024 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4025
4026 /*
4027 * Ignore return value since the node might have been removed before by an
4028 * add replace during app registration because the PID can be reassigned by
4029 * the OS.
4030 */
4031 iter.iter.node = &lta->pid_n.node;
4032 ret = lttng_ht_del(ust_app_ht, &iter);
4033 if (ret) {
4034 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
4035 lta->pid);
4036 }
4037
2463b787
JR
4038 /* trigger handle can be null in certain scenario such as a dead app */
4039 if (lta->token_communication.handle) {
4040 int fd = lttng_pipe_get_readfd(
4041 lta->token_communication.trigger_event_pipe);
4042
4043 ret_code = notification_thread_command_remove_application(
4044 notification_thread_handle,
4045 fd);
4046 if (ret_code != LTTNG_OK) {
4047 ERR("Failed to remove application from notification thread");
4048 }
4049 }
4050
852d0037
DG
4051 /* Free memory */
4052 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
4053
5b4a0ec0
DG
4054 rcu_read_unlock();
4055 return;
284d8f55
DG
4056}
4057
5b4a0ec0
DG
4058/*
4059 * Fill events array with all events name of all registered apps.
4060 */
4061int ust_app_list_events(struct lttng_event **events)
421cb601 4062{
5b4a0ec0
DG
4063 int ret, handle;
4064 size_t nbmem, count = 0;
bec39940 4065 struct lttng_ht_iter iter;
5b4a0ec0 4066 struct ust_app *app;
c617c0c6 4067 struct lttng_event *tmp_event;
421cb601 4068
5b4a0ec0 4069 nbmem = UST_APP_EVENT_LIST_SIZE;
c617c0c6
MD
4070 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
4071 if (tmp_event == NULL) {
5b4a0ec0
DG
4072 PERROR("zmalloc ust app events");
4073 ret = -ENOMEM;
421cb601
DG
4074 goto error;
4075 }
4076
5b4a0ec0 4077 rcu_read_lock();
421cb601 4078
852d0037 4079 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
90eaa0d2 4080 struct lttng_ust_tracepoint_iter uiter;
ac3bd9c0 4081
840cb59c 4082 health_code_update();
86acf0da 4083
e0c7ec2b
DG
4084 if (!app->compatible) {
4085 /*
4086 * TODO: In time, we should notice the caller of this error by
4087 * telling him that this is a version error.
4088 */
4089 continue;
4090 }
fb45065e 4091 pthread_mutex_lock(&app->sock_lock);
852d0037 4092 handle = ustctl_tracepoint_list(app->sock);
5b4a0ec0 4093 if (handle < 0) {
ffe60014
DG
4094 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4095 ERR("UST app list events getting handle failed for app pid %d",
4096 app->pid);
4097 }
fb45065e 4098 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0
DG
4099 continue;
4100 }
421cb601 4101
852d0037 4102 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
fb54cdbf 4103 &uiter)) != -LTTNG_UST_ERR_NOENT) {
ffe60014
DG
4104 /* Handle ustctl error. */
4105 if (ret < 0) {
fb45065e
MD
4106 int release_ret;
4107
a2ba1ab0 4108 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ffe60014
DG
4109 ERR("UST app tp list get failed for app %d with ret %d",
4110 app->sock, ret);
4111 } else {
4112 DBG3("UST app tp list get failed. Application is dead");
3757b385
DG
4113 /*
4114 * This is normal behavior, an application can die during the
4115 * creation process. Don't report an error so the execution can
4116 * continue normally. Continue normal execution.
4117 */
4118 break;
ffe60014 4119 }
98f595d4 4120 free(tmp_event);
fb45065e 4121 release_ret = ustctl_release_handle(app->sock, handle);
68313703
JG
4122 if (release_ret < 0 &&
4123 release_ret != -LTTNG_UST_ERR_EXITING &&
4124 release_ret != -EPIPE) {
fb45065e
MD
4125 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4126 }
4127 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4128 goto rcu_error;
4129 }
4130
840cb59c 4131 health_code_update();
815564d8 4132 if (count >= nbmem) {
d7b3776f 4133 /* In case the realloc fails, we free the memory */
53efb85a
MD
4134 struct lttng_event *new_tmp_event;
4135 size_t new_nbmem;
4136
4137 new_nbmem = nbmem << 1;
4138 DBG2("Reallocating event list from %zu to %zu entries",
4139 nbmem, new_nbmem);
4140 new_tmp_event = realloc(tmp_event,
4141 new_nbmem * sizeof(struct lttng_event));
4142 if (new_tmp_event == NULL) {
fb45065e
MD
4143 int release_ret;
4144
5b4a0ec0 4145 PERROR("realloc ust app events");
c617c0c6 4146 free(tmp_event);
5b4a0ec0 4147 ret = -ENOMEM;
fb45065e 4148 release_ret = ustctl_release_handle(app->sock, handle);
68313703
JG
4149 if (release_ret < 0 &&
4150 release_ret != -LTTNG_UST_ERR_EXITING &&
4151 release_ret != -EPIPE) {
fb45065e
MD
4152 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4153 }
4154 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0
DG
4155 goto rcu_error;
4156 }
53efb85a
MD
4157 /* Zero the new memory */
4158 memset(new_tmp_event + nbmem, 0,
4159 (new_nbmem - nbmem) * sizeof(struct lttng_event));
4160 nbmem = new_nbmem;
4161 tmp_event = new_tmp_event;
5b4a0ec0 4162 }
c617c0c6
MD
4163 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
4164 tmp_event[count].loglevel = uiter.loglevel;
4165 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
4166 tmp_event[count].pid = app->pid;
4167 tmp_event[count].enabled = -1;
5b4a0ec0 4168 count++;
421cb601 4169 }
fb45065e
MD
4170 ret = ustctl_release_handle(app->sock, handle);
4171 pthread_mutex_unlock(&app->sock_lock);
68313703 4172 if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
fb45065e
MD
4173 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4174 }
421cb601
DG
4175 }
4176
5b4a0ec0 4177 ret = count;
c617c0c6 4178 *events = tmp_event;
421cb601 4179
5b4a0ec0 4180 DBG2("UST app list events done (%zu events)", count);
421cb601 4181
5b4a0ec0
DG
4182rcu_error:
4183 rcu_read_unlock();
421cb601 4184error:
840cb59c 4185 health_code_update();
5b4a0ec0 4186 return ret;
421cb601
DG
4187}
4188
f37d259d
MD
4189/*
4190 * Fill events array with all events name of all registered apps.
4191 */
4192int ust_app_list_event_fields(struct lttng_event_field **fields)
4193{
4194 int ret, handle;
4195 size_t nbmem, count = 0;
4196 struct lttng_ht_iter iter;
4197 struct ust_app *app;
c617c0c6 4198 struct lttng_event_field *tmp_event;
f37d259d
MD
4199
4200 nbmem = UST_APP_EVENT_LIST_SIZE;
c617c0c6
MD
4201 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
4202 if (tmp_event == NULL) {
f37d259d
MD
4203 PERROR("zmalloc ust app event fields");
4204 ret = -ENOMEM;
4205 goto error;
4206 }
4207
4208 rcu_read_lock();
4209
4210 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4211 struct lttng_ust_field_iter uiter;
4212
840cb59c 4213 health_code_update();
86acf0da 4214
f37d259d
MD
4215 if (!app->compatible) {
4216 /*
4217 * TODO: In time, we should notice the caller of this error by
4218 * telling him that this is a version error.
4219 */
4220 continue;
4221 }
fb45065e 4222 pthread_mutex_lock(&app->sock_lock);
f37d259d
MD
4223 handle = ustctl_tracepoint_field_list(app->sock);
4224 if (handle < 0) {
ffe60014
DG
4225 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4226 ERR("UST app list field getting handle failed for app pid %d",
4227 app->pid);
4228 }
fb45065e 4229 pthread_mutex_unlock(&app->sock_lock);
f37d259d
MD
4230 continue;
4231 }
4232
4233 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
fb54cdbf 4234 &uiter)) != -LTTNG_UST_ERR_NOENT) {
ffe60014
DG
4235 /* Handle ustctl error. */
4236 if (ret < 0) {
fb45065e
MD
4237 int release_ret;
4238
a2ba1ab0 4239 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ffe60014
DG
4240 ERR("UST app tp list field failed for app %d with ret %d",
4241 app->sock, ret);
4242 } else {
4243 DBG3("UST app tp list field failed. Application is dead");
3757b385
DG
4244 /*
4245 * This is normal behavior, an application can die during the
4246 * creation process. Don't report an error so the execution can
98f595d4 4247 * continue normally. Reset list and count for next app.
3757b385
DG
4248 */
4249 break;
ffe60014 4250 }
98f595d4 4251 free(tmp_event);
fb45065e
MD
4252 release_ret = ustctl_release_handle(app->sock, handle);
4253 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
4254 if (release_ret < 0 &&
4255 release_ret != -LTTNG_UST_ERR_EXITING &&
4256 release_ret != -EPIPE) {
fb45065e
MD
4257 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4258 }
ffe60014
DG
4259 goto rcu_error;
4260 }
4261
840cb59c 4262 health_code_update();
f37d259d 4263 if (count >= nbmem) {
d7b3776f 4264 /* In case the realloc fails, we free the memory */
53efb85a
MD
4265 struct lttng_event_field *new_tmp_event;
4266 size_t new_nbmem;
4267
4268 new_nbmem = nbmem << 1;
4269 DBG2("Reallocating event field list from %zu to %zu entries",
4270 nbmem, new_nbmem);
4271 new_tmp_event = realloc(tmp_event,
4272 new_nbmem * sizeof(struct lttng_event_field));
4273 if (new_tmp_event == NULL) {
fb45065e
MD
4274 int release_ret;
4275
f37d259d 4276 PERROR("realloc ust app event fields");
c617c0c6 4277 free(tmp_event);
f37d259d 4278 ret = -ENOMEM;
fb45065e
MD
4279 release_ret = ustctl_release_handle(app->sock, handle);
4280 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
4281 if (release_ret &&
4282 release_ret != -LTTNG_UST_ERR_EXITING &&
4283 release_ret != -EPIPE) {
fb45065e
MD
4284 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4285 }
f37d259d
MD
4286 goto rcu_error;
4287 }
53efb85a
MD
4288 /* Zero the new memory */
4289 memset(new_tmp_event + nbmem, 0,
4290 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
4291 nbmem = new_nbmem;
4292 tmp_event = new_tmp_event;
f37d259d 4293 }
f37d259d 4294
c617c0c6 4295 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
2e84128e
DG
4296 /* Mapping between these enums matches 1 to 1. */
4297 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
c617c0c6 4298 tmp_event[count].nowrite = uiter.nowrite;
f37d259d 4299
c617c0c6
MD
4300 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
4301 tmp_event[count].event.loglevel = uiter.loglevel;
2e84128e 4302 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
c617c0c6
MD
4303 tmp_event[count].event.pid = app->pid;
4304 tmp_event[count].event.enabled = -1;
f37d259d
MD
4305 count++;
4306 }
fb45065e
MD
4307 ret = ustctl_release_handle(app->sock, handle);
4308 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
4309 if (ret < 0 &&
4310 ret != -LTTNG_UST_ERR_EXITING &&
4311 ret != -EPIPE) {
fb45065e
MD
4312 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4313 }
f37d259d
MD
4314 }
4315
4316 ret = count;
c617c0c6 4317 *fields = tmp_event;
f37d259d
MD
4318
4319 DBG2("UST app list event fields done (%zu events)", count);
4320
4321rcu_error:
4322 rcu_read_unlock();
4323error:
840cb59c 4324 health_code_update();
f37d259d
MD
4325 return ret;
4326}
4327
5b4a0ec0
DG
4328/*
4329 * Free and clean all traceable apps of the global list.
36b588ed
MD
4330 *
4331 * Should _NOT_ be called with RCU read-side lock held.
5b4a0ec0
DG
4332 */
4333void ust_app_clean_list(void)
421cb601 4334{
5b4a0ec0 4335 int ret;
659ed79f 4336 struct ust_app *app;
bec39940 4337 struct lttng_ht_iter iter;
421cb601 4338
5b4a0ec0 4339 DBG2("UST app cleaning registered apps hash table");
421cb601 4340
5b4a0ec0 4341 rcu_read_lock();
421cb601 4342
faadaa3a
JG
4343 /* Cleanup notify socket hash table */
4344 if (ust_app_ht_by_notify_sock) {
4345 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
4346 notify_sock_n.node) {
4347 struct cds_lfht_node *node;
4348 struct ust_app *app;
4349
4350 node = cds_lfht_iter_get_node(&iter.iter);
4351 if (!node) {
4352 continue;
4353 }
4354
4355 app = container_of(node, struct ust_app,
4356 notify_sock_n.node);
4357 ust_app_notify_sock_unregister(app->notify_sock);
4358 }
4359 }
4360
f1b711c4
MD
4361 if (ust_app_ht) {
4362 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4363 ret = lttng_ht_del(ust_app_ht, &iter);
4364 assert(!ret);
4365 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4366 }
421cb601
DG
4367 }
4368
852d0037 4369 /* Cleanup socket hash table */
f1b711c4
MD
4370 if (ust_app_ht_by_sock) {
4371 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
4372 sock_n.node) {
4373 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
4374 assert(!ret);
4375 }
bec39940 4376 }
852d0037 4377
36b588ed 4378 rcu_read_unlock();
d88aee68 4379
bec39940 4380 /* Destroy is done only when the ht is empty */
f1b711c4
MD
4381 if (ust_app_ht) {
4382 ht_cleanup_push(ust_app_ht);
4383 }
4384 if (ust_app_ht_by_sock) {
4385 ht_cleanup_push(ust_app_ht_by_sock);
4386 }
4387 if (ust_app_ht_by_notify_sock) {
4388 ht_cleanup_push(ust_app_ht_by_notify_sock);
4389 }
5b4a0ec0
DG
4390}
4391
4392/*
4393 * Init UST app hash table.
4394 */
57703f6e 4395int ust_app_ht_alloc(void)
5b4a0ec0 4396{
bec39940 4397 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
4398 if (!ust_app_ht) {
4399 return -1;
4400 }
852d0037 4401 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
4402 if (!ust_app_ht_by_sock) {
4403 return -1;
4404 }
d0b96690 4405 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
4406 if (!ust_app_ht_by_notify_sock) {
4407 return -1;
4408 }
4409 return 0;
421cb601
DG
4410}
4411
78f0bacd
DG
4412/*
4413 * For a specific UST session, disable the channel for all registered apps.
4414 */
35a9059d 4415int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
78f0bacd
DG
4416 struct ltt_ust_channel *uchan)
4417{
4418 int ret = 0;
bec39940
DG
4419 struct lttng_ht_iter iter;
4420 struct lttng_ht_node_str *ua_chan_node;
78f0bacd
DG
4421 struct ust_app *app;
4422 struct ust_app_session *ua_sess;
8535a6d9 4423 struct ust_app_channel *ua_chan;
78f0bacd 4424
88e3c2f5 4425 assert(usess->active);
d9bf3ca4 4426 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
a991f516 4427 uchan->name, usess->id);
78f0bacd
DG
4428
4429 rcu_read_lock();
4430
4431 /* For every registered applications */
852d0037 4432 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
bec39940 4433 struct lttng_ht_iter uiter;
e0c7ec2b
DG
4434 if (!app->compatible) {
4435 /*
4436 * TODO: In time, we should notice the caller of this error by
4437 * telling him that this is a version error.
4438 */
4439 continue;
4440 }
78f0bacd
DG
4441 ua_sess = lookup_session_by_app(usess, app);
4442 if (ua_sess == NULL) {
4443 continue;
4444 }
4445
8535a6d9 4446 /* Get channel */
bec39940
DG
4447 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4448 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
8535a6d9
DG
4449 /* If the session if found for the app, the channel must be there */
4450 assert(ua_chan_node);
4451
4452 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4453 /* The channel must not be already disabled */
4454 assert(ua_chan->enabled == 1);
4455
4456 /* Disable channel onto application */
4457 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
78f0bacd
DG
4458 if (ret < 0) {
4459 /* XXX: We might want to report this error at some point... */
4460 continue;
4461 }
4462 }
4463
4464 rcu_read_unlock();
78f0bacd
DG
4465 return ret;
4466}
4467
4468/*
4469 * For a specific UST session, enable the channel for all registered apps.
4470 */
35a9059d 4471int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
78f0bacd
DG
4472 struct ltt_ust_channel *uchan)
4473{
4474 int ret = 0;
bec39940 4475 struct lttng_ht_iter iter;
78f0bacd
DG
4476 struct ust_app *app;
4477 struct ust_app_session *ua_sess;
4478
88e3c2f5 4479 assert(usess->active);
d9bf3ca4 4480 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
a991f516 4481 uchan->name, usess->id);
78f0bacd
DG
4482
4483 rcu_read_lock();
4484
4485 /* For every registered applications */
852d0037 4486 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4487 if (!app->compatible) {
4488 /*
4489 * TODO: In time, we should notice the caller of this error by
4490 * telling him that this is a version error.
4491 */
4492 continue;
4493 }
78f0bacd
DG
4494 ua_sess = lookup_session_by_app(usess, app);
4495 if (ua_sess == NULL) {
4496 continue;
4497 }
4498
4499 /* Enable channel onto application */
4500 ret = enable_ust_app_channel(ua_sess, uchan, app);
4501 if (ret < 0) {
4502 /* XXX: We might want to report this error at some point... */
4503 continue;
4504 }
4505 }
4506
4507 rcu_read_unlock();
78f0bacd
DG
4508 return ret;
4509}
4510
b0a40d28
DG
4511/*
4512 * Disable an event in a channel and for a specific session.
4513 */
35a9059d
DG
4514int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4515 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
b0a40d28
DG
4516{
4517 int ret = 0;
bec39940 4518 struct lttng_ht_iter iter, uiter;
700c5a9d 4519 struct lttng_ht_node_str *ua_chan_node;
b0a40d28
DG
4520 struct ust_app *app;
4521 struct ust_app_session *ua_sess;
4522 struct ust_app_channel *ua_chan;
4523 struct ust_app_event *ua_event;
4524
88e3c2f5 4525 assert(usess->active);
b0a40d28 4526 DBG("UST app disabling event %s for all apps in channel "
d9bf3ca4
MD
4527 "%s for session id %" PRIu64,
4528 uevent->attr.name, uchan->name, usess->id);
b0a40d28
DG
4529
4530 rcu_read_lock();
4531
4532 /* For all registered applications */
852d0037 4533 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4534 if (!app->compatible) {
4535 /*
4536 * TODO: In time, we should notice the caller of this error by
4537 * telling him that this is a version error.
4538 */
4539 continue;
4540 }
b0a40d28
DG
4541 ua_sess = lookup_session_by_app(usess, app);
4542 if (ua_sess == NULL) {
4543 /* Next app */
4544 continue;
4545 }
4546
4547 /* Lookup channel in the ust app session */
bec39940
DG
4548 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4549 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
b0a40d28 4550 if (ua_chan_node == NULL) {
d9bf3ca4 4551 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
852d0037 4552 "Skipping", uchan->name, usess->id, app->pid);
b0a40d28
DG
4553 continue;
4554 }
4555 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4556
700c5a9d
JR
4557 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4558 uevent->filter, uevent->attr.loglevel,
4559 uevent->exclusion);
4560 if (ua_event == NULL) {
b0a40d28 4561 DBG2("Event %s not found in channel %s for app pid %d."
852d0037 4562 "Skipping", uevent->attr.name, uchan->name, app->pid);
b0a40d28
DG
4563 continue;
4564 }
b0a40d28 4565
7f79d3a1 4566 ret = disable_ust_app_event(ua_sess, ua_event, app);
b0a40d28
DG
4567 if (ret < 0) {
4568 /* XXX: Report error someday... */
4569 continue;
4570 }
4571 }
4572
4573 rcu_read_unlock();
88e3c2f5
JG
4574 return ret;
4575}
4576
/*
 * Create (or, for the metadata channel, configure) a channel for one
 * application session and synchronize its configuration and contexts.
 *
 * The ua_sess lock must be held by the caller.
 *
 * On success, *_ua_chan (if non-NULL) receives the app channel; it is
 * NULL for the metadata channel. -ENOTCONN from the app socket is
 * downgraded to 0 since a vanished app is not a caller-visible error.
 */
static
int ust_app_channel_create(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		struct ust_app_channel **_ua_chan)
{
	int ret = 0;
	struct ust_app_channel *ua_chan = NULL;

	assert(ua_sess);
	ASSERT_LOCKED(ua_sess->lock);

	if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
		     sizeof(uchan->name))) {
		/* Metadata channel: only copy the attributes, no app object. */
		copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&uchan->attr);
		ret = 0;
	} else {
		struct ltt_ust_context *uctx = NULL;

		/*
		 * Create channel onto application and synchronize its
		 * configuration.
		 */
		ret = ust_app_channel_allocate(ua_sess, uchan,
			LTTNG_UST_CHAN_PER_CPU, usess,
			&ua_chan);
		if (ret < 0) {
			goto error;
		}

		ret = ust_app_channel_send(app, usess,
			ua_sess, ua_chan);
		if (ret) {
			goto error;
		}

		/* Add contexts. */
		cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
			ret = create_ust_app_channel_context(ua_chan,
					&uctx->ctx, app);
			if (ret) {
				goto error;
			}
		}
	}

error:
	if (ret < 0) {
		switch (ret) {
		case -ENOTCONN:
			/*
			 * The application's socket is not valid. Either a bad socket
			 * or a timeout on it. We can't inform the caller that for a
			 * specific app, the session failed so lets continue here.
			 */
			ret = 0; /* Not an error. */
			break;
		case -ENOMEM:
		default:
			break;
		}
	}

	if (ret == 0 && _ua_chan) {
		/*
		 * Only return the application's channel on success. Note
		 * that the channel can still be part of the application's
		 * channel hashtable on error.
		 */
		*_ua_chan = ua_chan;
	}
	return ret;
}
4652
5b4a0ec0 4653/*
edb67388 4654 * Enable event for a specific session and channel on the tracer.
5b4a0ec0 4655 */
35a9059d 4656int ust_app_enable_event_glb(struct ltt_ust_session *usess,
48842b30
DG
4657 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4658{
4659 int ret = 0;
bec39940 4660 struct lttng_ht_iter iter, uiter;
18eace3b 4661 struct lttng_ht_node_str *ua_chan_node;
48842b30
DG
4662 struct ust_app *app;
4663 struct ust_app_session *ua_sess;
4664 struct ust_app_channel *ua_chan;
4665 struct ust_app_event *ua_event;
48842b30 4666
88e3c2f5 4667 assert(usess->active);
d9bf3ca4 4668 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
a991f516 4669 uevent->attr.name, usess->id);
48842b30 4670
edb67388
DG
4671 /*
4672 * NOTE: At this point, this function is called only if the session and
4673 * channel passed are already created for all apps. and enabled on the
4674 * tracer also.
4675 */
4676
48842b30 4677 rcu_read_lock();
421cb601
DG
4678
4679 /* For all registered applications */
852d0037 4680 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4681 if (!app->compatible) {
4682 /*
4683 * TODO: In time, we should notice the caller of this error by
4684 * telling him that this is a version error.
4685 */
4686 continue;
4687 }
edb67388 4688 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
4689 if (!ua_sess) {
4690 /* The application has problem or is probably dead. */
4691 continue;
4692 }
ba767faf 4693
d0b96690
DG
4694 pthread_mutex_lock(&ua_sess->lock);
4695
b161602a
MD
4696 if (ua_sess->deleted) {
4697 pthread_mutex_unlock(&ua_sess->lock);
4698 continue;
4699 }
4700
edb67388 4701 /* Lookup channel in the ust app session */
bec39940
DG
4702 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4703 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
a7169585
MD
4704 /*
4705 * It is possible that the channel cannot be found is
4706 * the channel/event creation occurs concurrently with
4707 * an application exit.
4708 */
4709 if (!ua_chan_node) {
4710 pthread_mutex_unlock(&ua_sess->lock);
4711 continue;
4712 }
edb67388
DG
4713
4714 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4715
18eace3b
DG
4716 /* Get event node */
4717 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
39c5a3a7 4718 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
18eace3b 4719 if (ua_event == NULL) {
7f79d3a1 4720 DBG3("UST app enable event %s not found for app PID %d."
852d0037 4721 "Skipping app", uevent->attr.name, app->pid);
d0b96690 4722 goto next_app;
35a9059d 4723 }
35a9059d
DG
4724
4725 ret = enable_ust_app_event(ua_sess, ua_event, app);
4726 if (ret < 0) {
d0b96690 4727 pthread_mutex_unlock(&ua_sess->lock);
7f79d3a1 4728 goto error;
48842b30 4729 }
d0b96690
DG
4730 next_app:
4731 pthread_mutex_unlock(&ua_sess->lock);
edb67388
DG
4732 }
4733
7f79d3a1 4734error:
edb67388 4735 rcu_read_unlock();
edb67388
DG
4736 return ret;
4737}
4738
4739/*
4740 * For a specific existing UST session and UST channel, creates the event for
4741 * all registered apps.
4742 */
35a9059d 4743int ust_app_create_event_glb(struct ltt_ust_session *usess,
edb67388
DG
4744 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4745{
4746 int ret = 0;
bec39940
DG
4747 struct lttng_ht_iter iter, uiter;
4748 struct lttng_ht_node_str *ua_chan_node;
edb67388
DG
4749 struct ust_app *app;
4750 struct ust_app_session *ua_sess;
4751 struct ust_app_channel *ua_chan;
4752
88e3c2f5 4753 assert(usess->active);
d9bf3ca4 4754 DBG("UST app creating event %s for all apps for session id %" PRIu64,
a991f516 4755 uevent->attr.name, usess->id);
edb67388 4756
edb67388
DG
4757 rcu_read_lock();
4758
4759 /* For all registered applications */
852d0037 4760 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4761 if (!app->compatible) {
4762 /*
4763 * TODO: In time, we should notice the caller of this error by
4764 * telling him that this is a version error.
4765 */
4766 continue;
4767 }
edb67388 4768 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
4769 if (!ua_sess) {
4770 /* The application has problem or is probably dead. */
4771 continue;
4772 }
48842b30 4773
d0b96690 4774 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
4775
4776 if (ua_sess->deleted) {
4777 pthread_mutex_unlock(&ua_sess->lock);
4778 continue;
4779 }
4780
48842b30 4781 /* Lookup channel in the ust app session */
bec39940
DG
4782 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4783 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
edb67388
DG
4784 /* If the channel is not found, there is a code flow error */
4785 assert(ua_chan_node);
4786
48842b30
DG
4787 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4788
edb67388 4789 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
d0b96690 4790 pthread_mutex_unlock(&ua_sess->lock);
edb67388 4791 if (ret < 0) {
49c336c1 4792 if (ret != -LTTNG_UST_ERR_EXIST) {
fc34caaa
DG
4793 /* Possible value at this point: -ENOMEM. If so, we stop! */
4794 break;
4795 }
4796 DBG2("UST app event %s already exist on app PID %d",
852d0037 4797 uevent->attr.name, app->pid);
5b4a0ec0 4798 continue;
48842b30 4799 }
48842b30 4800 }
5b4a0ec0 4801
48842b30 4802 rcu_read_unlock();
48842b30
DG
4803 return ret;
4804}
4805
5b4a0ec0
DG
4806/*
4807 * Start tracing for a specific UST session and app.
fad1ed2f
JR
4808 *
4809 * Called with UST app session lock held.
4810 *
5b4a0ec0 4811 */
b34cbebf 4812static
421cb601 4813int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
48842b30
DG
4814{
4815 int ret = 0;
48842b30 4816 struct ust_app_session *ua_sess;
48842b30 4817
852d0037 4818 DBG("Starting tracing for ust app pid %d", app->pid);
5cf5d0e7 4819
509cbaf8
MD
4820 rcu_read_lock();
4821
e0c7ec2b
DG
4822 if (!app->compatible) {
4823 goto end;
4824 }
4825
421cb601
DG
4826 ua_sess = lookup_session_by_app(usess, app);
4827 if (ua_sess == NULL) {
d42f20df
DG
4828 /* The session is in teardown process. Ignore and continue. */
4829 goto end;
421cb601 4830 }
48842b30 4831
d0b96690
DG
4832 pthread_mutex_lock(&ua_sess->lock);
4833
b161602a
MD
4834 if (ua_sess->deleted) {
4835 pthread_mutex_unlock(&ua_sess->lock);
4836 goto end;
4837 }
4838
b0a1c741
JR
4839 if (ua_sess->enabled) {
4840 pthread_mutex_unlock(&ua_sess->lock);
4841 goto end;
4842 }
4843
aea829b3
DG
4844 /* Upon restart, we skip the setup, already done */
4845 if (ua_sess->started) {
8be98f9a 4846 goto skip_setup;
aea829b3 4847 }
8be98f9a 4848
d65d2de8
DG
4849 /*
4850 * Create the metadata for the application. This returns gracefully if a
4851 * metadata was already set for the session.
4852 */
ad7a9107 4853 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
421cb601 4854 if (ret < 0) {
d0b96690 4855 goto error_unlock;
421cb601 4856 }
48842b30 4857
840cb59c 4858 health_code_update();
86acf0da 4859
8be98f9a 4860skip_setup:
a945cdc7 4861 /* This starts the UST tracing */
fb45065e 4862 pthread_mutex_lock(&app->sock_lock);
852d0037 4863 ret = ustctl_start_session(app->sock, ua_sess->handle);
fb45065e 4864 pthread_mutex_unlock(&app->sock_lock);
421cb601 4865 if (ret < 0) {
ffe60014
DG
4866 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4867 ERR("Error starting tracing for app pid: %d (ret: %d)",
4868 app->pid, ret);
4869 } else {
4870 DBG("UST app start session failed. Application is dead.");
3757b385
DG
4871 /*
4872 * This is normal behavior, an application can die during the
4873 * creation process. Don't report an error so the execution can
4874 * continue normally.
4875 */
4876 pthread_mutex_unlock(&ua_sess->lock);
4877 goto end;
ffe60014 4878 }
d0b96690 4879 goto error_unlock;
421cb601 4880 }
5b4a0ec0 4881
55c3953d
DG
4882 /* Indicate that the session has been started once */
4883 ua_sess->started = 1;
b0a1c741 4884 ua_sess->enabled = 1;
55c3953d 4885
d0b96690
DG
4886 pthread_mutex_unlock(&ua_sess->lock);
4887
840cb59c 4888 health_code_update();
86acf0da 4889
421cb601 4890 /* Quiescent wait after starting trace */
fb45065e 4891 pthread_mutex_lock(&app->sock_lock);
ffe60014 4892 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4893 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4894 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4895 ERR("UST app wait quiescent failed for app pid %d ret %d",
4896 app->pid, ret);
4897 }
48842b30 4898
e0c7ec2b
DG
4899end:
4900 rcu_read_unlock();
840cb59c 4901 health_code_update();
421cb601 4902 return 0;
48842b30 4903
d0b96690
DG
4904error_unlock:
4905 pthread_mutex_unlock(&ua_sess->lock);
509cbaf8 4906 rcu_read_unlock();
840cb59c 4907 health_code_update();
421cb601
DG
4908 return -1;
4909}
48842b30 4910
8be98f9a
MD
4911/*
4912 * Stop tracing for a specific UST session and app.
4913 */
b34cbebf 4914static
8be98f9a
MD
4915int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
4916{
4917 int ret = 0;
4918 struct ust_app_session *ua_sess;
7972aab2 4919 struct ust_registry_session *registry;
8be98f9a 4920
852d0037 4921 DBG("Stopping tracing for ust app pid %d", app->pid);
8be98f9a
MD
4922
4923 rcu_read_lock();
4924
e0c7ec2b 4925 if (!app->compatible) {
d88aee68 4926 goto end_no_session;
e0c7ec2b
DG
4927 }
4928
8be98f9a
MD
4929 ua_sess = lookup_session_by_app(usess, app);
4930 if (ua_sess == NULL) {
d88aee68 4931 goto end_no_session;
8be98f9a
MD
4932 }
4933
d88aee68
DG
4934 pthread_mutex_lock(&ua_sess->lock);
4935
b161602a
MD
4936 if (ua_sess->deleted) {
4937 pthread_mutex_unlock(&ua_sess->lock);
4938 goto end_no_session;
4939 }
4940
9bc07046
DG
4941 /*
4942 * If started = 0, it means that stop trace has been called for a session
c45536e1
DG
4943 * that was never started. It's possible since we can have a fail start
4944 * from either the application manager thread or the command thread. Simply
4945 * indicate that this is a stop error.
9bc07046 4946 */
f9dfc3d9 4947 if (!ua_sess->started) {
c45536e1
DG
4948 goto error_rcu_unlock;
4949 }
7db205b5 4950
840cb59c 4951 health_code_update();
86acf0da 4952
9d6c7d3f 4953 /* This inhibits UST tracing */
fb45065e 4954 pthread_mutex_lock(&app->sock_lock);
852d0037 4955 ret = ustctl_stop_session(app->sock, ua_sess->handle);
fb45065e 4956 pthread_mutex_unlock(&app->sock_lock);
9d6c7d3f 4957 if (ret < 0) {
ffe60014
DG
4958 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4959 ERR("Error stopping tracing for app pid: %d (ret: %d)",
4960 app->pid, ret);
4961 } else {
4962 DBG("UST app stop session failed. Application is dead.");
3757b385
DG
4963 /*
4964 * This is normal behavior, an application can die during the
4965 * creation process. Don't report an error so the execution can
4966 * continue normally.
4967 */
4968 goto end_unlock;
ffe60014 4969 }
9d6c7d3f
DG
4970 goto error_rcu_unlock;
4971 }
4972
840cb59c 4973 health_code_update();
b0a1c741 4974 ua_sess->enabled = 0;
86acf0da 4975
9d6c7d3f 4976 /* Quiescent wait after stopping trace */
fb45065e 4977 pthread_mutex_lock(&app->sock_lock);
ffe60014 4978 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4979 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4980 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4981 ERR("UST app wait quiescent failed for app pid %d ret %d",
4982 app->pid, ret);
4983 }
9d6c7d3f 4984
840cb59c 4985 health_code_update();
86acf0da 4986
b34cbebf 4987 registry = get_session_registry(ua_sess);
fad1ed2f
JR
4988
4989 /* The UST app session is held registry shall not be null. */
b34cbebf 4990 assert(registry);
1b532a60 4991
ce34fcd0
MD
4992 /* Push metadata for application before freeing the application. */
4993 (void) push_metadata(registry, ua_sess->consumer);
b34cbebf 4994
3757b385 4995end_unlock:
b34cbebf
MD
4996 pthread_mutex_unlock(&ua_sess->lock);
4997end_no_session:
4998 rcu_read_unlock();
4999 health_code_update();
5000 return 0;
5001
5002error_rcu_unlock:
5003 pthread_mutex_unlock(&ua_sess->lock);
5004 rcu_read_unlock();
5005 health_code_update();
5006 return -1;
5007}
5008
b34cbebf 5009static
c4b88406
MD
5010int ust_app_flush_app_session(struct ust_app *app,
5011 struct ust_app_session *ua_sess)
b34cbebf 5012{
c4b88406 5013 int ret, retval = 0;
b34cbebf 5014 struct lttng_ht_iter iter;
b34cbebf 5015 struct ust_app_channel *ua_chan;
c4b88406 5016 struct consumer_socket *socket;
b34cbebf 5017
c4b88406 5018 DBG("Flushing app session buffers for ust app pid %d", app->pid);
b34cbebf
MD
5019
5020 rcu_read_lock();
5021
5022 if (!app->compatible) {
c4b88406 5023 goto end_not_compatible;
b34cbebf
MD
5024 }
5025
5026 pthread_mutex_lock(&ua_sess->lock);
5027
b161602a
MD
5028 if (ua_sess->deleted) {
5029 goto end_deleted;
5030 }
5031
b34cbebf
MD
5032 health_code_update();
5033
9d6c7d3f 5034 /* Flushing buffers */
c4b88406
MD
5035 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5036 ua_sess->consumer);
ce34fcd0
MD
5037
5038 /* Flush buffers and push metadata. */
5039 switch (ua_sess->buffer_type) {
5040 case LTTNG_BUFFER_PER_PID:
5041 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
5042 node.node) {
5043 health_code_update();
ce34fcd0
MD
5044 ret = consumer_flush_channel(socket, ua_chan->key);
5045 if (ret) {
5046 ERR("Error flushing consumer channel");
5047 retval = -1;
5048 continue;
5049 }
8be98f9a 5050 }
ce34fcd0
MD
5051 break;
5052 case LTTNG_BUFFER_PER_UID:
5053 default:
5054 assert(0);
5055 break;
8be98f9a 5056 }
8be98f9a 5057
840cb59c 5058 health_code_update();
86acf0da 5059
b161602a 5060end_deleted:
d88aee68 5061 pthread_mutex_unlock(&ua_sess->lock);
ce34fcd0 5062
c4b88406
MD
5063end_not_compatible:
5064 rcu_read_unlock();
5065 health_code_update();
5066 return retval;
5067}
5068
5069/*
ce34fcd0
MD
5070 * Flush buffers for all applications for a specific UST session.
5071 * Called with UST session lock held.
c4b88406
MD
5072 */
5073static
ce34fcd0 5074int ust_app_flush_session(struct ltt_ust_session *usess)
c4b88406
MD
5075
5076{
99b1411c 5077 int ret = 0;
c4b88406 5078
ce34fcd0 5079 DBG("Flushing session buffers for all ust apps");
c4b88406
MD
5080
5081 rcu_read_lock();
5082
ce34fcd0
MD
5083 /* Flush buffers and push metadata. */
5084 switch (usess->buffer_type) {
5085 case LTTNG_BUFFER_PER_UID:
5086 {
5087 struct buffer_reg_uid *reg;
5088 struct lttng_ht_iter iter;
5089
5090 /* Flush all per UID buffers associated to that session. */
5091 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5092 struct ust_registry_session *ust_session_reg;
5093 struct buffer_reg_channel *reg_chan;
5094 struct consumer_socket *socket;
5095
5096 /* Get consumer socket to use to push the metadata.*/
5097 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5098 usess->consumer);
5099 if (!socket) {
5100 /* Ignore request if no consumer is found for the session. */
5101 continue;
5102 }
5103
5104 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5105 reg_chan, node.node) {
5106 /*
5107 * The following call will print error values so the return
5108 * code is of little importance because whatever happens, we
5109 * have to try them all.
5110 */
5111 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
5112 }
5113
5114 ust_session_reg = reg->registry->reg.ust;
5115 /* Push metadata. */
5116 (void) push_metadata(ust_session_reg, usess->consumer);
5117 }
ce34fcd0
MD
5118 break;
5119 }
5120 case LTTNG_BUFFER_PER_PID:
5121 {
5122 struct ust_app_session *ua_sess;
5123 struct lttng_ht_iter iter;
5124 struct ust_app *app;
5125
5126 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5127 ua_sess = lookup_session_by_app(usess, app);
5128 if (ua_sess == NULL) {
5129 continue;
5130 }
5131 (void) ust_app_flush_app_session(app, ua_sess);
5132 }
5133 break;
5134 }
5135 default:
99b1411c 5136 ret = -1;
ce34fcd0
MD
5137 assert(0);
5138 break;
c4b88406 5139 }
c4b88406 5140
7db205b5 5141 rcu_read_unlock();
840cb59c 5142 health_code_update();
c4b88406 5143 return ret;
8be98f9a
MD
5144}
5145
0dd01979
MD
5146static
5147int ust_app_clear_quiescent_app_session(struct ust_app *app,
5148 struct ust_app_session *ua_sess)
5149{
5150 int ret = 0;
5151 struct lttng_ht_iter iter;
5152 struct ust_app_channel *ua_chan;
5153 struct consumer_socket *socket;
5154
5155 DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
5156
5157 rcu_read_lock();
5158
5159 if (!app->compatible) {
5160 goto end_not_compatible;
5161 }
5162
5163 pthread_mutex_lock(&ua_sess->lock);
5164
5165 if (ua_sess->deleted) {
5166 goto end_unlock;
5167 }
5168
5169 health_code_update();
5170
5171 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5172 ua_sess->consumer);
5173 if (!socket) {
5174 ERR("Failed to find consumer (%" PRIu32 ") socket",
5175 app->bits_per_long);
5176 ret = -1;
5177 goto end_unlock;
5178 }
5179
5180 /* Clear quiescent state. */
5181 switch (ua_sess->buffer_type) {
5182 case LTTNG_BUFFER_PER_PID:
5183 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
5184 ua_chan, node.node) {
5185 health_code_update();
5186 ret = consumer_clear_quiescent_channel(socket,
5187 ua_chan->key);
5188 if (ret) {
5189 ERR("Error clearing quiescent state for consumer channel");
5190 ret = -1;
5191 continue;
5192 }
5193 }
5194 break;
5195 case LTTNG_BUFFER_PER_UID:
5196 default:
5197 assert(0);
5198 ret = -1;
5199 break;
5200 }
5201
5202 health_code_update();
5203
5204end_unlock:
5205 pthread_mutex_unlock(&ua_sess->lock);
5206
5207end_not_compatible:
5208 rcu_read_unlock();
5209 health_code_update();
5210 return ret;
5211}
5212
5213/*
5214 * Clear quiescent state in each stream for all applications for a
5215 * specific UST session.
5216 * Called with UST session lock held.
5217 */
5218static
5219int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
5220
5221{
5222 int ret = 0;
5223
5224 DBG("Clearing stream quiescent state for all ust apps");
5225
5226 rcu_read_lock();
5227
5228 switch (usess->buffer_type) {
5229 case LTTNG_BUFFER_PER_UID:
5230 {
5231 struct lttng_ht_iter iter;
5232 struct buffer_reg_uid *reg;
5233
5234 /*
5235 * Clear quiescent for all per UID buffers associated to
5236 * that session.
5237 */
5238 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5239 struct consumer_socket *socket;
5240 struct buffer_reg_channel *reg_chan;
5241
5242 /* Get associated consumer socket.*/
5243 socket = consumer_find_socket_by_bitness(
5244 reg->bits_per_long, usess->consumer);
5245 if (!socket) {
5246 /*
5247 * Ignore request if no consumer is found for
5248 * the session.
5249 */
5250 continue;
5251 }
5252
5253 cds_lfht_for_each_entry(reg->registry->channels->ht,
5254 &iter.iter, reg_chan, node.node) {
5255 /*
5256 * The following call will print error values so
5257 * the return code is of little importance
5258 * because whatever happens, we have to try them
5259 * all.
5260 */
5261 (void) consumer_clear_quiescent_channel(socket,
5262 reg_chan->consumer_key);
5263 }
5264 }
5265 break;
5266 }
5267 case LTTNG_BUFFER_PER_PID:
5268 {
5269 struct ust_app_session *ua_sess;
5270 struct lttng_ht_iter iter;
5271 struct ust_app *app;
5272
5273 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
5274 pid_n.node) {
5275 ua_sess = lookup_session_by_app(usess, app);
5276 if (ua_sess == NULL) {
5277 continue;
5278 }
5279 (void) ust_app_clear_quiescent_app_session(app,
5280 ua_sess);
5281 }
5282 break;
5283 }
5284 default:
5285 ret = -1;
5286 assert(0);
5287 break;
5288 }
5289
5290 rcu_read_unlock();
5291 health_code_update();
5292 return ret;
5293}
5294
84cd17c6
MD
5295/*
5296 * Destroy a specific UST session in apps.
5297 */
3353de95 5298static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
84cd17c6 5299{
ffe60014 5300 int ret;
84cd17c6 5301 struct ust_app_session *ua_sess;
bec39940 5302 struct lttng_ht_iter iter;
d9bf3ca4 5303 struct lttng_ht_node_u64 *node;
84cd17c6 5304
852d0037 5305 DBG("Destroy tracing for ust app pid %d", app->pid);
84cd17c6
MD
5306
5307 rcu_read_lock();
5308
e0c7ec2b
DG
5309 if (!app->compatible) {
5310 goto end;
5311 }
5312
84cd17c6 5313 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 5314 node = lttng_ht_iter_get_node_u64(&iter);
84cd17c6 5315 if (node == NULL) {
d42f20df
DG
5316 /* Session is being or is deleted. */
5317 goto end;
84cd17c6
MD
5318 }
5319 ua_sess = caa_container_of(node, struct ust_app_session, node);
c4a1715b 5320
840cb59c 5321 health_code_update();
d0b96690 5322 destroy_app_session(app, ua_sess);
84cd17c6 5323
840cb59c 5324 health_code_update();
7db205b5 5325
84cd17c6 5326 /* Quiescent wait after stopping trace */
fb45065e 5327 pthread_mutex_lock(&app->sock_lock);
ffe60014 5328 ret = ustctl_wait_quiescent(app->sock);
fb45065e 5329 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
5330 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5331 ERR("UST app wait quiescent failed for app pid %d ret %d",
5332 app->pid, ret);
5333 }
e0c7ec2b
DG
5334end:
5335 rcu_read_unlock();
840cb59c 5336 health_code_update();
84cd17c6 5337 return 0;
84cd17c6
MD
5338}
5339
5b4a0ec0
DG
5340/*
5341 * Start tracing for the UST session.
5342 */
421cb601
DG
5343int ust_app_start_trace_all(struct ltt_ust_session *usess)
5344{
bec39940 5345 struct lttng_ht_iter iter;
421cb601 5346 struct ust_app *app;
48842b30 5347
421cb601
DG
5348 DBG("Starting all UST traces");
5349
bb2452c8
MD
5350 /*
5351 * Even though the start trace might fail, flag this session active so
5352 * other application coming in are started by default.
5353 */
5354 usess->active = 1;
5355
421cb601 5356 rcu_read_lock();
421cb601 5357
0dd01979
MD
5358 /*
5359 * In a start-stop-start use-case, we need to clear the quiescent state
5360 * of each channel set by the prior stop command, thus ensuring that a
5361 * following stop or destroy is sure to grab a timestamp_end near those
5362 * operations, even if the packet is empty.
5363 */
5364 (void) ust_app_clear_quiescent_session(usess);
5365
0498a00c
MD
5366 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5367 ust_app_global_update(usess, app);
5368 }
5369
48842b30
DG
5370 rcu_read_unlock();
5371
5372 return 0;
5373}
487cf67c 5374
8be98f9a
MD
5375/*
5376 * Start tracing for the UST session.
ce34fcd0 5377 * Called with UST session lock held.
8be98f9a
MD
5378 */
5379int ust_app_stop_trace_all(struct ltt_ust_session *usess)
5380{
5381 int ret = 0;
bec39940 5382 struct lttng_ht_iter iter;
8be98f9a
MD
5383 struct ust_app *app;
5384
5385 DBG("Stopping all UST traces");
5386
bb2452c8
MD
5387 /*
5388 * Even though the stop trace might fail, flag this session inactive so
5389 * other application coming in are not started by default.
5390 */
5391 usess->active = 0;
5392
8be98f9a
MD
5393 rcu_read_lock();
5394
b34cbebf
MD
5395 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5396 ret = ust_app_stop_trace(usess, app);
5397 if (ret < 0) {
5398 /* Continue to next apps even on error */
5399 continue;
5400 }
5401 }
5402
ce34fcd0 5403 (void) ust_app_flush_session(usess);
8be98f9a
MD
5404
5405 rcu_read_unlock();
5406
5407 return 0;
5408}
5409
84cd17c6
MD
5410/*
5411 * Destroy app UST session.
5412 */
5413int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
5414{
5415 int ret = 0;
bec39940 5416 struct lttng_ht_iter iter;
84cd17c6
MD
5417 struct ust_app *app;
5418
5419 DBG("Destroy all UST traces");
5420
5421 rcu_read_lock();
5422
852d0037 5423 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3353de95 5424 ret = destroy_trace(usess, app);
84cd17c6
MD
5425 if (ret < 0) {
5426 /* Continue to next apps even on error */
5427 continue;
5428 }
5429 }
5430
5431 rcu_read_unlock();
5432
5433 return 0;
5434}
5435
88e3c2f5 5436/* The ua_sess lock must be held by the caller. */
a9ad0c8f 5437static
88e3c2f5
JG
5438int find_or_create_ust_app_channel(
5439 struct ltt_ust_session *usess,
5440 struct ust_app_session *ua_sess,
5441 struct ust_app *app,
5442 struct ltt_ust_channel *uchan,
5443 struct ust_app_channel **ua_chan)
487cf67c 5444{
55c54cce 5445 int ret = 0;
88e3c2f5
JG
5446 struct lttng_ht_iter iter;
5447 struct lttng_ht_node_str *ua_chan_node;
5448
5449 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
5450 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
5451 if (ua_chan_node) {
5452 *ua_chan = caa_container_of(ua_chan_node,
5453 struct ust_app_channel, node);
5454 goto end;
5455 }
5456
5457 ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
5458 if (ret) {
5459 goto end;
5460 }
5461end:
5462 return ret;
5463}
5464
5465static
5466int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
5467 struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
5468 struct ust_app *app)
5469{
5470 int ret = 0;
5471 struct ust_app_event *ua_event = NULL;
5472
5473 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
5474 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
5475 if (!ua_event) {
5476 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5477 if (ret < 0) {
5478 goto end;
5479 }
5480 } else {
5481 if (ua_event->enabled != uevent->enabled) {
5482 ret = uevent->enabled ?
5483 enable_ust_app_event(ua_sess, ua_event, app) :
5484 disable_ust_app_event(ua_sess, ua_event, app);
5485 }
5486 }
5487
5488end:
5489 return ret;
5490}
5491
2463b787
JR
5492static
5493void ust_app_synchronize_tokens(struct ust_app *app)
5494{
5495 int ret = 0;
5496 enum lttng_error_code ret_code;
5497 enum lttng_trigger_status t_status;
5498 struct lttng_ht_iter app_trigger_iter;
5499 struct lttng_triggers *triggers;
5500 struct ust_app_token_event_rule *token_event_rule_element;
5501 unsigned int count;
5502
5503 rcu_read_lock();
5504 /* TODO: is this necessary to protect against new trigger being added ?
5505 * notification_trigger_tokens_ht is still the backing data structure
5506 * for this listing. Leave it there for now.
5507 */
5508 pthread_mutex_lock(&notification_trigger_tokens_ht_lock);
5509 ret_code = notification_thread_command_get_tokens(
5510 notification_thread_handle, &triggers);
5511 if (ret_code != LTTNG_OK) {
5512 ret = -1;
5513 goto end;
5514 }
5515
5516 assert(triggers);
5517
5518 t_status = lttng_triggers_get_count(triggers, &count);
5519 if (t_status != LTTNG_TRIGGER_STATUS_OK) {
5520 ret = -1;
5521 goto end;
5522 }
5523
5524 for (unsigned int i = 0; i < count; i++) {
5525 struct lttng_condition *condition;
5526 struct lttng_event_rule *event_rule;
5527 struct lttng_trigger *trigger;
5528 struct ust_app_token_event_rule *ua_token;
5529 uint64_t token;
5530
5531 trigger = lttng_triggers_get_pointer_of_index(triggers, i);
5532 assert(trigger);
5533
5534 /* TODO: error checking and type checking */
5535 token = lttng_trigger_get_tracer_token(trigger);
5536 condition = lttng_trigger_get_condition(trigger);
5537 (void) lttng_condition_event_rule_get_rule_mutable(condition, &event_rule);
5538
5539 if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
5540 /* Skip kernel related trigger */
5541 continue;
5542 }
5543
5544 /* Iterate over all known token trigger */
5545 ua_token = find_ust_app_token_event_rule(app->tokens_ht, token);
5546 if (!ua_token) {
5547 ret = create_ust_app_token_event_rule(trigger, app);
5548 if (ret < 0) {
5549 goto end;
5550 }
5551 }
5552 }
5553
5554 /* Remove all unknown trigger from the app
5555 * TODO find a way better way then this, do it on the unregister command
5556 * and be specific on the token to remove instead of going over all
5557 * trigger known to the app. This is sub optimal.
5558 */
5559 cds_lfht_for_each_entry (app->tokens_ht->ht, &app_trigger_iter.iter,
5560 token_event_rule_element, node.node) {
5561 uint64_t token;
5562 bool found = false;
5563
5564 token = token_event_rule_element->token;
5565
5566 /*
5567 * Check if the app event trigger still exists on the
5568 * notification side.
5569 * TODO: might want to change the backing data struct of the
5570 * lttng_triggers object to allow quick lookup?
5571 * For kernel mostly all of this can be removed once we delete
5572 * on a per trigger basis.
5573 */
5574
5575 for (unsigned int i = 0; i < count; i++) {
5576 struct lttng_trigger *trigger;
5577 uint64_t inner_token;
5578
5579 trigger = lttng_triggers_get_pointer_of_index(
5580 triggers, i);
5581 assert(trigger);
5582
5583 inner_token = lttng_trigger_get_tracer_token(trigger);
5584
5585 if (inner_token == token) {
5586 found = true;
5587 break;
5588 }
5589 }
5590
5591 if (found) {
5592 /* Still valid */
5593 continue;
5594 }
5595
5596 /* TODO: This is fucking ugly API for fuck sake */
5597 assert(!lttng_ht_del(app->tokens_ht, &app_trigger_iter));
5598
5599 (void) disable_ust_object(app, token_event_rule_element->obj);
5600
5601 delete_ust_app_token_event_rule(app->sock, token_event_rule_element, app);
5602 }
5603end:
5604 lttng_triggers_destroy(triggers);
5605 rcu_read_unlock();
5606 pthread_mutex_unlock(&notification_trigger_tokens_ht_lock);
5607 return;
5608}
5609
88e3c2f5
JG
5610/*
5611 * The caller must ensure that the application is compatible and is tracked
2a3a1a2b 5612 * by the process attribute trackers.
88e3c2f5
JG
5613 */
5614static
5615void ust_app_synchronize(struct ltt_ust_session *usess,
5616 struct ust_app *app)
5617{
5618 int ret = 0;
5619 struct cds_lfht_iter uchan_iter;
5620 struct ltt_ust_channel *uchan;
3d8ca23b 5621 struct ust_app_session *ua_sess = NULL;
1f3580c7 5622
88e3c2f5
JG
5623 /*
5624 * The application's configuration should only be synchronized for
5625 * active sessions.
5626 */
5627 assert(usess->active);
5628
5629 ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
3d8ca23b
DG
5630 if (ret < 0) {
5631 /* Tracer is probably gone or ENOMEM. */
487cf67c
DG
5632 goto error;
5633 }
3d8ca23b 5634 assert(ua_sess);
487cf67c 5635
d0b96690 5636 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
5637 if (ua_sess->deleted) {
5638 pthread_mutex_unlock(&ua_sess->lock);
5639 goto end;
5640 }
5641
88e3c2f5
JG
5642 rcu_read_lock();
5643 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
5644 uchan, node.node) {
5645 struct ust_app_channel *ua_chan;
5646 struct cds_lfht_iter uevent_iter;
5647 struct ltt_ust_event *uevent;
487cf67c 5648
31746f93 5649 /*
88e3c2f5
JG
5650 * Search for a matching ust_app_channel. If none is found,
5651 * create it. Creating the channel will cause the ua_chan
5652 * structure to be allocated, the channel buffers to be
5653 * allocated (if necessary) and sent to the application, and
5654 * all enabled contexts will be added to the channel.
31746f93 5655 */
f3db82be 5656 ret = find_or_create_ust_app_channel(usess, ua_sess,
88e3c2f5
JG
5657 app, uchan, &ua_chan);
5658 if (ret) {
5659 /* Tracer is probably gone or ENOMEM. */
5660 goto error_unlock;
727d5404
DG
5661 }
5662
88e3c2f5
JG
5663 if (!ua_chan) {
5664 /* ua_chan will be NULL for the metadata channel */
5665 continue;
5666 }
727d5404 5667
88e3c2f5 5668 cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
bec39940 5669 node.node) {
88e3c2f5
JG
5670 ret = ust_app_channel_synchronize_event(ua_chan,
5671 uevent, ua_sess, app);
5672 if (ret) {
d0b96690 5673 goto error_unlock;
487cf67c 5674 }
36dc12cc 5675 }
d0b96690 5676
88e3c2f5
JG
5677 if (ua_chan->enabled != uchan->enabled) {
5678 ret = uchan->enabled ?
5679 enable_ust_app_channel(ua_sess, uchan, app) :
5680 disable_ust_app_channel(ua_sess, ua_chan, app);
5681 if (ret) {
5682 goto error_unlock;
5683 }
5684 }
36dc12cc 5685 }
88e3c2f5 5686 rcu_read_unlock();
0498a00c 5687
a9ad0c8f 5688end:
88e3c2f5 5689 pthread_mutex_unlock(&ua_sess->lock);
ffe60014 5690 /* Everything went well at this point. */
ffe60014
DG
5691 return;
5692
d0b96690 5693error_unlock:
88e3c2f5 5694 rcu_read_unlock();
d0b96690 5695 pthread_mutex_unlock(&ua_sess->lock);
487cf67c 5696error:
ffe60014 5697 if (ua_sess) {
d0b96690 5698 destroy_app_session(app, ua_sess);
ffe60014 5699 }
487cf67c
DG
5700 return;
5701}
55cc08a6 5702
a9ad0c8f
MD
5703static
5704void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
5705{
5706 struct ust_app_session *ua_sess;
5707
5708 ua_sess = lookup_session_by_app(usess, app);
5709 if (ua_sess == NULL) {
5710 return;
5711 }
5712 destroy_app_session(app, ua_sess);
5713}
5714
5715/*
5716 * Add channels/events from UST global domain to registered apps at sock.
5717 *
5718 * Called with session lock held.
5719 * Called with RCU read-side lock held.
5720 */
5721void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
5722{
5723 assert(usess);
88e3c2f5 5724 assert(usess->active);
a9ad0c8f
MD
5725
5726 DBG2("UST app global update for app sock %d for session id %" PRIu64,
5727 app->sock, usess->id);
5728
5729 if (!app->compatible) {
5730 return;
5731 }
159b042f
JG
5732 if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
5733 usess, app->pid) &&
55c9e7ca 5734 trace_ust_id_tracker_lookup(
159b042f
JG
5735 LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
5736 usess, app->uid) &&
55c9e7ca 5737 trace_ust_id_tracker_lookup(
159b042f
JG
5738 LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
5739 usess, app->gid)) {
88e3c2f5
JG
5740 /*
5741 * Synchronize the application's internal tracing configuration
5742 * and start tracing.
5743 */
5744 ust_app_synchronize(usess, app);
5745 ust_app_start_trace(usess, app);
a9ad0c8f
MD
5746 } else {
5747 ust_app_global_destroy(usess, app);
5748 }
5749}
5750
2463b787
JR
5751void ust_app_global_update_tokens(struct ust_app *app)
5752{
5753 DBG2("UST app global update token for app sock %d", app->sock);
5754
5755 if (!app->compatible) {
5756 return;
5757 }
5758 if (app->token_communication.handle == NULL) {
5759 WARN("UST app global update token for app sock %d skipped since communcation handle is null", app->sock);
5760 return;
5761 }
5762
5763 ust_app_synchronize_tokens(app);
5764}
5765
a9ad0c8f
MD
5766/*
5767 * Called with session lock held.
5768 */
5769void ust_app_global_update_all(struct ltt_ust_session *usess)
5770{
5771 struct lttng_ht_iter iter;
5772 struct ust_app *app;
5773
5774 rcu_read_lock();
5775 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5776 ust_app_global_update(usess, app);
5777 }
5778 rcu_read_unlock();
5779}
5780
2463b787
JR
5781void ust_app_global_update_all_tokens(void)
5782{
5783 struct lttng_ht_iter iter;
5784 struct ust_app *app;
5785
5786 rcu_read_lock();
5787 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5788 ust_app_global_update_tokens(app);
5789 }
5790 rcu_read_unlock();
5791}
5792
5793void ust_app_update_trigger_error_count(struct lttng_trigger *trigger)
5794{
5795 uint64_t error_count = 0;
5796 enum trigger_error_accounting_status status;
5797
5798 status = trigger_error_accounting_get_count(trigger, &error_count);
5799 if (status != TRIGGER_ERROR_ACCOUNTING_STATUS_OK) {
5800 ERR("Error getting trigger error count");
5801 }
5802
5803 lttng_trigger_set_error_count(trigger, error_count);
5804}
5805
55cc08a6
DG
5806/*
5807 * Add context to a specific channel for global UST domain.
5808 */
5809int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
5810 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
5811{
5812 int ret = 0;
bec39940
DG
5813 struct lttng_ht_node_str *ua_chan_node;
5814 struct lttng_ht_iter iter, uiter;
55cc08a6
DG
5815 struct ust_app_channel *ua_chan = NULL;
5816 struct ust_app_session *ua_sess;
5817 struct ust_app *app;
5818
88e3c2f5 5819 assert(usess->active);
0498a00c 5820
55cc08a6 5821 rcu_read_lock();
852d0037 5822 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
5823 if (!app->compatible) {
5824 /*
5825 * TODO: In time, we should notice the caller of this error by
5826 * telling him that this is a version error.
5827 */
5828 continue;
5829 }
55cc08a6
DG
5830 ua_sess = lookup_session_by_app(usess, app);
5831 if (ua_sess == NULL) {
5832 continue;
5833 }
5834
d0b96690 5835 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
5836
5837 if (ua_sess->deleted) {
5838 pthread_mutex_unlock(&ua_sess->lock);
5839 continue;
5840 }
5841
55cc08a6 5842 /* Lookup channel in the ust app session */
bec39940
DG
5843 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
5844 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
55cc08a6 5845 if (ua_chan_node == NULL) {
d0b96690 5846 goto next_app;
55cc08a6
DG
5847 }
5848 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
5849 node);
c9edf082 5850 ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
55cc08a6 5851 if (ret < 0) {
d0b96690 5852 goto next_app;
55cc08a6 5853 }
d0b96690
DG
5854 next_app:
5855 pthread_mutex_unlock(&ua_sess->lock);
55cc08a6
DG
5856 }
5857
55cc08a6 5858 rcu_read_unlock();
76d45b40
DG
5859 return ret;
5860}
7f79d3a1 5861
d0b96690
DG
5862/*
5863 * Receive registration and populate the given msg structure.
5864 *
5865 * On success return 0 else a negative value returned by the ustctl call.
5866 */
5867int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
5868{
5869 int ret;
5870 uint32_t pid, ppid, uid, gid;
5871
5872 assert(msg);
5873
5874 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
5875 &pid, &ppid, &uid, &gid,
5876 &msg->bits_per_long,
5877 &msg->uint8_t_alignment,
5878 &msg->uint16_t_alignment,
5879 &msg->uint32_t_alignment,
5880 &msg->uint64_t_alignment,
5881 &msg->long_alignment,
5882 &msg->byte_order,
5883 msg->name);
5884 if (ret < 0) {
5885 switch (-ret) {
5886 case EPIPE:
5887 case ECONNRESET:
5888 case LTTNG_UST_ERR_EXITING:
5889 DBG3("UST app recv reg message failed. Application died");
5890 break;
5891 case LTTNG_UST_ERR_UNSUP_MAJOR:
5892 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
5893 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
5894 LTTNG_UST_ABI_MINOR_VERSION);
5895 break;
5896 default:
5897 ERR("UST app recv reg message failed with ret %d", ret);
5898 break;
5899 }
5900 goto error;
5901 }
5902 msg->pid = (pid_t) pid;
5903 msg->ppid = (pid_t) ppid;
5904 msg->uid = (uid_t) uid;
5905 msg->gid = (gid_t) gid;
5906
5907error:
5908 return ret;
5909}
5910
10b56aef
MD
5911/*
5912 * Return a ust app session object using the application object and the
5913 * session object descriptor has a key. If not found, NULL is returned.
5914 * A RCU read side lock MUST be acquired when calling this function.
5915*/
5916static struct ust_app_session *find_session_by_objd(struct ust_app *app,
5917 int objd)
5918{
5919 struct lttng_ht_node_ulong *node;
5920 struct lttng_ht_iter iter;
5921 struct ust_app_session *ua_sess = NULL;
5922
5923 assert(app);
5924
5925 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
5926 node = lttng_ht_iter_get_node_ulong(&iter);
5927 if (node == NULL) {
5928 DBG2("UST app session find by objd %d not found", objd);
5929 goto error;
5930 }
5931
5932 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
5933
5934error:
5935 return ua_sess;
5936}
5937
d88aee68
DG
5938/*
5939 * Return a ust app channel object using the application object and the channel
5940 * object descriptor has a key. If not found, NULL is returned. A RCU read side
5941 * lock MUST be acquired before calling this function.
5942 */
d0b96690
DG
5943static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
5944 int objd)
5945{
5946 struct lttng_ht_node_ulong *node;
5947 struct lttng_ht_iter iter;
5948 struct ust_app_channel *ua_chan = NULL;
5949
5950 assert(app);
5951
5952 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
5953 node = lttng_ht_iter_get_node_ulong(&iter);
5954 if (node == NULL) {
5955 DBG2("UST app channel find by objd %d not found", objd);
5956 goto error;
5957 }
5958
5959 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
5960
5961error:
5962 return ua_chan;
5963}
5964
d88aee68
DG
5965/*
5966 * Reply to a register channel notification from an application on the notify
5967 * socket. The channel metadata is also created.
5968 *
5969 * The session UST registry lock is acquired in this function.
5970 *
5971 * On success 0 is returned else a negative value.
5972 */
8eede835 5973static int reply_ust_register_channel(int sock, int cobjd,
d0b96690
DG
5974 size_t nr_fields, struct ustctl_field *fields)
5975{
5976 int ret, ret_code = 0;
294e218e 5977 uint32_t chan_id;
7972aab2 5978 uint64_t chan_reg_key;
d0b96690
DG
5979 enum ustctl_channel_header type;
5980 struct ust_app *app;
5981 struct ust_app_channel *ua_chan;
5982 struct ust_app_session *ua_sess;
7972aab2 5983 struct ust_registry_session *registry;
45893984 5984 struct ust_registry_channel *chan_reg;
d0b96690
DG
5985
5986 rcu_read_lock();
5987
5988 /* Lookup application. If not found, there is a code flow error. */
5989 app = find_app_by_notify_sock(sock);
d88aee68 5990 if (!app) {
fad1ed2f 5991 DBG("Application socket %d is being torn down. Abort event notify",
d88aee68
DG
5992 sock);
5993 ret = 0;
5994 goto error_rcu_unlock;
5995 }
d0b96690 5996
4950b860 5997 /* Lookup channel by UST object descriptor. */
d0b96690 5998 ua_chan = find_channel_by_objd(app, cobjd);
4950b860 5999 if (!ua_chan) {
fad1ed2f 6000 DBG("Application channel is being torn down. Abort event notify");
4950b860
MD
6001 ret = 0;
6002 goto error_rcu_unlock;
6003 }
6004
d0b96690
DG
6005 assert(ua_chan->session);
6006 ua_sess = ua_chan->session;
d0b96690 6007
7972aab2
DG
6008 /* Get right session registry depending on the session buffer type. */
6009 registry = get_session_registry(ua_sess);
fad1ed2f
JR
6010 if (!registry) {
6011 DBG("Application session is being torn down. Abort event notify");
6012 ret = 0;
6013 goto error_rcu_unlock;
6014 };
45893984 6015
7972aab2
DG
6016 /* Depending on the buffer type, a different channel key is used. */
6017 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6018 chan_reg_key = ua_chan->tracing_channel_id;
d0b96690 6019 } else {
7972aab2 6020 chan_reg_key = ua_chan->key;
d0b96690
DG
6021 }
6022
7972aab2
DG
6023 pthread_mutex_lock(&registry->lock);
6024
6025 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
6026 assert(chan_reg);
6027
6028 if (!chan_reg->register_done) {
294e218e
MD
6029 /*
6030 * TODO: eventually use the registry event count for
6031 * this channel to better guess header type for per-pid
6032 * buffers.
6033 */
6034 type = USTCTL_CHANNEL_HEADER_LARGE;
7972aab2
DG
6035 chan_reg->nr_ctx_fields = nr_fields;
6036 chan_reg->ctx_fields = fields;
fad1ed2f 6037 fields = NULL;
7972aab2 6038 chan_reg->header_type = type;
d0b96690 6039 } else {
7972aab2
DG
6040 /* Get current already assigned values. */
6041 type = chan_reg->header_type;
d0b96690 6042 }
7972aab2
DG
6043 /* Channel id is set during the object creation. */
6044 chan_id = chan_reg->chan_id;
d0b96690
DG
6045
6046 /* Append to metadata */
7972aab2
DG
6047 if (!chan_reg->metadata_dumped) {
6048 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
d0b96690
DG
6049 if (ret_code) {
6050 ERR("Error appending channel metadata (errno = %d)", ret_code);
6051 goto reply;
6052 }
6053 }
6054
6055reply:
7972aab2
DG
6056 DBG3("UST app replying to register channel key %" PRIu64
6057 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
6058 ret_code);
d0b96690
DG
6059
6060 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
6061 if (ret < 0) {
6062 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6063 ERR("UST app reply channel failed with ret %d", ret);
6064 } else {
6065 DBG3("UST app reply channel failed. Application died");
6066 }
6067 goto error;
6068 }
6069
7972aab2
DG
6070 /* This channel registry registration is completed. */
6071 chan_reg->register_done = 1;
6072
d0b96690 6073error:
7972aab2 6074 pthread_mutex_unlock(&registry->lock);
d88aee68 6075error_rcu_unlock:
d0b96690 6076 rcu_read_unlock();
fad1ed2f 6077 free(fields);
d0b96690
DG
6078 return ret;
6079}
6080
d88aee68
DG
6081/*
6082 * Add event to the UST channel registry. When the event is added to the
6083 * registry, the metadata is also created. Once done, this replies to the
6084 * application with the appropriate error code.
6085 *
6086 * The session UST registry lock is acquired in the function.
6087 *
6088 * On success 0 is returned else a negative value.
6089 */
d0b96690 6090static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
2106efa0
PP
6091 char *sig, size_t nr_fields, struct ustctl_field *fields,
6092 int loglevel_value, char *model_emf_uri)
d0b96690
DG
6093{
6094 int ret, ret_code;
6095 uint32_t event_id = 0;
7972aab2 6096 uint64_t chan_reg_key;
d0b96690
DG
6097 struct ust_app *app;
6098 struct ust_app_channel *ua_chan;
6099 struct ust_app_session *ua_sess;
7972aab2 6100 struct ust_registry_session *registry;
d0b96690
DG
6101
6102 rcu_read_lock();
6103
6104 /* Lookup application. If not found, there is a code flow error. */
6105 app = find_app_by_notify_sock(sock);
d88aee68 6106 if (!app) {
fad1ed2f 6107 DBG("Application socket %d is being torn down. Abort event notify",
d88aee68
DG
6108 sock);
6109 ret = 0;
6110 goto error_rcu_unlock;
6111 }
d0b96690 6112
4950b860 6113 /* Lookup channel by UST object descriptor. */
d0b96690 6114 ua_chan = find_channel_by_objd(app, cobjd);
4950b860 6115 if (!ua_chan) {
fad1ed2f 6116 DBG("Application channel is being torn down. Abort event notify");
4950b860
MD
6117 ret = 0;
6118 goto error_rcu_unlock;
6119 }
6120
d0b96690
DG
6121 assert(ua_chan->session);
6122 ua_sess = ua_chan->session;
6123
7972aab2 6124 registry = get_session_registry(ua_sess);
fad1ed2f
JR
6125 if (!registry) {
6126 DBG("Application session is being torn down. Abort event notify");
6127 ret = 0;
6128 goto error_rcu_unlock;
6129 }
7972aab2
DG
6130
6131 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6132 chan_reg_key = ua_chan->tracing_channel_id;
6133 } else {
6134 chan_reg_key = ua_chan->key;
6135 }
6136
6137 pthread_mutex_lock(&registry->lock);
d0b96690 6138
d5d629b5
DG
6139 /*
6140 * From this point on, this call acquires the ownership of the sig, fields
6141 * and model_emf_uri meaning any free are done inside it if needed. These
6142 * three variables MUST NOT be read/write after this.
6143 */
7972aab2 6144 ret_code = ust_registry_create_event(registry, chan_reg_key,
2106efa0
PP
6145 sobjd, cobjd, name, sig, nr_fields, fields,
6146 loglevel_value, model_emf_uri, ua_sess->buffer_type,
6147 &event_id, app);
fad1ed2f
JR
6148 sig = NULL;
6149 fields = NULL;
6150 model_emf_uri = NULL;
d0b96690
DG
6151
6152 /*
6153 * The return value is returned to ustctl so in case of an error, the
6154 * application can be notified. In case of an error, it's important not to
6155 * return a negative error or else the application will get closed.
6156 */
6157 ret = ustctl_reply_register_event(sock, event_id, ret_code);
6158 if (ret < 0) {
6159 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6160 ERR("UST app reply event failed with ret %d", ret);
6161 } else {
6162 DBG3("UST app reply event failed. Application died");
6163 }
6164 /*
6165 * No need to wipe the create event since the application socket will
6166 * get close on error hence cleaning up everything by itself.
6167 */
6168 goto error;
6169 }
6170
7972aab2
DG
6171 DBG3("UST registry event %s with id %" PRId32 " added successfully",
6172 name, event_id);
d88aee68 6173
d0b96690 6174error:
7972aab2 6175 pthread_mutex_unlock(&registry->lock);
d88aee68 6176error_rcu_unlock:
d0b96690 6177 rcu_read_unlock();
fad1ed2f
JR
6178 free(sig);
6179 free(fields);
6180 free(model_emf_uri);
d0b96690
DG
6181 return ret;
6182}
6183
10b56aef
MD
6184/*
6185 * Add enum to the UST session registry. Once done, this replies to the
6186 * application with the appropriate error code.
6187 *
6188 * The session UST registry lock is acquired within this function.
6189 *
6190 * On success 0 is returned else a negative value.
6191 */
6192static int add_enum_ust_registry(int sock, int sobjd, char *name,
6193 struct ustctl_enum_entry *entries, size_t nr_entries)
6194{
6195 int ret = 0, ret_code;
6196 struct ust_app *app;
6197 struct ust_app_session *ua_sess;
6198 struct ust_registry_session *registry;
6199 uint64_t enum_id = -1ULL;
6200
6201 rcu_read_lock();
6202
6203 /* Lookup application. If not found, there is a code flow error. */
6204 app = find_app_by_notify_sock(sock);
6205 if (!app) {
6206 /* Return an error since this is not an error */
6207 DBG("Application socket %d is being torn down. Aborting enum registration",
6208 sock);
6209 free(entries);
6210 goto error_rcu_unlock;
6211 }
6212
6213 /* Lookup session by UST object descriptor. */
6214 ua_sess = find_session_by_objd(app, sobjd);
6215 if (!ua_sess) {
6216 /* Return an error since this is not an error */
fad1ed2f 6217 DBG("Application session is being torn down (session not found). Aborting enum registration.");
10b56aef
MD
6218 free(entries);
6219 goto error_rcu_unlock;
6220 }
6221
6222 registry = get_session_registry(ua_sess);
fad1ed2f
JR
6223 if (!registry) {
6224 DBG("Application session is being torn down (registry not found). Aborting enum registration.");
6225 free(entries);
6226 goto error_rcu_unlock;
6227 }
10b56aef
MD
6228
6229 pthread_mutex_lock(&registry->lock);
6230
6231 /*
6232 * From this point on, the callee acquires the ownership of
6233 * entries. The variable entries MUST NOT be read/written after
6234 * call.
6235 */
6236 ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
6237 entries, nr_entries, &enum_id);
6238 entries = NULL;
6239
6240 /*
6241 * The return value is returned to ustctl so in case of an error, the
6242 * application can be notified. In case of an error, it's important not to
6243 * return a negative error or else the application will get closed.
6244 */
6245 ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
6246 if (ret < 0) {
6247 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6248 ERR("UST app reply enum failed with ret %d", ret);
6249 } else {
6250 DBG3("UST app reply enum failed. Application died");
6251 }
6252 /*
6253 * No need to wipe the create enum since the application socket will
6254 * get close on error hence cleaning up everything by itself.
6255 */
6256 goto error;
6257 }
6258
6259 DBG3("UST registry enum %s added successfully or already found", name);
6260
6261error:
6262 pthread_mutex_unlock(&registry->lock);
6263error_rcu_unlock:
6264 rcu_read_unlock();
6265 return ret;
6266}
6267
d88aee68
DG
6268/*
6269 * Handle application notification through the given notify socket.
6270 *
6271 * Return 0 on success or else a negative value.
6272 */
d0b96690
DG
6273int ust_app_recv_notify(int sock)
6274{
6275 int ret;
6276 enum ustctl_notify_cmd cmd;
6277
6278 DBG3("UST app receiving notify from sock %d", sock);
6279
6280 ret = ustctl_recv_notify(sock, &cmd);
6281 if (ret < 0) {
6282 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6283 ERR("UST app recv notify failed with ret %d", ret);
6284 } else {
6285 DBG3("UST app recv notify failed. Application died");
6286 }
6287 goto error;
6288 }
6289
6290 switch (cmd) {
6291 case USTCTL_NOTIFY_CMD_EVENT:
6292 {
2106efa0 6293 int sobjd, cobjd, loglevel_value;
d0b96690
DG
6294 char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
6295 size_t nr_fields;
6296 struct ustctl_field *fields;
6297
6298 DBG2("UST app ustctl register event received");
6299
2106efa0
PP
6300 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
6301 &loglevel_value, &sig, &nr_fields, &fields,
6302 &model_emf_uri);
d0b96690
DG
6303 if (ret < 0) {
6304 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6305 ERR("UST app recv event failed with ret %d", ret);
6306 } else {
6307 DBG3("UST app recv event failed. Application died");
6308 }
6309 goto error;
6310 }
6311
d5d629b5
DG
6312 /*
6313 * Add event to the UST registry coming from the notify socket. This
6314 * call will free if needed the sig, fields and model_emf_uri. This
6315 * code path loses the ownsership of these variables and transfer them
6316 * to the this function.
6317 */
d0b96690 6318 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
2106efa0 6319 fields, loglevel_value, model_emf_uri);
d0b96690
DG
6320 if (ret < 0) {
6321 goto error;
6322 }
6323
6324 break;
6325 }
6326 case USTCTL_NOTIFY_CMD_CHANNEL:
6327 {
6328 int sobjd, cobjd;
6329 size_t nr_fields;
6330 struct ustctl_field *fields;
6331
6332 DBG2("UST app ustctl register channel received");
6333
6334 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
6335 &fields);
6336 if (ret < 0) {
6337 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6338 ERR("UST app recv channel failed with ret %d", ret);
6339 } else {
6340 DBG3("UST app recv channel failed. Application died");
6341 }
6342 goto error;
6343 }
6344
d5d629b5
DG
6345 /*
6346 * The fields ownership are transfered to this function call meaning
6347 * that if needed it will be freed. After this, it's invalid to access
6348 * fields or clean it up.
6349 */
8eede835 6350 ret = reply_ust_register_channel(sock, cobjd, nr_fields,
d0b96690
DG
6351 fields);
6352 if (ret < 0) {
6353 goto error;
6354 }
6355
6356 break;
6357 }
10b56aef
MD
6358 case USTCTL_NOTIFY_CMD_ENUM:
6359 {
6360 int sobjd;
6361 char name[LTTNG_UST_SYM_NAME_LEN];
6362 size_t nr_entries;
6363 struct ustctl_enum_entry *entries;
6364
6365 DBG2("UST app ustctl register enum received");
6366
6367 ret = ustctl_recv_register_enum(sock, &sobjd, name,
6368 &entries, &nr_entries);
6369 if (ret < 0) {
6370 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6371 ERR("UST app recv enum failed with ret %d", ret);
6372 } else {
6373 DBG3("UST app recv enum failed. Application died");
6374 }
6375 goto error;
6376 }
6377
6378 /* Callee assumes ownership of entries */
6379 ret = add_enum_ust_registry(sock, sobjd, name,
6380 entries, nr_entries);
6381 if (ret < 0) {
6382 goto error;
6383 }
6384
6385 break;
6386 }
d0b96690
DG
6387 default:
6388 /* Should NEVER happen. */
6389 assert(0);
6390 }
6391
6392error:
6393 return ret;
6394}
d88aee68
DG
6395
6396/*
6397 * Once the notify socket hangs up, this is called. First, it tries to find the
6398 * corresponding application. On failure, the call_rcu to close the socket is
6399 * executed. If an application is found, it tries to delete it from the notify
6400 * socket hash table. Whathever the result, it proceeds to the call_rcu.
6401 *
6402 * Note that an object needs to be allocated here so on ENOMEM failure, the
6403 * call RCU is not done but the rest of the cleanup is.
6404 */
6405void ust_app_notify_sock_unregister(int sock)
6406{
6407 int err_enomem = 0;
6408 struct lttng_ht_iter iter;
6409 struct ust_app *app;
6410 struct ust_app_notify_sock_obj *obj;
6411
6412 assert(sock >= 0);
6413
6414 rcu_read_lock();
6415
6416 obj = zmalloc(sizeof(*obj));
6417 if (!obj) {
6418 /*
6419 * An ENOMEM is kind of uncool. If this strikes we continue the
6420 * procedure but the call_rcu will not be called. In this case, we
6421 * accept the fd leak rather than possibly creating an unsynchronized
6422 * state between threads.
6423 *
6424 * TODO: The notify object should be created once the notify socket is
6425 * registered and stored independantely from the ust app object. The
6426 * tricky part is to synchronize the teardown of the application and
6427 * this notify object. Let's keep that in mind so we can avoid this
6428 * kind of shenanigans with ENOMEM in the teardown path.
6429 */
6430 err_enomem = 1;
6431 } else {
6432 obj->fd = sock;
6433 }
6434
6435 DBG("UST app notify socket unregister %d", sock);
6436
6437 /*
6438 * Lookup application by notify socket. If this fails, this means that the
6439 * hash table delete has already been done by the application
6440 * unregistration process so we can safely close the notify socket in a
6441 * call RCU.
6442 */
6443 app = find_app_by_notify_sock(sock);
6444 if (!app) {
6445 goto close_socket;
6446 }
6447
6448 iter.iter.node = &app->notify_sock_n.node;
6449
6450 /*
6451 * Whatever happens here either we fail or succeed, in both cases we have
6452 * to close the socket after a grace period to continue to the call RCU
6453 * here. If the deletion is successful, the application is not visible
6454 * anymore by other threads and is it fails it means that it was already
6455 * deleted from the hash table so either way we just have to close the
6456 * socket.
6457 */
6458 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
6459
6460close_socket:
6461 rcu_read_unlock();
6462
6463 /*
6464 * Close socket after a grace period to avoid for the socket to be reused
6465 * before the application object is freed creating potential race between
6466 * threads trying to add unique in the global hash table.
6467 */
6468 if (!err_enomem) {
6469 call_rcu(&obj->head, close_notify_sock_rcu);
6470 }
6471}
f45e313d
DG
6472
6473/*
6474 * Destroy a ust app data structure and free its memory.
6475 */
6476void ust_app_destroy(struct ust_app *app)
6477{
6478 if (!app) {
6479 return;
6480 }
6481
6482 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
6483}
6dc3064a
DG
6484
6485/*
6486 * Take a snapshot for a given UST session. The snapshot is sent to the given
6487 * output.
6488 *
9a654598 6489 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
6dc3064a 6490 */
fb9a95c4
JG
6491enum lttng_error_code ust_app_snapshot_record(
6492 const struct ltt_ust_session *usess,
348a81dc 6493 const struct consumer_output *output, int wait,
d07ceecd 6494 uint64_t nb_packets_per_stream)
6dc3064a
DG
6495{
6496 int ret = 0;
9a654598 6497 enum lttng_error_code status = LTTNG_OK;
6dc3064a
DG
6498 struct lttng_ht_iter iter;
6499 struct ust_app *app;
affce97e 6500 char *trace_path = NULL;
6dc3064a
DG
6501
6502 assert(usess);
6503 assert(output);
6504
6505 rcu_read_lock();
6506
8c924c7b
MD
6507 switch (usess->buffer_type) {
6508 case LTTNG_BUFFER_PER_UID:
6509 {
6510 struct buffer_reg_uid *reg;
6dc3064a 6511
8c924c7b
MD
6512 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6513 struct buffer_reg_channel *reg_chan;
6514 struct consumer_socket *socket;
3b967712 6515 char pathname[PATH_MAX];
5da88b0f 6516 size_t consumer_path_offset = 0;
6dc3064a 6517
2b269489
JR
6518 if (!reg->registry->reg.ust->metadata_key) {
6519 /* Skip since no metadata is present */
6520 continue;
6521 }
6522
8c924c7b
MD
6523 /* Get consumer socket to use to push the metadata.*/
6524 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6525 usess->consumer);
6526 if (!socket) {
9a654598 6527 status = LTTNG_ERR_INVALID;
8c924c7b
MD
6528 goto error;
6529 }
6dc3064a 6530
8c924c7b
MD
6531 memset(pathname, 0, sizeof(pathname));
6532 ret = snprintf(pathname, sizeof(pathname),
5da88b0f 6533 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
8c924c7b
MD
6534 reg->uid, reg->bits_per_long);
6535 if (ret < 0) {
6536 PERROR("snprintf snapshot path");
9a654598 6537 status = LTTNG_ERR_INVALID;
8c924c7b
MD
6538 goto error;
6539 }
affce97e
JG
6540 /* Free path allowed on previous iteration. */
6541 free(trace_path);
5da88b0f
MD
6542 trace_path = setup_channel_trace_path(usess->consumer, pathname,
6543 &consumer_path_offset);
3b967712
MD
6544 if (!trace_path) {
6545 status = LTTNG_ERR_INVALID;
6546 goto error;
6547 }
f3db82be 6548 /* Add the UST default trace dir to path. */
8c924c7b
MD
6549 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6550 reg_chan, node.node) {
9a654598 6551 status = consumer_snapshot_channel(socket,
e098433c
JG
6552 reg_chan->consumer_key,
6553 output, 0, usess->uid,
5da88b0f 6554 usess->gid, &trace_path[consumer_path_offset], wait,
d2956687 6555 nb_packets_per_stream);
9a654598 6556 if (status != LTTNG_OK) {
8c924c7b
MD
6557 goto error;
6558 }
6559 }
9a654598 6560 status = consumer_snapshot_channel(socket,
68808f4e 6561 reg->registry->reg.ust->metadata_key, output, 1,
5da88b0f
MD
6562 usess->uid, usess->gid, &trace_path[consumer_path_offset],
6563 wait, 0);
9a654598 6564 if (status != LTTNG_OK) {
8c924c7b
MD
6565 goto error;
6566 }
af706bb7 6567 }
8c924c7b
MD
6568 break;
6569 }
6570 case LTTNG_BUFFER_PER_PID:
6571 {
6572 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6573 struct consumer_socket *socket;
6574 struct lttng_ht_iter chan_iter;
6575 struct ust_app_channel *ua_chan;
6576 struct ust_app_session *ua_sess;
6577 struct ust_registry_session *registry;
3b967712 6578 char pathname[PATH_MAX];
5da88b0f 6579 size_t consumer_path_offset = 0;
8c924c7b
MD
6580
6581 ua_sess = lookup_session_by_app(usess, app);
6582 if (!ua_sess) {
6583 /* Session not associated with this app. */
6584 continue;
6585 }
af706bb7 6586
8c924c7b
MD
6587 /* Get the right consumer socket for the application. */
6588 socket = consumer_find_socket_by_bitness(app->bits_per_long,
348a81dc 6589 output);
8c924c7b 6590 if (!socket) {
9a654598 6591 status = LTTNG_ERR_INVALID;
5c786ded
JD
6592 goto error;
6593 }
6594
8c924c7b
MD
6595 /* Add the UST default trace dir to path. */
6596 memset(pathname, 0, sizeof(pathname));
5da88b0f 6597 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
8c924c7b 6598 ua_sess->path);
6dc3064a 6599 if (ret < 0) {
9a654598 6600 status = LTTNG_ERR_INVALID;
8c924c7b 6601 PERROR("snprintf snapshot path");
6dc3064a
DG
6602 goto error;
6603 }
affce97e
JG
6604 /* Free path allowed on previous iteration. */
6605 free(trace_path);
5da88b0f
MD
6606 trace_path = setup_channel_trace_path(usess->consumer, pathname,
6607 &consumer_path_offset);
3b967712
MD
6608 if (!trace_path) {
6609 status = LTTNG_ERR_INVALID;
6610 goto error;
6611 }
f3db82be 6612 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
8c924c7b 6613 ua_chan, node.node) {
9a654598 6614 status = consumer_snapshot_channel(socket,
470cc211 6615 ua_chan->key, output, 0,
2463b787
JR
6616 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6617 lttng_credentials_get_gid(&ua_sess->effective_credentials),
5da88b0f 6618 &trace_path[consumer_path_offset], wait,
d2956687 6619 nb_packets_per_stream);
9a654598
JG
6620 switch (status) {
6621 case LTTNG_OK:
6622 break;
6623 case LTTNG_ERR_CHAN_NOT_FOUND:
6624 continue;
6625 default:
8c924c7b
MD
6626 goto error;
6627 }
6628 }
6629
6630 registry = get_session_registry(ua_sess);
fad1ed2f 6631 if (!registry) {
9bbfb88c
MD
6632 DBG("Application session is being torn down. Skip application.");
6633 continue;
fad1ed2f 6634 }
9a654598 6635 status = consumer_snapshot_channel(socket,
470cc211 6636 registry->metadata_key, output, 1,
2463b787
JR
6637 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6638 lttng_credentials_get_gid(&ua_sess->effective_credentials),
5da88b0f 6639 &trace_path[consumer_path_offset], wait, 0);
9a654598
JG
6640 switch (status) {
6641 case LTTNG_OK:
6642 break;
6643 case LTTNG_ERR_CHAN_NOT_FOUND:
6644 continue;
6645 default:
8c924c7b
MD
6646 goto error;
6647 }
6648 }
6649 break;
6650 }
6651 default:
6652 assert(0);
6653 break;
6dc3064a
DG
6654 }
6655
6656error:
affce97e 6657 free(trace_path);
6dc3064a 6658 rcu_read_unlock();
9a654598 6659 return status;
6dc3064a 6660}
5c786ded
JD
6661
6662/*
d07ceecd 6663 * Return the size taken by one more packet per stream.
5c786ded 6664 */
fb9a95c4
JG
6665uint64_t ust_app_get_size_one_more_packet_per_stream(
6666 const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
5c786ded 6667{
d07ceecd 6668 uint64_t tot_size = 0;
5c786ded
JD
6669 struct ust_app *app;
6670 struct lttng_ht_iter iter;
6671
6672 assert(usess);
6673
6674 switch (usess->buffer_type) {
6675 case LTTNG_BUFFER_PER_UID:
6676 {
6677 struct buffer_reg_uid *reg;
6678
6679 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6680 struct buffer_reg_channel *reg_chan;
6681
b7064eaa 6682 rcu_read_lock();
5c786ded
JD
6683 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6684 reg_chan, node.node) {
d07ceecd
MD
6685 if (cur_nr_packets >= reg_chan->num_subbuf) {
6686 /*
6687 * Don't take channel into account if we
6688 * already grab all its packets.
6689 */
6690 continue;
6691 }
6692 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
5c786ded 6693 }
b7064eaa 6694 rcu_read_unlock();
5c786ded
JD
6695 }
6696 break;
6697 }
6698 case LTTNG_BUFFER_PER_PID:
6699 {
6700 rcu_read_lock();
6701 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6702 struct ust_app_channel *ua_chan;
6703 struct ust_app_session *ua_sess;
6704 struct lttng_ht_iter chan_iter;
6705
6706 ua_sess = lookup_session_by_app(usess, app);
6707 if (!ua_sess) {
6708 /* Session not associated with this app. */
6709 continue;
6710 }
6711
6712 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6713 ua_chan, node.node) {
d07ceecd
MD
6714 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
6715 /*
6716 * Don't take channel into account if we
6717 * already grab all its packets.
6718 */
6719 continue;
6720 }
6721 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
5c786ded
JD
6722 }
6723 }
6724 rcu_read_unlock();
6725 break;
6726 }
6727 default:
6728 assert(0);
6729 break;
6730 }
6731
d07ceecd 6732 return tot_size;
5c786ded 6733}
fb83fe64
JD
6734
/*
 * Fetch the per-UID runtime statistics (lost packets or discarded
 * events, depending on the channel mode) for the given channel id.
 *
 * Both output counters are zeroed first; a missing channel key is not
 * treated as an error and leaves them at zero.
 *
 * Returns 0 on success (including "not found"), else a negative value.
 */
int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
		struct cds_list_head *buffer_reg_uid_list,
		struct consumer_output *consumer, uint64_t uchan_id,
		int overwrite, uint64_t *discarded, uint64_t *lost)
{
	int ret;
	uint64_t consumer_chan_key;

	*discarded = 0;
	*lost = 0;

	ret = buffer_reg_uid_consumer_channel_key(
			buffer_reg_uid_list, uchan_id, &consumer_chan_key);
	if (ret < 0) {
		/* Not found. */
		ret = 0;
		goto end;
	}

	if (overwrite) {
		/* Overwrite mode: packets are lost, not discarded. */
		ret = consumer_get_lost_packets(ust_session_id,
				consumer_chan_key, consumer, lost);
	} else {
		/* Discard mode: events are discarded, not lost. */
		ret = consumer_get_discarded_events(ust_session_id,
				consumer_chan_key, consumer, discarded);
	}

end:
	return ret;
}
6765
6766int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
6767 struct ltt_ust_channel *uchan,
6768 struct consumer_output *consumer, int overwrite,
6769 uint64_t *discarded, uint64_t *lost)
6770{
6771 int ret = 0;
6772 struct lttng_ht_iter iter;
6773 struct lttng_ht_node_str *ua_chan_node;
6774 struct ust_app *app;
6775 struct ust_app_session *ua_sess;
6776 struct ust_app_channel *ua_chan;
6777
70dd8162
MD
6778 *discarded = 0;
6779 *lost = 0;
6780
fb83fe64
JD
6781 rcu_read_lock();
6782 /*
70dd8162
MD
6783 * Iterate over every registered applications. Sum counters for
6784 * all applications containing requested session and channel.
fb83fe64
JD
6785 */
6786 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6787 struct lttng_ht_iter uiter;
6788
6789 ua_sess = lookup_session_by_app(usess, app);
6790 if (ua_sess == NULL) {
6791 continue;
6792 }
6793
6794 /* Get channel */
ee022399 6795 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
fb83fe64
JD
6796 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6797 /* If the session is found for the app, the channel must be there */
6798 assert(ua_chan_node);
6799
6800 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
6801
6802 if (overwrite) {
70dd8162
MD
6803 uint64_t _lost;
6804
fb83fe64 6805 ret = consumer_get_lost_packets(usess->id, ua_chan->key,
70dd8162
MD
6806 consumer, &_lost);
6807 if (ret < 0) {
6808 break;
6809 }
6810 (*lost) += _lost;
fb83fe64 6811 } else {
70dd8162
MD
6812 uint64_t _discarded;
6813
fb83fe64 6814 ret = consumer_get_discarded_events(usess->id,
70dd8162
MD
6815 ua_chan->key, consumer, &_discarded);
6816 if (ret < 0) {
6817 break;
6818 }
6819 (*discarded) += _discarded;
fb83fe64 6820 }
fb83fe64
JD
6821 }
6822
fb83fe64
JD
6823 rcu_read_unlock();
6824 return ret;
6825}
c2561365
JD
6826
6827static
6828int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
6829 struct ust_app *app)
6830{
6831 int ret = 0;
6832 struct ust_app_session *ua_sess;
6833
6834 DBG("Regenerating the metadata for ust app pid %d", app->pid);
6835
6836 rcu_read_lock();
6837
6838 ua_sess = lookup_session_by_app(usess, app);
6839 if (ua_sess == NULL) {
6840 /* The session is in teardown process. Ignore and continue. */
6841 goto end;
6842 }
6843
6844 pthread_mutex_lock(&ua_sess->lock);
6845
6846 if (ua_sess->deleted) {
6847 goto end_unlock;
6848 }
6849
6850 pthread_mutex_lock(&app->sock_lock);
6851 ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
6852 pthread_mutex_unlock(&app->sock_lock);
6853
6854end_unlock:
6855 pthread_mutex_unlock(&ua_sess->lock);
6856
6857end:
6858 rcu_read_unlock();
6859 health_code_update();
6860 return ret;
6861}
6862
6863/*
6864 * Regenerate the statedump for each app in the session.
6865 */
6866int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
6867{
6868 int ret = 0;
6869 struct lttng_ht_iter iter;
6870 struct ust_app *app;
6871
6872 DBG("Regenerating the metadata for all UST apps");
6873
6874 rcu_read_lock();
6875
6876 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6877 if (!app->compatible) {
6878 continue;
6879 }
6880
6881 ret = ust_app_regenerate_statedump(usess, app);
6882 if (ret < 0) {
6883 /* Continue to the next app even on error */
6884 continue;
6885 }
6886 }
6887
6888 rcu_read_unlock();
6889
6890 return 0;
6891}
5c408ad8
JD
6892
6893/*
6894 * Rotate all the channels of a session.
6895 *
6f6d3b69 6896 * Return LTTNG_OK on success or else an LTTng error code.
5c408ad8 6897 */
6f6d3b69 6898enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
5c408ad8 6899{
6f6d3b69
MD
6900 int ret;
6901 enum lttng_error_code cmd_ret = LTTNG_OK;
5c408ad8
JD
6902 struct lttng_ht_iter iter;
6903 struct ust_app *app;
6904 struct ltt_ust_session *usess = session->ust_session;
5c408ad8
JD
6905
6906 assert(usess);
6907
6908 rcu_read_lock();
6909
6910 switch (usess->buffer_type) {
6911 case LTTNG_BUFFER_PER_UID:
6912 {
6913 struct buffer_reg_uid *reg;
6914
6915 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6916 struct buffer_reg_channel *reg_chan;
6917 struct consumer_socket *socket;
6918
14d3fca9
JR
6919 if (!reg->registry->reg.ust->metadata_key) {
6920 /* Skip since no metadata is present */
6921 continue;
6922 }
6923
5c408ad8
JD
6924 /* Get consumer socket to use to push the metadata.*/
6925 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6926 usess->consumer);
6927 if (!socket) {
6f6d3b69 6928 cmd_ret = LTTNG_ERR_INVALID;
5c408ad8
JD
6929 goto error;
6930 }
6931
5c408ad8
JD
6932 /* Rotate the data channels. */
6933 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6934 reg_chan, node.node) {
5c408ad8
JD
6935 ret = consumer_rotate_channel(socket,
6936 reg_chan->consumer_key,
6937 usess->uid, usess->gid,
d2956687
JG
6938 usess->consumer,
6939 /* is_metadata_channel */ false);
5c408ad8 6940 if (ret < 0) {
6f6d3b69 6941 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
6942 goto error;
6943 }
6944 }
6945
6946 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
6947
6948 ret = consumer_rotate_channel(socket,
6949 reg->registry->reg.ust->metadata_key,
6950 usess->uid, usess->gid,
d2956687
JG
6951 usess->consumer,
6952 /* is_metadata_channel */ true);
5c408ad8 6953 if (ret < 0) {
6f6d3b69 6954 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
6955 goto error;
6956 }
5c408ad8
JD
6957 }
6958 break;
6959 }
6960 case LTTNG_BUFFER_PER_PID:
6961 {
6962 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6963 struct consumer_socket *socket;
6964 struct lttng_ht_iter chan_iter;
6965 struct ust_app_channel *ua_chan;
6966 struct ust_app_session *ua_sess;
6967 struct ust_registry_session *registry;
6968
6969 ua_sess = lookup_session_by_app(usess, app);
6970 if (!ua_sess) {
6971 /* Session not associated with this app. */
6972 continue;
6973 }
5c408ad8
JD
6974
6975 /* Get the right consumer socket for the application. */
6976 socket = consumer_find_socket_by_bitness(app->bits_per_long,
6977 usess->consumer);
6978 if (!socket) {
6f6d3b69 6979 cmd_ret = LTTNG_ERR_INVALID;
5c408ad8
JD
6980 goto error;
6981 }
6982
6983 registry = get_session_registry(ua_sess);
6984 if (!registry) {
6f6d3b69
MD
6985 DBG("Application session is being torn down. Skip application.");
6986 continue;
5c408ad8
JD
6987 }
6988
5c408ad8
JD
6989 /* Rotate the data channels. */
6990 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6991 ua_chan, node.node) {
470cc211
JG
6992 ret = consumer_rotate_channel(socket,
6993 ua_chan->key,
2463b787
JR
6994 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6995 lttng_credentials_get_gid(&ua_sess->effective_credentials),
d2956687
JG
6996 ua_sess->consumer,
6997 /* is_metadata_channel */ false);
5c408ad8 6998 if (ret < 0) {
6f6d3b69
MD
6999 /* Per-PID buffer and application going away. */
7000 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7001 continue;
7002 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
7003 goto error;
7004 }
7005 }
7006
7007 /* Rotate the metadata channel. */
7008 (void) push_metadata(registry, usess->consumer);
470cc211
JG
7009 ret = consumer_rotate_channel(socket,
7010 registry->metadata_key,
2463b787
JR
7011 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7012 lttng_credentials_get_gid(&ua_sess->effective_credentials),
d2956687
JG
7013 ua_sess->consumer,
7014 /* is_metadata_channel */ true);
5c408ad8 7015 if (ret < 0) {
6f6d3b69
MD
7016 /* Per-PID buffer and application going away. */
7017 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7018 continue;
7019 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
7020 goto error;
7021 }
5c408ad8
JD
7022 }
7023 break;
7024 }
7025 default:
7026 assert(0);
7027 break;
7028 }
7029
6f6d3b69 7030 cmd_ret = LTTNG_OK;
5c408ad8
JD
7031
7032error:
7033 rcu_read_unlock();
6f6d3b69 7034 return cmd_ret;
5c408ad8 7035}
d2956687
JG
7036
/*
 * Create, within the session's current trace chunk, the per-channel
 * index subdirectories. Creating the index subdirectory implicitly
 * creates the channel's own directory as well.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_create_channel_subdirectories(
		const struct ltt_ust_session *usess)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	enum lttng_trace_chunk_status chunk_status;
	char *pathname_index;
	int fmt_ret;

	assert(usess->current_trace_chunk);
	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		/* One directory tree per per-UID buffer registry. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			fmt_ret = asprintf(&pathname_index,
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
					reg->uid, reg->bits_per_long);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}

			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
					usess->current_trace_chunk,
					pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app *app;

		/*
		 * Create the toplevel ust/ directory in case no apps are running.
		 */
		chunk_status = lttng_trace_chunk_create_subdirectory(
				usess->current_trace_chunk,
				DEFAULT_UST_TRACE_DIR);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ret = LTTNG_ERR_CREATE_DIR_FAIL;
			goto error;
		}

		/* One directory tree per registered application session. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
				pid_n.node) {
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			fmt_ret = asprintf(&pathname_index,
					DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
					ua_sess->path);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
					usess->current_trace_chunk,
					pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	default:
		/* Unknown buffer type: programming error. */
		abort();
	}

	ret = LTTNG_OK;
error:
	rcu_read_unlock();
	return ret;
}
4a9b9759
MD
7143
7144/*
7145 * Clear all the channels of a session.
7146 *
7147 * Return LTTNG_OK on success or else an LTTng error code.
7148 */
7149enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
7150{
7151 int ret;
7152 enum lttng_error_code cmd_ret = LTTNG_OK;
7153 struct lttng_ht_iter iter;
7154 struct ust_app *app;
7155 struct ltt_ust_session *usess = session->ust_session;
7156
7157 assert(usess);
7158
7159 rcu_read_lock();
7160
7161 if (usess->active) {
7162 ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
7163 cmd_ret = LTTNG_ERR_FATAL;
7164 goto end;
7165 }
7166
7167 switch (usess->buffer_type) {
7168 case LTTNG_BUFFER_PER_UID:
7169 {
7170 struct buffer_reg_uid *reg;
7171
7172 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7173 struct buffer_reg_channel *reg_chan;
7174 struct consumer_socket *socket;
7175
7176 /* Get consumer socket to use to push the metadata.*/
7177 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7178 usess->consumer);
7179 if (!socket) {
7180 cmd_ret = LTTNG_ERR_INVALID;
7181 goto error_socket;
7182 }
7183
7184 /* Clear the data channels. */
7185 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
7186 reg_chan, node.node) {
7187 ret = consumer_clear_channel(socket,
7188 reg_chan->consumer_key);
7189 if (ret < 0) {
7190 goto error;
7191 }
7192 }
7193
7194 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
7195
7196 /*
7197 * Clear the metadata channel.
7198 * Metadata channel is not cleared per se but we still need to
7199 * perform a rotation operation on it behind the scene.
7200 */
7201 ret = consumer_clear_channel(socket,
7202 reg->registry->reg.ust->metadata_key);
7203 if (ret < 0) {
7204 goto error;
7205 }
7206 }
7207 break;
7208 }
7209 case LTTNG_BUFFER_PER_PID:
7210 {
7211 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7212 struct consumer_socket *socket;
7213 struct lttng_ht_iter chan_iter;
7214 struct ust_app_channel *ua_chan;
7215 struct ust_app_session *ua_sess;
7216 struct ust_registry_session *registry;
7217
7218 ua_sess = lookup_session_by_app(usess, app);
7219 if (!ua_sess) {
7220 /* Session not associated with this app. */
7221 continue;
7222 }
7223
7224 /* Get the right consumer socket for the application. */
7225 socket = consumer_find_socket_by_bitness(app->bits_per_long,
7226 usess->consumer);
7227 if (!socket) {
7228 cmd_ret = LTTNG_ERR_INVALID;
7229 goto error_socket;
7230 }
7231
7232 registry = get_session_registry(ua_sess);
7233 if (!registry) {
7234 DBG("Application session is being torn down. Skip application.");
7235 continue;
7236 }
7237
7238 /* Clear the data channels. */
7239 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
7240 ua_chan, node.node) {
7241 ret = consumer_clear_channel(socket, ua_chan->key);
7242 if (ret < 0) {
7243 /* Per-PID buffer and application going away. */
7244 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7245 continue;
7246 }
7247 goto error;
7248 }
7249 }
7250
7251 (void) push_metadata(registry, usess->consumer);
7252
7253 /*
7254 * Clear the metadata channel.
7255 * Metadata channel is not cleared per se but we still need to
7256 * perform rotation operation on it behind the scene.
7257 */
7258 ret = consumer_clear_channel(socket, registry->metadata_key);
7259 if (ret < 0) {
7260 /* Per-PID buffer and application going away. */
7261 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7262 continue;
7263 }
7264 goto error;
7265 }
7266 }
7267 break;
7268 }
7269 default:
7270 assert(0);
7271 break;
7272 }
7273
7274 cmd_ret = LTTNG_OK;
7275 goto end;
7276
7277error:
7278 switch (-ret) {
7279 case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
7280 cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
7281 break;
7282 default:
7283 cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
7284 }
7285
7286error_socket:
7287end:
7288 rcu_read_unlock();
7289 return cmd_ret;
7290}
04ed9e10
JG
7291
7292/*
7293 * This function skips the metadata channel as the begin/end timestamps of a
7294 * metadata packet are useless.
7295 *
7296 * Moreover, opening a packet after a "clear" will cause problems for live
7297 * sessions as it will introduce padding that was not part of the first trace
7298 * chunk. The relay daemon expects the content of the metadata stream of
7299 * successive metadata trace chunks to be strict supersets of one another.
7300 *
7301 * For example, flushing a packet at the beginning of the metadata stream of
7302 * a trace chunk resulting from a "clear" session command will cause the
7303 * size of the metadata stream of the new trace chunk to not match the size of
7304 * the metadata stream of the original chunk. This will confuse the relay
7305 * daemon as the same "offset" in a metadata stream will no longer point
7306 * to the same content.
7307 */
7308enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
7309{
7310 enum lttng_error_code ret = LTTNG_OK;
7311 struct lttng_ht_iter iter;
7312 struct ltt_ust_session *usess = session->ust_session;
7313
7314 assert(usess);
7315
7316 rcu_read_lock();
7317
7318 switch (usess->buffer_type) {
7319 case LTTNG_BUFFER_PER_UID:
7320 {
7321 struct buffer_reg_uid *reg;
7322
7323 cds_list_for_each_entry (
7324 reg, &usess->buffer_reg_uid_list, lnode) {
7325 struct buffer_reg_channel *reg_chan;
7326 struct consumer_socket *socket;
7327
7328 socket = consumer_find_socket_by_bitness(
7329 reg->bits_per_long, usess->consumer);
7330 if (!socket) {
7331 ret = LTTNG_ERR_FATAL;
7332 goto error;
7333 }
7334
7335 cds_lfht_for_each_entry(reg->registry->channels->ht,
7336 &iter.iter, reg_chan, node.node) {
7337 const int open_ret =
7338 consumer_open_channel_packets(
7339 socket,
7340 reg_chan->consumer_key);
7341
7342 if (open_ret < 0) {
7343 ret = LTTNG_ERR_UNK;
7344 goto error;
7345 }
7346 }
7347 }
7348 break;
7349 }
7350 case LTTNG_BUFFER_PER_PID:
7351 {
7352 struct ust_app *app;
7353
7354 cds_lfht_for_each_entry (
7355 ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7356 struct consumer_socket *socket;
7357 struct lttng_ht_iter chan_iter;
7358 struct ust_app_channel *ua_chan;
7359 struct ust_app_session *ua_sess;
7360 struct ust_registry_session *registry;
7361
7362 ua_sess = lookup_session_by_app(usess, app);
7363 if (!ua_sess) {
7364 /* Session not associated with this app. */
7365 continue;
7366 }
7367
7368 /* Get the right consumer socket for the application. */
7369 socket = consumer_find_socket_by_bitness(
7370 app->bits_per_long, usess->consumer);
7371 if (!socket) {
7372 ret = LTTNG_ERR_FATAL;
7373 goto error;
7374 }
7375
7376 registry = get_session_registry(ua_sess);
7377 if (!registry) {
7378 DBG("Application session is being torn down. Skip application.");
7379 continue;
7380 }
7381
7382 cds_lfht_for_each_entry(ua_sess->channels->ht,
7383 &chan_iter.iter, ua_chan, node.node) {
7384 const int open_ret =
7385 consumer_open_channel_packets(
7386 socket,
7387 ua_chan->key);
7388
7389 if (open_ret < 0) {
7390 /*
7391 * Per-PID buffer and application going
7392 * away.
7393 */
97a171e1 7394 if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
04ed9e10
JG
7395 continue;
7396 }
7397
7398 ret = LTTNG_ERR_UNK;
7399 goto error;
7400 }
7401 }
7402 }
7403 break;
7404 }
7405 default:
7406 abort();
7407 break;
7408 }
7409
7410error:
7411 rcu_read_unlock();
7412 return ret;
7413}
This page took 0.595125 seconds and 5 git commands to generate.