Fix: consumerd: live client receives incomplete metadata
[lttng-tools.git] / src / common / ust-consumer / ust-consumer.c
/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _LGPL_SOURCE
#include <assert.h>
#include <lttng/ust-ctl.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <inttypes.h>
#include <unistd.h>
#include <urcu/list.h>
#include <signal.h>
#include <stdbool.h>
#include <stdint.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/relayd/relayd.h>
#include <common/compat/fcntl.h>
#include <common/compat/endian.h>
#include <common/consumer/consumer-metadata-cache.h>
#include <common/consumer/consumer-stream.h>
#include <common/consumer/consumer-timer.h>
#include <common/utils.h>
#include <common/index/index.h>
#include <common/consumer/consumer.h>
#include <common/optional.h>

#include "ust-consumer.h"

#define INT_MAX_STR_LEN 12 /* includes \0 */

extern struct lttng_consumer_global_data consumer_data;
extern int consumer_poll_timeout;
extern volatile int consumer_quit;

/*
 * Free the channel object and all streams associated with it. This MUST be
 * used if and only if the channel has _NEVER_ been added to the global
 * channel hash table.
 */
static void destroy_channel(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	assert(channel);

	DBG("UST consumer cleaning stream list");

	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {

		health_code_update();

		cds_list_del(&stream->send_node);
		ustctl_destroy_stream(stream->ustream);
		free(stream);
	}

	/*
	 * If a channel is available, meaning that it was created before the
	 * streams were, delete it.
	 */
	if (channel->uchan) {
		lttng_ustconsumer_del_channel(channel);
		lttng_ustconsumer_free_channel(channel);
	}
	free(channel);
}

/*
 * Add channel to internal consumer state.
 *
 * Returns 0 on success or else a negative value.
 */
static int add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;

	assert(channel);
	assert(ctx);

	if (ctx->on_recv_channel != NULL) {
		ret = ctx->on_recv_channel(channel);
		if (ret == 0) {
			ret = consumer_add_channel(channel, ctx);
		} else if (ret < 0) {
			/* Most likely an ENOMEM. */
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			goto error;
		}
	} else {
		ret = consumer_add_channel(channel, ctx);
	}

	DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);

error:
	return ret;
}

/*
 * Allocate and return a consumer stream object. If _alloc_ret is not NULL,
 * the error value, if applicable, is set in it; otherwise it is left
 * untouched.
 *
 * Return NULL on error else the newly allocated stream object.
 */
static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *_alloc_ret)
{
	int alloc_ret;
	struct lttng_consumer_stream *stream = NULL;

	assert(channel);
	assert(ctx);

	stream = consumer_stream_create(
			channel,
			channel->key,
			key,
			LTTNG_CONSUMER_ACTIVE_STREAM,
			channel->name,
			channel->uid,
			channel->gid,
			channel->relayd_id,
			channel->session_id,
			cpu,
			&alloc_ret,
			channel->type,
			channel->monitor);
	if (stream == NULL) {
		switch (alloc_ret) {
		case -ENOENT:
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			DBG3("Could not find channel");
			break;
		case -ENOMEM:
		case -EINVAL:
		default:
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			break;
		}
		goto error;
	}

error:
	if (_alloc_ret) {
		*_alloc_ret = alloc_ret;
	}
	return stream;
}

/*
 * Send the given stream pointer to the corresponding thread.
 *
 * Returns 0 on success else a negative value.
 */
static int send_stream_to_thread(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_pipe *stream_pipe;

	/* Get the right pipe where the stream will be sent. */
	if (stream->metadata_flag) {
		ret = consumer_add_metadata_stream(stream);
		if (ret) {
			ERR("Consumer add metadata stream %" PRIu64 " failed.",
					stream->key);
			goto error;
		}
		stream_pipe = ctx->consumer_metadata_pipe;
	} else {
		ret = consumer_add_data_stream(stream);
		if (ret) {
			ERR("Consumer add stream %" PRIu64 " failed.",
					stream->key);
			goto error;
		}
		stream_pipe = ctx->consumer_data_pipe;
	}

	/*
	 * From this point on, the stream's ownership has been moved away from
	 * the channel and becomes globally visible.
	 */
	stream->globally_visible = 1;

	ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
	if (ret < 0) {
		ERR("Consumer write %s stream to pipe %d",
				stream->metadata_flag ? "metadata" : "data",
				lttng_pipe_get_writefd(stream_pipe));
		if (stream->metadata_flag) {
			consumer_del_stream_for_metadata(stream);
		} else {
			consumer_del_stream_for_data(stream);
		}
	}
error:
	return ret;
}

static
int get_stream_shm_path(char *stream_shm_path, const char *shm_path, int cpu)
{
	char cpu_nr[INT_MAX_STR_LEN]; /* int max len */
	int ret;

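	/* Build "<shm_path><cpu>" (e.g. append "3" for CPU 3) without overflowing PATH_MAX. */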
	strncpy(stream_shm_path, shm_path, PATH_MAX);
	stream_shm_path[PATH_MAX - 1] = '\0';
	ret = snprintf(cpu_nr, INT_MAX_STR_LEN, "%i", cpu);
	if (ret < 0) {
		PERROR("snprintf");
		goto end;
	}
	strncat(stream_shm_path, cpu_nr,
			PATH_MAX - strlen(stream_shm_path) - 1);
	ret = 0;
end:
	return ret;
}

/*
 * Create streams for the given channel using liblttng-ust-ctl.
 *
 * Return 0 on success else a negative value.
 */
static int create_ust_streams(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret, cpu = 0;
	struct ustctl_consumer_stream *ustream;
	struct lttng_consumer_stream *stream;

	assert(channel);
	assert(ctx);

	/*
	 * Loop while a stream is available from ustctl. When NULL is returned,
	 * we've reached the end of the possible streams for this channel.
	 */
	while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
		int wait_fd;
		int ust_metadata_pipe[2];

		health_code_update();

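		/*
		 * For a monitored metadata channel, the stream is not polled
		 * through the ring buffer's wakeup fd: availability of new
		 * cached metadata is signalled through this local pipe, whose
		 * read end becomes the stream's wait fd.
		 */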
		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
			ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
			if (ret < 0) {
				ERR("Create ust metadata poll pipe");
				goto error;
			}
			wait_fd = ust_metadata_pipe[0];
		} else {
			wait_fd = ustctl_stream_get_wait_fd(ustream);
		}

		/* Allocate consumer stream object. */
		stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
		if (!stream) {
			goto error_alloc;
		}
		stream->ustream = ustream;
		/*
		 * Store it so we can save multiple function calls afterwards since
		 * this value is used heavily in the stream threads. This is UST
		 * specific so this is why it's done after allocation.
		 */
		stream->wait_fd = wait_fd;

		/*
		 * Increment channel refcount since the channel reference has now been
		 * assigned in the allocation process above.
		 */
		if (stream->chan->monitor) {
			uatomic_inc(&stream->chan->refcount);
		}

		/*
		 * Order is important; this is why a list is used. On error, the
		 * caller should clean this list.
		 */
		cds_list_add_tail(&stream->send_node, &channel->streams.head);

		ret = ustctl_get_max_subbuf_size(stream->ustream,
				&stream->max_sb_size);
		if (ret < 0) {
			ERR("ustctl_get_max_subbuf_size failed for stream %s",
					stream->name);
			goto error;
		}

		/* Do actions once stream has been received. */
		if (ctx->on_recv_stream) {
			ret = ctx->on_recv_stream(stream);
			if (ret < 0) {
				goto error;
			}
		}

		DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
				stream->name, stream->key, stream->relayd_stream_id);

		/* Set next CPU stream. */
		channel->streams.count = ++cpu;

		/* Keep stream reference when creating metadata. */
		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
			channel->metadata_stream = stream;
			if (channel->monitor) {
				/* Set metadata poll pipe if we created one */
				memcpy(stream->ust_metadata_poll_pipe,
						ust_metadata_pipe,
						sizeof(ust_metadata_pipe));
			}
		}
	}

	return 0;

error:
error_alloc:
	return ret;
}

/*
 * create_posix_shm is never called concurrently within a process.
 */
static
int create_posix_shm(void)
{
	char tmp_name[NAME_MAX];
	int shmfd, ret;

	ret = snprintf(tmp_name, NAME_MAX, "/ust-shm-consumer-%d", getpid());
	if (ret < 0) {
		PERROR("snprintf");
		return -1;
	}
	/*
	 * Allocate shm, and immediately unlink its shm object, keeping only
	 * the file descriptor as a reference to the object. We specifically
	 * do _not_ use the / at the beginning of the pathname so that some
	 * OS implementations can keep it local to the process (POSIX leaves
	 * this implementation-defined).
	 */
	shmfd = shm_open(tmp_name, O_CREAT | O_EXCL | O_RDWR, 0700);
	if (shmfd < 0) {
		PERROR("shm_open");
		goto error_shm_open;
	}
	ret = shm_unlink(tmp_name);
	if (ret < 0 && errno != ENOENT) {
		PERROR("shm_unlink");
		goto error_shm_release;
	}
	return shmfd;

error_shm_release:
	ret = close(shmfd);
	if (ret) {
		PERROR("close");
	}
error_shm_open:
	return -1;
}

static int open_ust_stream_fd(struct lttng_consumer_channel *channel,
		struct ustctl_consumer_channel_attr *attr,
		int cpu)
{
	char shm_path[PATH_MAX];
	int ret;

	if (!channel->shm_path[0]) {
		return create_posix_shm();
	}
	ret = get_stream_shm_path(shm_path, channel->shm_path, cpu);
	if (ret) {
		goto error_shm_path;
	}
	return run_as_open(shm_path,
			O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR,
			channel->uid, channel->gid);

error_shm_path:
	return -1;
}

/*
 * Create a UST channel with the given attributes and send it to the session
 * daemon using the ust ctl API.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_channel(struct lttng_consumer_channel *channel,
		struct ustctl_consumer_channel_attr *attr,
		struct ustctl_consumer_channel **ust_chanp)
{
	int ret, nr_stream_fds, i, j;
	int *stream_fds;
	struct ustctl_consumer_channel *ust_channel;

	assert(channel);
	assert(attr);
	assert(ust_chanp);

	DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
			"subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
			"switch_timer_interval: %u, read_timer_interval: %u, "
			"output: %d, type: %d", attr->overwrite, attr->subbuf_size,
			attr->num_subbuf, attr->switch_timer_interval,
			attr->read_timer_interval, attr->output, attr->type);

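	/*
	 * A metadata channel is backed by a single stream, while a per-CPU
	 * data channel needs one shm file descriptor per stream, as reported
	 * by liblttng-ust-ctl.
	 */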
	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA)
		nr_stream_fds = 1;
	else
		nr_stream_fds = ustctl_get_nr_stream_per_channel();
	stream_fds = zmalloc(nr_stream_fds * sizeof(*stream_fds));
	if (!stream_fds) {
		ret = -1;
		goto error_alloc;
	}
	for (i = 0; i < nr_stream_fds; i++) {
		stream_fds[i] = open_ust_stream_fd(channel, attr, i);
		if (stream_fds[i] < 0) {
			ret = -1;
			goto error_open;
		}
	}
	ust_channel = ustctl_create_channel(attr, stream_fds, nr_stream_fds);
	if (!ust_channel) {
		ret = -1;
		goto error_create;
	}
	channel->nr_stream_fds = nr_stream_fds;
	channel->stream_fds = stream_fds;
	*ust_chanp = ust_channel;

	return 0;

error_create:
error_open:
	for (j = i - 1; j >= 0; j--) {
		int closeret;

		closeret = close(stream_fds[j]);
		if (closeret) {
			PERROR("close");
		}
		if (channel->shm_path[0]) {
			char shm_path[PATH_MAX];

			closeret = get_stream_shm_path(shm_path,
					channel->shm_path, j);
			if (closeret) {
				ERR("Cannot get stream shm path");
			}
			closeret = run_as_unlink(shm_path,
					channel->uid, channel->gid);
			if (closeret) {
				PERROR("unlink %s", shm_path);
			}
		}
	}
	/* Try to rmdir all directories under shm_path root. */
	if (channel->root_shm_path[0]) {
		(void) run_as_recursive_rmdir(channel->root_shm_path,
				channel->uid, channel->gid);
	}
	free(stream_fds);
error_alloc:
	return ret;
}

/*
 * Send a single given stream to the session daemon using the sock.
 *
 * Return 0 on success else a negative value.
 */
static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);
	assert(sock >= 0);

	DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);

	/* Send stream to session daemon. */
	ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}

/*
 * Send channel to sessiond.
 *
 * Return 0 on success or else a negative value.
 */
static int send_sessiond_channel(int sock,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *relayd_error)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttng_consumer_stream *stream;
	uint64_t relayd_id = -1ULL;

	assert(channel);
	assert(ctx);
	assert(sock >= 0);

	DBG("UST consumer sending channel %s to sessiond", channel->name);

	if (channel->relayd_id != (uint64_t) -1ULL) {
		cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

			health_code_update();

			/* Try to send the stream to the relayd if one is available. */
			ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
			if (ret < 0) {
				/*
				 * Flag that the relayd was the problem here probably due to a
				 * communication error on the socket.
				 */
				if (relayd_error) {
					*relayd_error = 1;
				}
				ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			}
			if (relayd_id == -1ULL) {
				relayd_id = stream->relayd_id;
			}
		}
	}

	/* Inform sessiond that we are about to send channel and streams. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		/*
		 * Either the session daemon is not responding or the relayd died so we
		 * stop now.
		 */
		goto error;
	}

	/* Send channel to sessiond. */
	ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
	if (ret < 0) {
		goto error;
	}

	ret = ustctl_channel_close_wakeup_fd(channel->uchan);
	if (ret < 0) {
		goto error;
	}

	/* The channel was sent successfully to the sessiond at this point. */
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

		health_code_update();

		/* Send stream to session daemon. */
		ret = send_sessiond_stream(sock, stream);
		if (ret < 0) {
			goto error;
		}
	}

	/* Tell sessiond there is no more stream. */
	ret = ustctl_send_stream_to_sessiond(sock, NULL);
	if (ret < 0) {
		goto error;
	}

	DBG("UST consumer NULL stream sent to sessiond");

	return 0;

error:
	if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		ret = -1;
	}
	return ret;
}

/*
 * Create a channel and its streams and add the channel to the internal
 * consumer state. The created streams must ONLY be sent once the GET_CHANNEL
 * command is received.
 *
 * Return 0 on success or else, a negative value is returned and the channel
 * MUST be destroyed by consumer_del_channel().
 */
static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
		struct lttng_consumer_channel *channel,
		struct ustctl_consumer_channel_attr *attr)
{
	int ret;

	assert(ctx);
	assert(channel);
	assert(attr);

	/*
	 * This value is still used by the kernel consumer since for the kernel,
	 * the stream ownership is not IN the consumer, so we need to know the
	 * number of streams left to initialize in order to know when to delete
	 * the channel (see consumer.c).
	 *
	 * As for the user space tracer, the consumer creates and sends the
	 * streams to the session daemon, which only sends them to the
	 * application once every stream of a channel has been received. That
	 * makes this value useless here because the streams will be added to
	 * the poll thread before the application receives them. This ensures
	 * that a stream cannot hang up during initialization of a channel.
	 */
	channel->nb_init_stream_left = 0;

	/* The reply msg status is handled in the following call. */
	ret = create_ust_channel(channel, attr, &channel->uchan);
	if (ret < 0) {
		goto end;
	}

	channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);

	/*
	 * For the snapshots (no monitor), we create the metadata streams
	 * on demand, not during the channel creation.
	 */
	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
		ret = 0;
		goto end;
	}

	/* Open all streams for this channel. */
	ret = create_ust_streams(channel, ctx);
	if (ret < 0) {
		goto end;
	}

end:
	return ret;
}

/*
 * Send all streams of a channel to the right thread handling it.
 *
 * On error, return a negative value else 0 on success.
 */
static int send_streams_to_thread(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_stream *stream, *stmp;

	assert(channel);
	assert(ctx);

	/* Send streams to the corresponding thread. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {

		health_code_update();

		/* Sending the stream to the thread. */
		ret = send_stream_to_thread(stream, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			/* Remove node from the channel stream list. */
			cds_list_del(&stream->send_node);
			goto error;
		}

		/* Remove node from the channel stream list. */
		cds_list_del(&stream->send_node);
	}

error:
	return ret;
}

/*
 * Flush channel's streams using the given key to retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int flush_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer flush channel key %" PRIu64, chan_key);

	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, flush it. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {

		health_code_update();

		pthread_mutex_lock(&stream->lock);

		/*
		 * Protect against concurrent teardown of a stream.
		 */
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}

		if (!stream->quiescent) {
			ustctl_flush_buffer(stream->ustream, 0);
			stream->quiescent = true;
		}
next:
		pthread_mutex_unlock(&stream->lock);
	}
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Clear quiescent state from channel's streams using the given key to
 * retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int clear_quiescent_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);

	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, clear quiescent state. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {

		health_code_update();

		pthread_mutex_lock(&stream->lock);
		stream->quiescent = false;
		pthread_mutex_unlock(&stream->lock);
	}
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
 * RCU read side lock MUST be acquired before calling this function.
 *
 * Return 0 on success else an LTTng error code.
 */
static int close_metadata(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	unsigned int channel_monitor;

	DBG("UST consumer close metadata key %" PRIu64, chan_key);

	channel = consumer_find_channel(chan_key);
	if (!channel) {
		/*
		 * This is possible if the metadata thread has issued a delete because
		 * the endpoint of the stream hung up. There is no way the session
		 * daemon can know about it, thus use a DBG instead of an actual
		 * error.
		 */
		DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	channel_monitor = channel->monitor;
	if (cds_lfht_is_node_deleted(&channel->node.node)) {
		goto error_unlock;
	}

	lttng_ustconsumer_close_metadata(channel);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	/*
	 * The ownership of a metadata channel depends on the type of
	 * session to which it belongs. In effect, the monitor flag is checked
	 * to determine if this metadata channel is in "snapshot" mode or not.
	 *
	 * In the non-snapshot case, the metadata channel is created along with
	 * a single stream which will remain present until the metadata channel
	 * is destroyed (on the destruction of its session). In this case, the
	 * metadata stream is "monitored" by the metadata poll thread and holds
	 * the ownership of its channel.
	 *
	 * Closing the metadata will cause the metadata stream's "metadata poll
	 * pipe" to be closed. Closing this pipe will wake up the metadata poll
	 * thread which will tear down the metadata stream which, in turn,
	 * deletes the metadata channel.
	 *
	 * In the snapshot case, the metadata stream is created and destroyed
	 * on every snapshot record. Since the channel doesn't have an owner
	 * other than the session daemon, it is safe to destroy it immediately
	 * on reception of the CLOSE_METADATA command.
	 */
	if (!channel_monitor) {
		/*
		 * The channel and consumer_data locks must be
		 * released before this call since consumer_del_channel
		 * re-acquires the channel and consumer_data locks to teardown
		 * the channel and queue its reclamation by the "call_rcu"
		 * worker thread.
		 */
		consumer_del_channel(channel);
	}

	return ret;
error_unlock:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
error:
	return ret;
}

/*
 * RCU read side lock MUST be acquired before calling this function.
 *
 * Return 0 on success else an LTTng error code.
 */
static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
{
	int ret;
	struct lttng_consumer_channel *metadata;

	DBG("UST consumer setup metadata key %" PRIu64, key);

	metadata = consumer_find_channel(key);
	if (!metadata) {
		ERR("UST consumer push metadata %" PRIu64 " not found", key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto end;
	}

	/*
	 * In no monitor mode, the metadata channel has no stream(s) so skip the
	 * ownership transfer to the metadata thread.
	 */
	if (!metadata->monitor) {
		DBG("Metadata channel in no monitor");
		ret = 0;
		goto end;
	}

	/*
	 * Send metadata stream to relayd if one is available. Availability is
	 * known if the stream is still in the list of the channel.
	 */
	if (cds_list_empty(&metadata->streams.head)) {
		ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
		ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		goto error_no_stream;
	}

	/* Send metadata stream to relayd if needed. */
	if (metadata->metadata_stream->relayd_id != (uint64_t) -1ULL) {
		ret = consumer_send_relayd_stream(metadata->metadata_stream,
				metadata->pathname);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto error;
		}
		ret = consumer_send_relayd_streams_sent(
				metadata->metadata_stream->relayd_id);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			goto error;
		}
	}

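	/*
	 * Hand the metadata stream over to the metadata poll thread; from this
	 * point on it is globally visible and no longer owned by the channel.
	 */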
	ret = send_streams_to_thread(metadata, ctx);
	if (ret < 0) {
		/*
		 * If we are unable to send the stream to the thread, there is
		 * a big problem so just stop everything.
		 */
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}
	/* List MUST be empty after or else it could be reused. */
	assert(cds_list_empty(&metadata->streams.head));

	ret = 0;
	goto end;

error:
	/*
	 * Delete metadata channel on error. At this point, the metadata stream can
	 * NOT be monitored by the metadata thread thus having the guarantee that
	 * the stream is still in the local stream list of the channel. This call
	 * will make sure to clean that list.
	 */
	consumer_stream_destroy(metadata->metadata_stream, NULL);
	cds_list_del(&metadata->metadata_stream->send_node);
	metadata->metadata_stream = NULL;
error_no_stream:
end:
	return ret;
}

/*
 * Snapshot the whole metadata.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_channel *metadata_channel;
	struct lttng_consumer_stream *metadata_stream;

	assert(path);
	assert(ctx);

	DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	rcu_read_lock();

	metadata_channel = consumer_find_channel(key);
	if (!metadata_channel) {
		ERR("UST snapshot metadata channel not found for key %" PRIu64,
				key);
		ret = -1;
		goto error;
	}
	assert(!metadata_channel->monitor);

	health_code_update();

	/*
	 * Ask the sessiond if we have new metadata waiting and update the
	 * consumer metadata cache.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * The metadata stream is NOT created in no monitor mode when the channel
	 * is created on a sessiond ask channel command.
	 */
	ret = create_ust_streams(metadata_channel, ctx);
	if (ret < 0) {
		goto error;
	}

	metadata_stream = metadata_channel->metadata_stream;
	assert(metadata_stream);

	if (relayd_id != (uint64_t) -1ULL) {
		metadata_stream->relayd_id = relayd_id;
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error_stream;
		}
	} else {
		ret = utils_create_stream_file(path, metadata_stream->name,
				metadata_stream->chan->tracefile_size,
				metadata_stream->tracefile_count_current,
				metadata_stream->uid, metadata_stream->gid, NULL);
		if (ret < 0) {
			goto error_stream;
		}
		metadata_stream->out_fd = ret;
		metadata_stream->tracefile_size_current = 0;
	}

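	/*
	 * Drain the freshly created metadata stream: keep reading sub-buffers
	 * until lttng_consumer_read_subbuffer() reports no more data.
	 */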
	do {
		health_code_update();

		ret = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
		if (ret < 0) {
			goto error_stream;
		}
	} while (ret > 0);

error_stream:
	/*
	 * Clean up the stream completely because the next snapshot will use a
	 * new metadata stream.
	 */
	consumer_stream_destroy(metadata_stream, NULL);
	cds_list_del(&metadata_stream->send_node);
	metadata_channel->metadata_stream = NULL;

error:
	rcu_read_unlock();
	return ret;
}

static
int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
		const char **addr)
{
	int ret;
	unsigned long mmap_offset;
	const char *mmap_base;

	mmap_base = ustctl_get_mmap_base(stream->ustream);
	if (!mmap_base) {
		ERR("Failed to get mmap base for stream `%s`",
				stream->name);
		ret = -EPERM;
		goto error;
	}

	ret = ustctl_get_mmap_read_offset(stream->ustream, &mmap_offset);
	if (ret != 0) {
		ERR("Failed to get mmap offset for stream `%s`", stream->name);
		ret = -EINVAL;
		goto error;
	}

	*addr = mmap_base + mmap_offset;
error:
	return ret;
}

/*
 * Take a snapshot of all the streams of a channel.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
		uint64_t nb_packets_per_stream, struct lttng_consumer_local_data *ctx)
{
	int ret;
	unsigned use_relayd = 0;
	unsigned long consumed_pos, produced_pos;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;

	assert(path);
	assert(ctx);

	rcu_read_lock();

	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	channel = consumer_find_channel(key);
	if (!channel) {
		ERR("UST snapshot channel not found for key %" PRIu64, key);
		ret = -1;
		goto error;
	}
	assert(!channel->monitor);
	DBG("UST consumer snapshot channel %" PRIu64, key);

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		health_code_update();

		/* Lock stream because we are about to change its state. */
		pthread_mutex_lock(&stream->lock);
		stream->relayd_id = relayd_id;

		if (use_relayd) {
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				goto error_unlock;
			}
		} else {
			ret = utils_create_stream_file(path, stream->name,
					stream->chan->tracefile_size,
					stream->tracefile_count_current,
					stream->uid, stream->gid, NULL);
			if (ret < 0) {
				goto error_unlock;
			}
			stream->out_fd = ret;
			stream->tracefile_size_current = 0;

			DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
					stream->name, stream->key);
		}
		if (relayd_id != -1ULL) {
			ret = consumer_send_relayd_streams_sent(relayd_id);
			if (ret < 0) {
				goto error_unlock;
			}
		}

		/*
		 * If tracing is active, we want to perform a "full" buffer flush.
		 * Else, if quiescent, it has already been done by the prior stop.
		 */
		if (!stream->quiescent) {
			ustctl_flush_buffer(stream->ustream, 0);
		}

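		/*
		 * Sample the ring buffer's produced and consumed positions so
		 * the loop below knows exactly how much data to read for this
		 * stream.
		 */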
		ret = lttng_ustconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking UST snapshot");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced UST snapshot position");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumed UST snapshot position");
			goto error_unlock;
		}

		/*
		 * The original value is sent back if max stream size is larger than
		 * the possible size of the snapshot. Also, we assume that the session
		 * daemon should never send a maximum stream size that is lower than
		 * subbuffer size.
		 */
		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
				produced_pos, nb_packets_per_stream,
				stream->max_sb_size);

		while (consumed_pos < produced_pos) {
			ssize_t read_len;
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();

			DBG("UST consumer taking snapshot at pos %lu", consumed_pos);

			ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("ustctl_get_subbuf snapshot");
					goto error_close_stream;
				}
				DBG("UST consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
				continue;
			}

			ret = ustctl_get_subbuf_size(stream->ustream, &len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto error_put_subbuf;
			}

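			/*
			 * Wrap the sub-buffer in a view; the mmap write path
			 * below strips the trailing padding when the data is
			 * sent to a relayd, hence the two expected lengths.
			 */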
			subbuf_view = lttng_buffer_view_init(
					subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(
					stream, &subbuf_view, padded_len - len);
			if (use_relayd) {
				if (read_len != len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			} else {
				if (read_len != padded_len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			}

			ret = ustctl_put_subbuf(stream->ustream);
			if (ret < 0) {
				ERR("Snapshot ustctl_put_subbuf");
				goto error_close_stream;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Simply close the stream so we can use it on the next snapshot. */
		consumer_stream_close(stream);
		pthread_mutex_unlock(&stream->lock);
	}

	rcu_read_unlock();
	return 0;

error_put_subbuf:
	if (ustctl_put_subbuf(stream->ustream) < 0) {
		ERR("Snapshot ustctl_put_subbuf");
	}
error_close_stream:
	consumer_stream_close(stream);
error_unlock:
	pthread_mutex_unlock(&stream->lock);
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Receive the metadata updates from the sessiond. Supports receiving
 * overlapping metadata, but it needs to always belong to a contiguous
 * range starting from 0.
 * Be careful about the locks held when calling this function: it needs
 * the metadata cache flush to concurrently progress in order to
 * complete.
 */
int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
		uint64_t len, uint64_t version,
		struct lttng_consumer_channel *channel, int timer, int wait)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	char *metadata_str;

	DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);

	metadata_str = zmalloc(len * sizeof(char));
	if (!metadata_str) {
		PERROR("zmalloc metadata string");
		ret_code = LTTCOMM_CONSUMERD_ENOMEM;
		goto end;
	}

	health_code_update();

	/* Receive metadata string. */
	ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
	if (ret < 0) {
		/* Session daemon is dead so return gracefully. */
		ret_code = ret;
		goto end_free;
	}

	health_code_update();

	pthread_mutex_lock(&channel->metadata_cache->lock);
	ret = consumer_metadata_cache_write(channel, offset, len, version,
			metadata_str);
	if (ret < 0) {
		/* Unable to handle metadata. Notify session daemon. */
		ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
		/*
		 * Skip metadata flush on write error since the offset and len might
		 * not have been updated which could create an infinite loop below when
		 * waiting for the metadata cache to be flushed.
		 */
		pthread_mutex_unlock(&channel->metadata_cache->lock);
		goto end_free;
	}
	pthread_mutex_unlock(&channel->metadata_cache->lock);

	if (!wait) {
		goto end_free;
	}
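	/*
	 * Block until the metadata cache has been flushed up to the end of
	 * this update (offset + len); only then is the session daemon's push
	 * considered complete.
	 */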
	while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
		DBG("Waiting for metadata to be flushed");

		health_code_update();

		usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
	}

end_free:
	free(metadata_str);
end:
	return ret_code;
}

/*
 * Receive command from session daemon and process it.
 *
 * Return 1 on success else a negative value or 0.
 */
int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	ssize_t ret;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttcomm_consumer_msg msg;
	struct lttng_consumer_channel *channel = NULL;

	health_code_update();

	ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %zd (expects %zu)",
				ret, sizeof(msg));
		/*
		 * The ret value might be 0, meaning an orderly shutdown, but this is
		 * ok since the caller handles this.
		 */
		if (ret > 0) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
			ret = -1;
		}
		return ret;
	}

	health_code_update();

	/* Deprecated command. */
	assert(msg.cmd_type != LTTNG_CONSUMER_STOP);

	health_code_update();

	/* The relayd lookups need the RCU read-side lock. */
	rcu_read_lock();

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		/* Session daemon status messages are handled in the following call. */
		consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
				msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
				&msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
				msg.u.relayd_sock.relayd_session_id);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("UST consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == NULL) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		}

		/*
		 * Each relayd socket pair has a refcount of streams attached to it
		 * which tells if the relayd is still active or not depending on the
		 * refcount value.
		 *
		 * This will set the destroy flag of the relayd object and destroy it
		 * if the refcount reaches zero when called.
		 *
		 * The destroy can happen either here or when a stream fd hangs up.
		 */
		if (relayd) {
			consumer_flag_relayd_for_destroy(relayd);
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		rcu_read_unlock();
		return -ENOSYS;
	}
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		int ret, is_data_pending;
		uint64_t id = msg.u.data_pending.session_id;

		DBG("UST consumer data pending command for id %" PRIu64, id);

		is_data_pending = consumer_data_pending(id);

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &is_data_pending,
				sizeof(is_data_pending));
		if (ret < 0) {
			DBG("Error when sending the data pending ret code: %d", ret);
			goto error_fatal;
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
	{
		int ret;
		struct ustctl_consumer_channel_attr attr;

		/* Create a plain object and reserve a channel key. */
		channel = consumer_allocate_channel(
				msg.u.ask_channel.key,
				msg.u.ask_channel.session_id,
				msg.u.ask_channel.pathname,
				msg.u.ask_channel.name,
				msg.u.ask_channel.uid,
				msg.u.ask_channel.gid,
				msg.u.ask_channel.relayd_id,
				(enum lttng_event_output) msg.u.ask_channel.output,
				msg.u.ask_channel.tracefile_size,
				msg.u.ask_channel.tracefile_count,
				msg.u.ask_channel.session_id_per_pid,
				msg.u.ask_channel.monitor,
				msg.u.ask_channel.live_timer_interval,
				msg.u.ask_channel.is_live,
				msg.u.ask_channel.root_shm_path,
				msg.u.ask_channel.shm_path);
		if (!channel) {
			goto end_channel_error;
		}

		/*
		 * Assign UST application UID to the channel. This value is ignored for
		 * per PID buffers. This is specific to UST thus setting this after the
		 * allocation.
		 */
		channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;

		/* Build channel attributes from received message. */
		attr.subbuf_size = msg.u.ask_channel.subbuf_size;
		attr.num_subbuf = msg.u.ask_channel.num_subbuf;
		attr.overwrite = msg.u.ask_channel.overwrite;
		attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
		attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
		attr.chan_id = msg.u.ask_channel.chan_id;
		memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));

		/* Match channel buffer type to the UST abi. */
		switch (msg.u.ask_channel.output) {
		case LTTNG_EVENT_MMAP:
		default:
			attr.output = LTTNG_UST_MMAP;
			break;
		}

		/* Translate and save channel type. */
		switch (msg.u.ask_channel.type) {
		case LTTNG_UST_CHAN_PER_CPU:
			channel->type = CONSUMER_CHANNEL_TYPE_DATA;
			attr.type = LTTNG_UST_CHAN_PER_CPU;
			/*
			 * Set refcount to 1 for owner. Below, we will
			 * pass ownership to the
			 * consumer_thread_channel_poll() thread.
			 */
			channel->refcount = 1;
			break;
		case LTTNG_UST_CHAN_METADATA:
			channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
			attr.type = LTTNG_UST_CHAN_METADATA;
			break;
		default:
			assert(0);
			goto error_fatal;
		};

		health_code_update();

		ret = ask_channel(ctx, sock, channel, &attr);
		if (ret < 0) {
			goto end_channel_error;
		}

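		/*
		 * A metadata channel needs its cache and the periodic "switch"
		 * timer; for data channels, the live timer is started with the
		 * interval provided by the session daemon.
		 */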
		if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
			ret = consumer_metadata_cache_allocate(channel);
			if (ret < 0) {
				ERR("Allocating metadata cache");
				goto end_channel_error;
			}
			consumer_timer_switch_start(channel, attr.switch_timer_interval);
			attr.switch_timer_interval = 0;
		} else {
			consumer_timer_live_start(channel,
					msg.u.ask_channel.live_timer_interval);
		}

		health_code_update();

		/*
		 * Add the channel to the internal state AFTER all streams were created
		 * and successfully sent to the session daemon. This way, all streams
		 * must be ready before this channel is visible to the threads.
		 * If add_channel succeeds, ownership of the channel is
		 * passed to consumer_thread_channel_poll().
		 */
		ret = add_channel(channel, ctx);
		if (ret < 0) {
			if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
				if (channel->switch_timer_enabled == 1) {
					consumer_timer_switch_stop(channel);
				}
				consumer_metadata_cache_destroy(channel);
			}
			if (channel->live_timer_enabled == 1) {
				consumer_timer_live_stop(channel);
			}
			goto end_channel_error;
		}

		health_code_update();

		/*
		 * Channel and streams are now created. Inform the session daemon that
		 * everything went well and should wait to receive the channel and
		 * streams with ustctl API.
		 */
		ret = consumer_send_status_channel(sock, channel);
		if (ret < 0) {
			/*
			 * There is probably a problem on the socket.
			 */
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_GET_CHANNEL:
	{
		int ret, relayd_err = 0;
		uint64_t key = msg.u.get_channel.key;
		struct lttng_consumer_channel *channel;

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("UST consumer get channel key %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
			goto end_msg_sessiond;
		}

		health_code_update();

		/* Send everything to sessiond. */
		ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
		if (ret < 0) {
			if (relayd_err) {
				/*
				 * We were unable to send the stream to the relayd, so avoid
				 * sending back a fatal error to the thread since this is OK
				 * and the consumer can continue its work. The above call
				 * has sent the error status message to the sessiond.
				 */
				goto end_nosignal;
			}
			/*
			 * The communication was broken hence there is a bad state between
			 * the consumer and sessiond so stop everything.
			 */
			goto error_fatal;
		}

		health_code_update();

		/*
		 * In no monitor mode, the streams ownership is kept inside the channel
		 * so don't send them to the data thread.
		 */
		if (!channel->monitor) {
			goto end_msg_sessiond;
		}

		ret = send_streams_to_thread(channel, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error_fatal;
		}
		/* List MUST be empty after or else it could be reused. */
		assert(cds_list_empty(&channel->streams.head));
		goto end_msg_sessiond;
	}
1656 uint64_t key = msg.u.destroy_channel.key;
d88aee68 1657
a0cbdd2e
MD
1658 /*
1659 * Only called if streams have not been sent to stream
1660 * manager thread. However, channel has been sent to
1661 * channel manager thread.
1662 */
1663 notify_thread_del_channel(ctx, key);
d88aee68 1664 goto end_msg_sessiond;
ffe60014 1665 }
d88aee68
DG
1666 case LTTNG_CONSUMER_CLOSE_METADATA:
1667 {
1668 int ret;
1669
1670 ret = close_metadata(msg.u.close_metadata.key);
1671 if (ret != 0) {
1672 ret_code = ret;
1673 }
1674
1675 goto end_msg_sessiond;
1676 }
7972aab2
DG
1677 case LTTNG_CONSUMER_FLUSH_CHANNEL:
1678 {
1679 int ret;
1680
1681 ret = flush_channel(msg.u.flush_channel.key);
1682 if (ret != 0) {
1683 ret_code = ret;
1684 }
1685
1686 goto end_msg_sessiond;
1687 }
0dd01979
MD
1688 case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL:
1689 {
1690 int ret;
1691
1692 ret = clear_quiescent_channel(
1693 msg.u.clear_quiescent_channel.key);
1694 if (ret != 0) {
1695 ret_code = ret;
1696 }
1697
1698 goto end_msg_sessiond;
1699 }
	case LTTNG_CONSUMER_PUSH_METADATA:
	{
		int ret;
		uint64_t len = msg.u.push_metadata.len;
		uint64_t key = msg.u.push_metadata.key;
		uint64_t offset = msg.u.push_metadata.target_offset;
		uint64_t version = msg.u.push_metadata.version;
		struct lttng_consumer_channel *channel;

		DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
				len);

		channel = consumer_find_channel(key);
		if (!channel) {
			/*
			 * This is possible if the metadata creation on the consumer side
			 * is in flight vis-a-vis a concurrent push metadata from the
			 * session daemon. Simply return that the channel failed and the
			 * session daemon will handle that message correctly considering
			 * that this race is acceptable thus the DBG() statement here.
			 */
			DBG("UST consumer push metadata %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			goto end_msg_sessiond;
		}

		health_code_update();

		if (!len) {
			/*
			 * There is nothing to receive. We have simply
			 * checked whether the channel can be found.
			 */
			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			goto end_msg_sessiond;
		}

		/* Tell session daemon we are ready to receive the metadata. */
		ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_fatal;
		}

		health_code_update();

		/* Wait for more data. */
		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			goto error_fatal;
		}

		health_code_update();

		ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
				len, version, channel, 0, 1);
		if (ret < 0) {
			/* error receiving from sessiond */
			goto error_fatal;
		} else {
			ret_code = ret;
			goto end_msg_sessiond;
		}
	}
	case LTTNG_CONSUMER_SETUP_METADATA:
	{
		int ret;

		ret = setup_metadata(ctx, msg.u.setup_metadata.key);
		if (ret) {
			ret_code = ret;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		if (msg.u.snapshot_channel.metadata) {
			ret = snapshot_metadata(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					ctx);
			if (ret < 0) {
				ERR("Snapshot metadata failed");
				ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
			}
		} else {
			ret = snapshot_channel(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					msg.u.snapshot_channel.nb_packets_per_stream,
					ctx);
			if (ret < 0) {
				ERR("Snapshot channel failed");
				ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			}
		}

		health_code_update();
		ret = consumer_send_status_msg(sock, ret_code);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		health_code_update();
		break;
	}
fb83fe64
JD
1808 case LTTNG_CONSUMER_DISCARDED_EVENTS:
1809 {
beb59458
MJ
1810 int ret = 0;
1811 uint64_t discarded_events;
fb83fe64
JD
1812 struct lttng_ht_iter iter;
1813 struct lttng_ht *ht;
1814 struct lttng_consumer_stream *stream;
1815 uint64_t id = msg.u.discarded_events.session_id;
1816 uint64_t key = msg.u.discarded_events.channel_key;
1817
1818 DBG("UST consumer discarded events command for session id %"
1819 PRIu64, id);
1820 rcu_read_lock();
1821 pthread_mutex_lock(&consumer_data.lock);
1822
1823 ht = consumer_data.stream_list_ht;
1824
1825 /*
1826 * We only need a reference to the channel, but they are not
1827 * directly indexed, so we just use the first matching stream
1828 * to extract the information we need; we default to 0 if not
1829 * found (no events are dropped if the channel is not yet in
1830 * use).
1831 */
beb59458 1832 discarded_events = 0;
fb83fe64
JD
1833 cds_lfht_for_each_entry_duplicate(ht->ht,
1834 ht->hash_fct(&id, lttng_ht_seed),
1835 ht->match_fct, &id,
1836 &iter.iter, stream, node_session_id.node) {
1837 if (stream->chan->key == key) {
beb59458 1838 discarded_events = stream->chan->discarded_events;
fb83fe64
JD
1839 break;
1840 }
1841 }
1842 pthread_mutex_unlock(&consumer_data.lock);
1843 rcu_read_unlock();
1844
1845 DBG("UST consumer discarded events command for session id %"
1846 PRIu64 ", channel key %" PRIu64, id, key);
1847
1848 health_code_update();
1849
1850 /* Send back returned value to session daemon */
beb59458 1851 ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events));
fb83fe64
JD
1852 if (ret < 0) {
1853 PERROR("send discarded events");
1854 goto error_fatal;
1855 }
1856
1857 break;
1858 }
1859 case LTTNG_CONSUMER_LOST_PACKETS:
1860 {
9a06e8d4
JG
1861 int ret;
1862 uint64_t lost_packets;
fb83fe64
JD
1863 struct lttng_ht_iter iter;
1864 struct lttng_ht *ht;
1865 struct lttng_consumer_stream *stream;
1866 uint64_t id = msg.u.lost_packets.session_id;
1867 uint64_t key = msg.u.lost_packets.channel_key;
1868
1869 DBG("UST consumer lost packets command for session id %"
1870 PRIu64, id);
1871 rcu_read_lock();
1872 pthread_mutex_lock(&consumer_data.lock);
1873
1874 ht = consumer_data.stream_list_ht;
1875
1876 /*
1877 * We only need a reference to the channel, but they are not
1878 * directly indexed, so we just use the first matching stream
1879 * to extract the information we need, we default to 0 if not
1880 * found (no packets lost if the channel is not yet in use).
1881 */
9a06e8d4 1882 lost_packets = 0;
fb83fe64
JD
1883 cds_lfht_for_each_entry_duplicate(ht->ht,
1884 ht->hash_fct(&id, lttng_ht_seed),
1885 ht->match_fct, &id,
1886 &iter.iter, stream, node_session_id.node) {
1887 if (stream->chan->key == key) {
9a06e8d4 1888 lost_packets = stream->chan->lost_packets;
fb83fe64
JD
1889 break;
1890 }
1891 }
1892 pthread_mutex_unlock(&consumer_data.lock);
1893 rcu_read_unlock();
1894
1895 DBG("UST consumer lost packets command for session id %"
1896 PRIu64 ", channel key %" PRIu64, id, key);
1897
1898 health_code_update();
1899
1900 /* Send back returned value to session daemon */
9a06e8d4
JG
1901 ret = lttcomm_send_unix_sock(sock, &lost_packets,
1902 sizeof(lost_packets));
fb83fe64
JD
1903 if (ret < 0) {
1904 PERROR("send lost packets");
1905 goto error_fatal;
1906 }
1907
1908 break;
1909 }
3bd1e081
MD
1910 default:
1911 break;
1912 }
3f8e211f 1913
3bd1e081 1914end_nosignal:
b0b335c8 1915 rcu_read_unlock();
4cbc1a04 1916
9ce5646a
MD
1917 health_code_update();
1918
4cbc1a04
DG
1919 /*
1920 * Return 1 to indicate success since the 0 value can be a socket
1921 * shutdown during the recv() or send() call.
1922 */
1923 return 1;
ffe60014
DG
1924
1925end_msg_sessiond:
1926 /*
1927 * The returned value here is not useful since either way we'll return 1 to
1928 * the caller because the session daemon socket management is done
1929 * elsewhere. Returning a negative code or 0 will shut down the consumer.
1930 */
489f70e9
MD
1931 ret = consumer_send_status_msg(sock, ret_code);
1932 if (ret < 0) {
1933 goto error_fatal;
1934 }
ffe60014 1935 rcu_read_unlock();
9ce5646a
MD
1936
1937 health_code_update();
1938
ffe60014
DG
1939 return 1;
1940end_channel_error:
1941 if (channel) {
1942 /*
1943 * Free channel here since no one has a reference to it. We don't
1944 * free after that because a stream can store this pointer.
1945 */
1946 destroy_channel(channel);
1947 }
1948 /* We have to send a status channel message indicating an error. */
1949 ret = consumer_send_status_channel(sock, NULL);
1950 if (ret < 0) {
1951 /* Stop everything if the session daemon cannot be notified. */
1952 goto error_fatal;
1953 }
1954 rcu_read_unlock();
9ce5646a
MD
1955
1956 health_code_update();
1957
ffe60014
DG
1958 return 1;
1959error_fatal:
1960 rcu_read_unlock();
1961 /* This will issue a consumer stop. */
1962 return -1;
3bd1e081
MD
1963}
1964
1fdb9a78
JG
1965void lttng_ustctl_flush_buffer(struct lttng_consumer_stream *stream,
1966 int producer_active)
3bd1e081 1967{
ffe60014
DG
1968 assert(stream);
1969 assert(stream->ustream);
b5c5fc29 1970
1fdb9a78 1971 ustctl_flush_buffer(stream->ustream, producer_active);
d056b477
MD
1972}
1973
ffe60014
DG
1974/*
1975 * Take a snapshot for a specific fd
1976 *
1977 * Returns 0 on success, < 0 on error
1978 */
1979int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
3bd1e081 1980{
ffe60014
DG
1981 assert(stream);
1982 assert(stream->ustream);
1983
1984 return ustctl_snapshot(stream->ustream);
3bd1e081
MD
1985}
1986
ffe60014
DG
1987/*
1988 * Get the produced position
1989 *
1990 * Returns 0 on success, < 0 on error
1991 */
1992int lttng_ustconsumer_get_produced_snapshot(
1993 struct lttng_consumer_stream *stream, unsigned long *pos)
3bd1e081 1994{
ffe60014
DG
1995 assert(stream);
1996 assert(stream->ustream);
1997 assert(pos);
7a57cf92 1998
ffe60014
DG
1999 return ustctl_snapshot_get_produced(stream->ustream, pos);
2000}
7a57cf92 2001
10a50311
JD
2002/*
2003 * Get the consumed position
2004 *
2005 * Returns 0 on success, < 0 on error
2006 */
2007int lttng_ustconsumer_get_consumed_snapshot(
2008 struct lttng_consumer_stream *stream, unsigned long *pos)
2009{
2010 assert(stream);
2011 assert(stream->ustream);
2012 assert(pos);
2013
2014 return ustctl_snapshot_get_consumed(stream->ustream, pos);
2015}
2016
84a182ce
DG
2017void lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream,
2018 int producer)
2019{
2020 assert(stream);
2021 assert(stream->ustream);
2022
2023 ustctl_flush_buffer(stream->ustream, producer);
2024}
2025
2026int lttng_ustconsumer_get_current_timestamp(
2027 struct lttng_consumer_stream *stream, uint64_t *ts)
2028{
2029 assert(stream);
2030 assert(stream->ustream);
2031 assert(ts);
2032
2033 return ustctl_get_current_timestamp(stream->ustream, ts);
2034}
2035
fb83fe64
JD
2036int lttng_ustconsumer_get_sequence_number(
2037 struct lttng_consumer_stream *stream, uint64_t *seq)
2038{
2039 assert(stream);
2040 assert(stream->ustream);
2041 assert(seq);
2042
2043 return ustctl_get_sequence_number(stream->ustream, seq);
2044}
2045
ffe60014 2046/*
0dd01979 2047 * Called when the stream signals the consumer that it has hung up.
ffe60014
DG
2048 */
2049void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
2050{
2051 assert(stream);
2052 assert(stream->ustream);
2c1dd183 2053
0dd01979
MD
2054 pthread_mutex_lock(&stream->lock);
2055 if (!stream->quiescent) {
2056 ustctl_flush_buffer(stream->ustream, 0);
2057 stream->quiescent = true;
2058 }
2059 pthread_mutex_unlock(&stream->lock);
ffe60014
DG
2060 stream->hangup_flush_done = 1;
2061}
ee77a7b0 2062
ffe60014
DG
2063void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
2064{
4628484a
MD
2065 int i;
2066
ffe60014
DG
2067 assert(chan);
2068 assert(chan->uchan);
e316aad5 2069
ea88ca2a
MD
2070 if (chan->switch_timer_enabled == 1) {
2071 consumer_timer_switch_stop(chan);
2072 }
4628484a
MD
2073 for (i = 0; i < chan->nr_stream_fds; i++) {
2074 int ret;
2075
2076 ret = close(chan->stream_fds[i]);
2077 if (ret) {
2078 PERROR("close");
2079 }
2080 if (chan->shm_path[0]) {
2081 char shm_path[PATH_MAX];
2082
2083 ret = get_stream_shm_path(shm_path, chan->shm_path, i);
2084 if (ret) {
2085 ERR("Cannot get stream shm path");
2086 }
2087 ret = run_as_unlink(shm_path, chan->uid, chan->gid);
2088 if (ret) {
4628484a
MD
2089 PERROR("unlink %s", shm_path);
2090 }
2091 }
2092 }
3bd1e081
MD
2093}
2094
b83e03c4
MD
2095void lttng_ustconsumer_free_channel(struct lttng_consumer_channel *chan)
2096{
2097 assert(chan);
2098 assert(chan->uchan);
2099
2100 consumer_metadata_cache_destroy(chan);
2101 ustctl_destroy_channel(chan->uchan);
ea853771
JR
2102 /* Try to rmdir all directories under shm_path root. */
2103 if (chan->root_shm_path[0]) {
2104 (void) run_as_recursive_rmdir(chan->root_shm_path,
2105 chan->uid, chan->gid);
2106 }
b83e03c4
MD
2107 free(chan->stream_fds);
2108}
2109
3bd1e081
MD
2110void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
2111{
ffe60014
DG
2112 assert(stream);
2113 assert(stream->ustream);
d41f73b7 2114
ea88ca2a
MD
2115 if (stream->chan->switch_timer_enabled == 1) {
2116 consumer_timer_switch_stop(stream->chan);
2117 }
ffe60014
DG
2118 ustctl_destroy_stream(stream->ustream);
2119}
d41f73b7 2120
6d574024
DG
2121int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream)
2122{
2123 assert(stream);
2124 assert(stream->ustream);
2125
2126 return ustctl_stream_get_wakeup_fd(stream->ustream);
2127}
2128
2129int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream)
2130{
2131 assert(stream);
2132 assert(stream->ustream);
2133
2134 return ustctl_stream_close_wakeup_fd(stream->ustream);
2135}
2136
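/*
 * Reset the stream's push position into the metadata cache so the whole
 * cache content is pushed again, and record the new cache version.
 */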
93ec662e 2137static
29d1a7ae 2138void metadata_stream_reset_cache(struct lttng_consumer_stream *stream)
93ec662e 2139{
29d1a7ae
JG
2140 DBG("Reset metadata cache of session %" PRIu64,
2141 stream->chan->session_id);
93ec662e 2142 stream->ust_metadata_pushed = 0;
29d1a7ae 2143 stream->metadata_version = stream->chan->metadata_cache->version;
93ec662e
JD
2144 stream->reset_metadata_flag = 1;
2145}
2146
94d49140
JD
2147/*
2148 * Write up to one packet from the metadata cache to the channel.
2149 *
2150 * Returns the number of bytes written to the channel from the cache,
2151 * or a negative value on error.
2152 */
2153static
2154int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
2155{
2156 ssize_t write_len;
2157 int ret;
2158
2159 pthread_mutex_lock(&stream->chan->metadata_cache->lock);
c585821b 2160 if (stream->chan->metadata_cache->max_offset
94d49140
JD
2161 == stream->ust_metadata_pushed) {
2162 ret = 0;
2163 goto end;
2164 }
2165
2166 write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
2167 &stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
c585821b 2168 stream->chan->metadata_cache->max_offset
94d49140
JD
2169 - stream->ust_metadata_pushed);
2170 assert(write_len != 0);
2171 if (write_len < 0) {
2172 ERR("Writing one metadata packet");
d6ef77b3 2173 ret = write_len;
94d49140
JD
2174 goto end;
2175 }
2176 stream->ust_metadata_pushed += write_len;
2177
c585821b 2178 assert(stream->chan->metadata_cache->max_offset >=
94d49140
JD
2179 stream->ust_metadata_pushed);
2180 ret = write_len;
2181
29d1a7ae
JG
2182 /*
2183 * Switch packet (but don't open the next one) on every commit of
2184 * a metadata packet. Since the subbuffer is fully filled (with padding,
2185 * if needed), the stream is "quiescent" after this commit.
2186 */
2187 ustctl_flush_buffer(stream->ustream, 1);
94d49140
JD
2188end:
2189 pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
2190 return ret;
2191}
2192
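/*
 * Typical use of commit_one_metadata_packet(), as a rough sketch (assumed
 * from the callers in this file, which hold the metadata stream lock; the
 * metadata cache lock is taken internally):
 *
 *	pthread_mutex_lock(&metadata->lock);
 *	ret = commit_one_metadata_packet(metadata);
 *	pthread_mutex_unlock(&metadata->lock);
 *
 * A positive return is the number of bytes copied from the cache into the
 * ring buffer; 0 means the cache had nothing new to commit.
 */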
309167d2 2193
94d49140
JD
2194/*
2195 * Sync metadata, meaning request it from the session daemon and take a
2196 * snapshot of it so that the metadata thread can consume it.
2197 *
c585821b
MD
2198 * Metadata stream lock is held here, but we need to release it when
2199 * interacting with sessiond, else we cause a deadlock with live
2200 * awaiting on metadata to be pushed out.
94d49140
JD
2201 *
2202 * Return 0 if new metadata is available, EAGAIN if the metadata stream
2203 * is empty, or a negative value on error.
2204 */
2205int lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
2206 struct lttng_consumer_stream *metadata)
2207{
2208 int ret;
2209 int retry = 0;
2210
2211 assert(ctx);
2212 assert(metadata);
2213
c585821b 2214 pthread_mutex_unlock(&metadata->lock);
94d49140
JD
2215 /*
2216 * Request metadata from the sessiond, but don't wait for the flush
2217 * because we locked the metadata thread.
2218 */
2219 ret = lttng_ustconsumer_request_metadata(ctx, metadata->chan, 0, 0);
87f05398 2220 pthread_mutex_lock(&metadata->lock);
94d49140
JD
2221 if (ret < 0) {
2222 goto end;
2223 }
2224
2225 ret = commit_one_metadata_packet(metadata);
2226 if (ret <= 0) {
2227 goto end;
2228 } else if (ret > 0) {
2229 retry = 1;
2230 }
2231
2232 ustctl_flush_buffer(metadata->ustream, 1);
2233 ret = ustctl_snapshot(metadata->ustream);
2234 if (ret < 0) {
2235 if (errno != EAGAIN) {
2236 ERR("Sync metadata, taking UST snapshot");
2237 goto end;
2238 }
2239 DBG("No new metadata when syncing them.");
2240 /* No new metadata, exit. */
2241 ret = ENODATA;
2242 goto end;
2243 }
2244
2245 /*
2246 * After this flush, we still need to extract metadata.
2247 */
2248 if (retry) {
2249 ret = EAGAIN;
2250 }
2251
2252end:
2253 return ret;
2254}
2255
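/*
 * Minimal caller sketch for lttng_ustconsumer_sync_metadata() (an
 * assumption, not lifted from the metadata thread): the metadata stream
 * lock is held around the call and is only dropped internally while
 * interacting with the session daemon.
 *
 *	pthread_mutex_lock(&metadata->lock);
 *	ret = lttng_ustconsumer_sync_metadata(ctx, metadata);
 *	pthread_mutex_unlock(&metadata->lock);
 */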
02b3d176
DG
2256/*
2257 * Return 0 on success else a negative value.
2258 */
2259static int notify_if_more_data(struct lttng_consumer_stream *stream,
2260 struct lttng_consumer_local_data *ctx)
2261{
2262 int ret;
2263 struct ustctl_consumer_stream *ustream;
2264
2265 assert(stream);
2266 assert(ctx);
2267
2268 ustream = stream->ustream;
2269
2270 /*
2271 * First, we are going to check if there is a new subbuffer available
2272 * before reading the stream wait_fd.
2273 */
2274 /* Get the next subbuffer */
2275 ret = ustctl_get_next_subbuf(ustream);
2276 if (ret) {
2277 /* No more data found, flag the stream. */
2278 stream->has_data = 0;
2279 ret = 0;
2280 goto end;
2281 }
2282
5420e5db 2283 ret = ustctl_put_subbuf(ustream);
02b3d176
DG
2284 assert(!ret);
2285
2286 /* This stream still has data. Flag it and wake up the data thread. */
2287 stream->has_data = 1;
2288
2289 if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) {
2290 ssize_t writelen;
2291
2292 writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1);
2293 if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
2294 ret = writelen;
2295 goto end;
2296 }
2297
2298 /* The wake up pipe has been notified. */
2299 ctx->has_wakeup = 1;
2300 }
2301 ret = 0;
2302
2303end:
2304 return ret;
2305}
2306
29d1a7ae 2307static int consumer_stream_ust_on_wake_up(struct lttng_consumer_stream *stream)
fb83fe64 2308{
29d1a7ae 2309 int ret = 0;
fb83fe64 2310
fb83fe64 2311 /*
29d1a7ae
JG
2312 * We can consume the 1 byte written into the wait_fd by
2313 * UST. Don't trigger error if we cannot read this one byte
2314 * (read returns 0), or if the error is EAGAIN or EWOULDBLOCK.
2315 *
2316 * This is only done when the stream is monitored by a thread,
2317 * before the flush is done after a hangup and if the stream
2318 * is not flagged as having data, since there might be nothing to
2319 * consume in the wait fd while data may still be flagged as
2320 * available through the consumer wake-up pipe.
fb83fe64 2321 */
29d1a7ae
JG
2322 if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) {
2323 char dummy;
2324 ssize_t readlen;
2325
2326 readlen = lttng_read(stream->wait_fd, &dummy, 1);
2327 if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
2328 ret = readlen;
2329 }
fb83fe64 2330 }
fb83fe64 2331
29d1a7ae
JG
2332 return ret;
2333}
2334
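/*
 * Record the sub-buffer size and padded sub-buffer size, which are common
 * to data and metadata sub-buffers.
 */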
2335static int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
2336 struct stream_subbuffer *subbuf)
2337{
2338 int ret;
2339
2340 ret = ustctl_get_subbuf_size(
2341 stream->ustream, &subbuf->info.data.subbuf_size);
2342 if (ret) {
fb83fe64
JD
2343 goto end;
2344 }
29d1a7ae
JG
2345
2346 ret = ustctl_get_padded_subbuf_size(
2347 stream->ustream, &subbuf->info.data.padded_subbuf_size);
2348 if (ret) {
2349 goto end;
fb83fe64 2350 }
fb83fe64
JD
2351
2352end:
2353 return ret;
2354}
2355
29d1a7ae
JG
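/*
 * Extract the common sub-buffer information and tag the sub-buffer with
 * the current metadata cache version.
 */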
2356static int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
2357 struct stream_subbuffer *subbuf)
d41f73b7 2358{
29d1a7ae 2359 int ret;
ffe60014 2360
29d1a7ae
JG
2361 ret = extract_common_subbuffer_info(stream, subbuf);
2362 if (ret) {
2363 goto end;
2364 }
d41f73b7 2365
29d1a7ae 2366 subbuf->info.metadata.version = stream->chan->metadata_cache->version;
ffe60014 2367
29d1a7ae
JG
2368end:
2369 return ret;
2370}
d41f73b7 2371
29d1a7ae
JG
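/*
 * Extract the full data sub-buffer description: sub-buffer, packet and
 * content sizes, begin/end timestamps, discarded event count and, when the
 * tracer provides them, the sequence number and stream instance id.
 */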
2372static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
2373 struct stream_subbuffer *subbuf)
2374{
2375 int ret;
c617c0c6 2376
29d1a7ae
JG
2377 ret = extract_common_subbuffer_info(stream, subbuf);
2378 if (ret) {
2379 goto end;
d41f73b7
MD
2380 }
2381
29d1a7ae
JG
2382 ret = ustctl_get_packet_size(
2383 stream->ustream, &subbuf->info.data.packet_size);
2384 if (ret < 0) {
2385 PERROR("Failed to get sub-buffer packet size");
2386 goto end;
2387 }
04ef1097 2388
29d1a7ae
JG
2389 ret = ustctl_get_content_size(
2390 stream->ustream, &subbuf->info.data.content_size);
2391 if (ret < 0) {
2392 PERROR("Failed to get sub-buffer content size");
d41f73b7
MD
2393 goto end;
2394 }
309167d2 2395
29d1a7ae
JG
2396 ret = ustctl_get_timestamp_begin(
2397 stream->ustream, &subbuf->info.data.timestamp_begin);
2398 if (ret < 0) {
2399 PERROR("Failed to get sub-buffer begin timestamp");
2400 goto end;
2401 }
fb83fe64 2402
29d1a7ae
JG
2403 ret = ustctl_get_timestamp_end(
2404 stream->ustream, &subbuf->info.data.timestamp_end);
2405 if (ret < 0) {
2406 PERROR("Failed to get sub-buffer end timestamp");
2407 goto end;
2408 }
2409
2410 ret = ustctl_get_events_discarded(
2411 stream->ustream, &subbuf->info.data.events_discarded);
2412 if (ret) {
2413 PERROR("Failed to get sub-buffer events discarded count");
2414 goto end;
2415 }
2416
2417 ret = ustctl_get_sequence_number(stream->ustream,
2418 &subbuf->info.data.sequence_number.value);
2419 if (ret) {
2420 /* May not be supported by older LTTng-modules. */
2421 if (ret != -ENOTTY) {
2422 PERROR("Failed to get sub-buffer sequence number");
fb83fe64
JD
2423 goto end;
2424 }
1c20f0e2 2425 } else {
29d1a7ae 2426 subbuf->info.data.sequence_number.is_set = true;
309167d2
JD
2427 }
2428
29d1a7ae
JG
2429 ret = ustctl_get_stream_id(
2430 stream->ustream, &subbuf->info.data.stream_id);
2431 if (ret < 0) {
2432 PERROR("Failed to get stream id");
2433 goto end;
2434 }
1d4dfdef 2435
29d1a7ae
JG
2436 ret = ustctl_get_instance_id(stream->ustream,
2437 &subbuf->info.data.stream_instance_id.value);
2438 if (ret) {
2439 /* May not be supported by older LTTng-modules. */
2440 if (ret != -ENOTTY) {
2441 PERROR("Failed to get stream instance id");
2442 goto end;
2443 }
2444 } else {
2445 subbuf->info.data.stream_instance_id.is_set = true;
2446 }
2447end:
2448 return ret;
2449}
1d4dfdef 2450
29d1a7ae
JG
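/*
 * Fill in the sub-buffer information and map a buffer view over the
 * current sub-buffer's address.
 */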
2451static int get_next_subbuffer_common(struct lttng_consumer_stream *stream,
2452 struct stream_subbuffer *subbuffer)
2453{
2454 int ret;
2455 const char *addr;
1d4dfdef 2456
29d1a7ae
JG
2457 ret = stream->read_subbuffer_ops.extract_subbuffer_info(
2458 stream, subbuffer);
2459 if (ret) {
2460 goto end;
2461 }
1fdb9a78 2462
29d1a7ae 2463 ret = get_current_subbuf_addr(stream, &addr);
1fdb9a78 2464 if (ret) {
29d1a7ae 2465 goto end;
1fdb9a78
JG
2466 }
2467
29d1a7ae
JG
2468 subbuffer->buffer.buffer = lttng_buffer_view_init(
2469 addr, 0, subbuffer->info.data.padded_subbuf_size);
2470 assert(subbuffer->buffer.buffer.data != NULL);
2471end:
2472 return ret;
2473}
ace0e591 2474
29d1a7ae
JG
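/* Acquire and describe the next sub-buffer of a data stream. */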
2475static int get_next_subbuffer(struct lttng_consumer_stream *stream,
2476 struct stream_subbuffer *subbuffer)
2477{
2478 int ret;
331744e3 2479
29d1a7ae
JG
2480 ret = ustctl_get_next_subbuf(stream->ustream);
2481 if (ret) {
2482 goto end;
02b3d176
DG
2483 }
2484
29d1a7ae
JG
2485 ret = get_next_subbuffer_common(stream, subbuffer);
2486 if (ret) {
1c20f0e2
JD
2487 goto end;
2488 }
29d1a7ae
JG
2489end:
2490 return ret;
2491}
1c20f0e2 2492
29d1a7ae
JG
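/*
 * Acquire the next metadata sub-buffer, committing cached metadata into
 * the ring buffer when needed, and determine whether the acquired
 * sub-buffer forms a coherent unit of metadata.
 */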
2493static int get_next_subbuffer_metadata(struct lttng_consumer_stream *stream,
2494 struct stream_subbuffer *subbuffer)
2495{
2496 int ret;
d6ef77b3
JG
2497 bool cache_empty;
2498 bool got_subbuffer;
2499 bool coherent;
2500 bool buffer_empty;
2501 unsigned long consumed_pos, produced_pos;
29d1a7ae 2502
d6ef77b3
JG
2503 do {
2504 ret = ustctl_get_next_subbuf(stream->ustream);
2505 if (ret == 0) {
2506 got_subbuffer = true;
2507 } else {
2508 got_subbuffer = false;
2509 if (ret != -EAGAIN) {
2510 /* Fatal error. */
2511 goto end;
2512 }
c585821b
MD
2513 }
2514
d6ef77b3
JG
2515 /*
2516 * Determine if the cache is empty and ensure that a sub-buffer
2517 * is made available if the cache is not empty.
2518 */
2519 if (!got_subbuffer) {
2520 ret = commit_one_metadata_packet(stream);
2521 if (ret < 0 && ret != -ENOBUFS) {
2522 goto end;
2523 } else if (ret == 0) {
2524 /* Not an error, the cache is empty. */
2525 cache_empty = true;
2526 ret = -ENODATA;
2527 goto end;
2528 } else {
2529 cache_empty = false;
2530 }
2531 } else {
2532 pthread_mutex_lock(&stream->chan->metadata_cache->lock);
2533 cache_empty = stream->chan->metadata_cache->max_offset ==
2534 stream->ust_metadata_pushed;
2535 pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
94d49140 2536 }
d6ef77b3 2537 } while (!got_subbuffer);
94d49140 2538
d6ef77b3 2539 /* Populate sub-buffer infos and view. */
29d1a7ae
JG
2540 ret = get_next_subbuffer_common(stream, subbuffer);
2541 if (ret) {
1c20f0e2 2542 goto end;
309167d2 2543 }
d6ef77b3
JG
2544
2545 ret = lttng_ustconsumer_take_snapshot(stream);
2546 if (ret < 0) {
2547 /*
2548 * -EAGAIN is not expected since we got a sub-buffer and haven't
2549 * pushed the consumption position yet (on put_next).
2550 */
2551 PERROR("Failed to take a snapshot of metadata buffer positions");
2552 goto end;
2553 }
2554
2555 ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
2556 if (ret) {
2557 PERROR("Failed to get metadata consumed position");
2558 goto end;
2559 }
2560
2561 ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
2562 if (ret) {
2563 PERROR("Failed to get metadata produced position");
2564 goto end;
2565 }
2566
2567 /* Last sub-buffer of the ring buffer ? */
2568 buffer_empty = (consumed_pos + stream->max_sb_size) == produced_pos;
2569
2570 /*
2571 * The sessiond registry lock ensures that coherent units of metadata
2572 * are pushed to the consumer daemon at once. Hence, if a sub-buffer is
2573 * acquired, the cache is empty, and it is the only available sub-buffer
2574 * available, it is safe to assume that it is "coherent".
2575 */
2576 coherent = got_subbuffer && cache_empty && buffer_empty;
2577
2578 LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);
d41f73b7
MD
2579end:
2580 return ret;
2581}
2582
29d1a7ae
JG
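/* Release the current sub-buffer; this is not expected to fail. */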
2583static int put_next_subbuffer(struct lttng_consumer_stream *stream,
2584 struct stream_subbuffer *subbuffer)
2585{
2586 const int ret = ustctl_put_next_subbuf(stream->ustream);
2587
2588 assert(ret == 0);
2589 return ret;
2590}
2591
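/*
 * on_sleep operation of live metadata streams: wake up any thread waiting
 * on the stream's metadata rendez-vous condition variable.
 */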
2592static int signal_metadata(struct lttng_consumer_stream *stream,
2593 struct lttng_consumer_local_data *ctx)
2594{
2595 return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
2596}
2597
d6ef77b3 2598static int lttng_ustconsumer_set_stream_ops(
29d1a7ae
JG
2599 struct lttng_consumer_stream *stream)
2600{
d6ef77b3
JG
2601 int ret = 0;
2602
29d1a7ae
JG
2603 stream->read_subbuffer_ops.on_wake_up = consumer_stream_ust_on_wake_up;
2604 if (stream->metadata_flag) {
2605 stream->read_subbuffer_ops.get_next_subbuffer =
2606 get_next_subbuffer_metadata;
2607 stream->read_subbuffer_ops.extract_subbuffer_info =
2608 extract_metadata_subbuffer_info;
2609 stream->read_subbuffer_ops.reset_metadata =
2610 metadata_stream_reset_cache;
d6ef77b3
JG
2611 if (stream->chan->is_live) {
2612 stream->read_subbuffer_ops.on_sleep = signal_metadata;
2613 ret = consumer_stream_enable_metadata_bucketization(
2614 stream);
2615 if (ret) {
2616 goto end;
2617 }
2618 }
29d1a7ae
JG
2619 } else {
2620 stream->read_subbuffer_ops.get_next_subbuffer =
2621 get_next_subbuffer;
2622 stream->read_subbuffer_ops.extract_subbuffer_info =
2623 extract_data_subbuffer_info;
2624 stream->read_subbuffer_ops.on_sleep = notify_if_more_data;
2625 if (stream->chan->is_live) {
2626 stream->read_subbuffer_ops.send_live_beacon =
2627 consumer_flush_ust_index;
2628 }
2629 }
2630
2631 stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
d6ef77b3
JG
2632end:
2633 return ret;
29d1a7ae
JG
2634}
2635
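/*
 * Rough sketch of how the operations installed above are expected to be
 * driven by the generic read path (an assumption for illustration; the
 * actual loop lives in the common consumer code, not in this file):
 *
 *	if (ops->on_wake_up)
 *		ops->on_wake_up(stream);
 *	while (ops->get_next_subbuffer(stream, &subbuf) == 0) {
 *		... consume subbuf.buffer.buffer ...
 *		ops->put_next_subbuffer(stream, &subbuf);
 *	}
 *	if (ops->on_sleep)
 *		ops->on_sleep(stream, ctx);
 */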
ffe60014
DG
2636/*
2637 * Called when a stream is created.
fe4477ee
JD
2638 *
2639 * Return 0 on success or else a negative value.
ffe60014 2640 */
d41f73b7
MD
2641int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
2642{
fe4477ee
JD
2643 int ret;
2644
10a50311
JD
2645 assert(stream);
2646
fe4477ee 2647 /* Don't create anything if this is set for streaming. */
6d40f8fa 2648 if (stream->relayd_id == (uint64_t) -1ULL && stream->chan->monitor) {
fe4477ee
JD
2649 ret = utils_create_stream_file(stream->chan->pathname, stream->name,
2650 stream->chan->tracefile_size, stream->tracefile_count_current,
309167d2 2651 stream->uid, stream->gid, NULL);
fe4477ee
JD
2652 if (ret < 0) {
2653 goto error;
2654 }
2655 stream->out_fd = ret;
2656 stream->tracefile_size_current = 0;
309167d2
JD
2657
2658 if (!stream->metadata_flag) {
e0547b83
MD
2659 struct lttng_index_file *index_file;
2660
2661 index_file = lttng_index_file_create(stream->chan->pathname,
309167d2
JD
2662 stream->name, stream->uid, stream->gid,
2663 stream->chan->tracefile_size,
e0547b83
MD
2664 stream->tracefile_count_current,
2665 CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
2666 if (!index_file) {
309167d2
JD
2667 goto error;
2668 }
e0547b83 2669 stream->index_file = index_file;
309167d2 2670 }
fe4477ee 2671 }
29d1a7ae
JG
2672
2673 lttng_ustconsumer_set_stream_ops(stream);
fe4477ee
JD
2674 ret = 0;
2675
2676error:
2677 return ret;
d41f73b7 2678}
ca22feea
DG
2679
2680/*
2681 * Check if data is still being extracted from the buffers for a specific
4e9a4686
DG
2682 * stream. Consumer data lock MUST be acquired before calling this function
2683 * and the stream lock.
ca22feea 2684 *
6d805429 2685 * Return 1 if the traced data are still getting read else 0 meaning that the
ca22feea
DG
2686 * data is available for trace viewer reading.
2687 */
6d805429 2688int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
ca22feea
DG
2689{
2690 int ret;
2691
2692 assert(stream);
ffe60014 2693 assert(stream->ustream);
ca22feea 2694
6d805429 2695 DBG("UST consumer checking data pending");
c8f59ee5 2696
ca6b395f
MD
2697 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
2698 ret = 0;
2699 goto end;
2700 }
2701
04ef1097 2702 if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
e6ee4eab
DG
2703 uint64_t contiguous, pushed;
2704
2705 /* Ease our life a bit. */
c585821b 2706 contiguous = stream->chan->metadata_cache->max_offset;
e6ee4eab
DG
2707 pushed = stream->ust_metadata_pushed;
2708
04ef1097
MD
2709 /*
2710 * We can simply check whether all contiguously available data
2711 * has been pushed to the ring buffer, since the push operation
2712 * is performed within get_next_subbuf(), and because both
2713 * get_next_subbuf() and put_next_subbuf() are issued atomically
2714 * thanks to the stream lock within
2715 * lttng_ustconsumer_read_subbuffer(). This basically means that
2716 * whenever ust_metadata_pushed is incremented, the associated
2717 * metadata has been consumed from the metadata stream.
2718 */
2719 DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
e6ee4eab 2720 contiguous, pushed);
aa01b94c 2721 assert(((int64_t) (contiguous - pushed)) >= 0);
e6ee4eab 2722 if ((contiguous != pushed) ||
6acdf328 2723 (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
04ef1097
MD
2724 ret = 1; /* Data is pending */
2725 goto end;
2726 }
2727 } else {
2728 ret = ustctl_get_next_subbuf(stream->ustream);
2729 if (ret == 0) {
2730 /*
2731 * There is still data so let's put back this
2732 * subbuffer.
2733 */
2734 ret = ustctl_put_subbuf(stream->ustream);
2735 assert(ret == 0);
2736 ret = 1; /* Data is pending */
2737 goto end;
2738 }
ca22feea
DG
2739 }
2740
6d805429
DG
2741 /* Data is NOT pending so ready to be read. */
2742 ret = 0;
ca22feea 2743
6efae65e
DG
2744end:
2745 return ret;
ca22feea 2746}
d88aee68 2747
6d574024
DG
2748/*
2749 * Stop a given metadata channel timer if enabled and close the wait fd which
2750 * is the poll pipe of the metadata stream.
2751 *
2752 * This MUST be called with the metadata channel acquired.
2753 */
2754void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
2755{
2756 int ret;
2757
2758 assert(metadata);
2759 assert(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA);
2760
2761 DBG("Closing metadata channel key %" PRIu64, metadata->key);
2762
2763 if (metadata->switch_timer_enabled == 1) {
2764 consumer_timer_switch_stop(metadata);
2765 }
2766
2767 if (!metadata->metadata_stream) {
2768 goto end;
2769 }
2770
2771 /*
2772 * Close the write side so that the thread monitoring the stream, if any,
2773 * wakes up and cleans up the metadata stream.
2774 */
2775 if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) {
2776 ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]);
2777 if (ret < 0) {
2778 PERROR("closing metadata pipe write side");
2779 }
2780 metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1;
2781 }
2782
2783end:
2784 return;
2785}
2786
d88aee68
DG
2787/*
2788 * Close every metadata stream wait fd of the metadata hash table. This
2789 * function MUST be used very carefully so as not to run into a race between the
2790 * metadata thread handling streams and this function closing their wait fd.
2791 *
2792 * For UST, this is used when the session daemon hangs up. It's the metadata
2793 * producer so calling this is safe because we are assured that no state change
2794 * can occur in the metadata thread for the streams in the hash table.
2795 */
6d574024 2796void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
d88aee68 2797{
d88aee68
DG
2798 struct lttng_ht_iter iter;
2799 struct lttng_consumer_stream *stream;
2800
2801 assert(metadata_ht);
2802 assert(metadata_ht->ht);
2803
2804 DBG("UST consumer closing all metadata streams");
2805
2806 rcu_read_lock();
2807 cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
2808 node.node) {
9ce5646a
MD
2809
2810 health_code_update();
2811
be2b50c7 2812 pthread_mutex_lock(&stream->chan->lock);
6d574024 2813 lttng_ustconsumer_close_metadata(stream->chan);
be2b50c7
DG
2814 pthread_mutex_unlock(&stream->chan->lock);
2815
d88aee68
DG
2816 }
2817 rcu_read_unlock();
2818}
d8ef542d
MD
2819
2820void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
2821{
2822 int ret;
2823
2824 ret = ustctl_stream_close_wakeup_fd(stream->ustream);
2825 if (ret < 0) {
2826 ERR("Unable to close wakeup fd");
2827 }
2828}
331744e3 2829
f666ae70
MD
2830/*
2831 * Please refer to consumer-timer.c before adding any lock within this
2832 * function or any of its callees. Timers have a very strict locking
2833 * semantic with respect to teardown. Failure to respect this semantic
2834 * introduces deadlocks.
c585821b
MD
2835 *
2836 * DON'T hold the metadata lock when calling this function, else this
2837 * can cause deadlock involving consumer awaiting for metadata to be
2838 * pushed out due to concurrent interaction with the session daemon.
f666ae70 2839 */
331744e3 2840int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
94d49140 2841 struct lttng_consumer_channel *channel, int timer, int wait)
331744e3
JD
2842{
2843 struct lttcomm_metadata_request_msg request;
2844 struct lttcomm_consumer_msg msg;
0c759fc9 2845 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
93ec662e 2846 uint64_t len, key, offset, version;
331744e3
JD
2847 int ret;
2848
2849 assert(channel);
2850 assert(channel->metadata_cache);
2851
53efb85a
MD
2852 memset(&request, 0, sizeof(request));
2853
331744e3
JD
2854 /* send the metadata request to sessiond */
2855 switch (consumer_data.type) {
2856 case LTTNG_CONSUMER64_UST:
2857 request.bits_per_long = 64;
2858 break;
2859 case LTTNG_CONSUMER32_UST:
2860 request.bits_per_long = 32;
2861 break;
2862 default:
2863 request.bits_per_long = 0;
2864 break;
2865 }
2866
2867 request.session_id = channel->session_id;
1950109e 2868 request.session_id_per_pid = channel->session_id_per_pid;
567eb353
DG
2869 /*
2870 * Request the application UID here so the metadata of that application can
2871 * be sent back. The channel UID corresponds to the user UID of the session
2872 * used for the rights on the stream file(s).
2873 */
2874 request.uid = channel->ust_app_uid;
331744e3 2875 request.key = channel->key;
567eb353 2876
1950109e 2877 DBG("Sending metadata request to sessiond, session id %" PRIu64
cc84d37b 2878 ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
567eb353
DG
2879 request.session_id, request.session_id_per_pid, request.uid,
2880 request.key);
331744e3 2881
75d83e50 2882 pthread_mutex_lock(&ctx->metadata_socket_lock);
9ce5646a
MD
2883
2884 health_code_update();
2885
331744e3
JD
2886 ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
2887 sizeof(request));
2888 if (ret < 0) {
2889 ERR("Asking metadata to sessiond");
2890 goto end;
2891 }
2892
9ce5646a
MD
2893 health_code_update();
2894
331744e3
JD
2895 /* Receive the metadata from sessiond */
2896 ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
2897 sizeof(msg));
2898 if (ret != sizeof(msg)) {
8fd623e0 2899 DBG("Consumer received unexpected message size %d (expects %zu)",
331744e3
JD
2900 ret, sizeof(msg));
2901 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
2902 /*
2903 * The ret value might be 0, meaning an orderly shutdown, but this is ok
2904 * since the caller handles this.
2905 */
2906 goto end;
2907 }
2908
9ce5646a
MD
2909 health_code_update();
2910
331744e3
JD
2911 if (msg.cmd_type == LTTNG_ERR_UND) {
2912 /* No registry found */
2913 (void) consumer_send_status_msg(ctx->consumer_metadata_socket,
2914 ret_code);
2915 ret = 0;
2916 goto end;
2917 } else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
2918 ERR("Unexpected cmd_type received %d", msg.cmd_type);
2919 ret = -1;
2920 goto end;
2921 }
2922
2923 len = msg.u.push_metadata.len;
2924 key = msg.u.push_metadata.key;
2925 offset = msg.u.push_metadata.target_offset;
93ec662e 2926 version = msg.u.push_metadata.version;
331744e3
JD
2927
2928 assert(key == channel->key);
2929 if (len == 0) {
2930 DBG("No new metadata to receive for key %" PRIu64, key);
2931 }
2932
9ce5646a
MD
2933 health_code_update();
2934
331744e3
JD
2935 /* Tell session daemon we are ready to receive the metadata. */
2936 ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
0c759fc9 2937 LTTCOMM_CONSUMERD_SUCCESS);
331744e3
JD
2938 if (ret < 0 || len == 0) {
2939 /*
2940 * Somehow, the session daemon is not responding anymore or there is
2941 * nothing to receive.
2942 */
2943 goto end;
2944 }
2945
9ce5646a
MD
2946 health_code_update();
2947
1eb682be 2948 ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
93ec662e 2949 key, offset, len, version, channel, timer, wait);
1eb682be 2950 if (ret >= 0) {
f2a444f1
DG
2951 /*
2952 * Only send the status msg if the sessiond is alive meaning a positive
2953 * ret code.
2954 */
1eb682be 2955 (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret);
f2a444f1 2956 }
331744e3
JD
2957 ret = 0;
2958
2959end:
9ce5646a
MD
2960 health_code_update();
2961
75d83e50 2962 pthread_mutex_unlock(&ctx->metadata_socket_lock);
331744e3
JD
2963 return ret;
2964}
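/*
 * The exchange implemented above, in short: send a
 * lttcomm_metadata_request_msg on the metadata socket, receive either
 * LTTNG_ERR_UND (no registry) or an LTTNG_CONSUMER_PUSH_METADATA command,
 * acknowledge it with LTTCOMM_CONSUMERD_SUCCESS, then receive the payload
 * through lttng_ustconsumer_recv_metadata().
 */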
70190e1c
DG
2965
2966/*
2967 * Return the ustctl call for the get stream id.
2968 */
2969int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream,
2970 uint64_t *stream_id)
2971{
2972 assert(stream);
2973 assert(stream_id);
2974
2975 return ustctl_get_stream_id(stream->ustream, stream_id);
2976}