CUSTOM: live timer: immediate live timer control on data pending and destroy
[lttng-tools.git] / src / common / ust-consumer / ust-consumer.c
3bd1e081
MD
1/*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
d14d33bf
AM
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
3bd1e081
MD
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
d14d33bf
AM
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
3bd1e081
MD
17 */
18
6c1c0768 19#define _LGPL_SOURCE
3bd1e081 20#include <assert.h>
f02e1e8a 21#include <lttng/ust-ctl.h>
3bd1e081
MD
22#include <poll.h>
23#include <pthread.h>
24#include <stdlib.h>
25#include <string.h>
26#include <sys/mman.h>
27#include <sys/socket.h>
dbb5dfe6 28#include <sys/stat.h>
3bd1e081 29#include <sys/types.h>
77c7c900 30#include <inttypes.h>
3bd1e081 31#include <unistd.h>
ffe60014 32#include <urcu/list.h>
331744e3 33#include <signal.h>
29d1a7ae
JG
34#include <stdbool.h>
35#include <stdint.h>
0857097f 36
51a9e1c7 37#include <bin/lttng-consumerd/health-consumerd.h>
990570ed 38#include <common/common.h>
10a8a223 39#include <common/sessiond-comm/sessiond-comm.h>
00e2e675 40#include <common/relayd/relayd.h>
dbb5dfe6 41#include <common/compat/fcntl.h>
f263b7fd 42#include <common/compat/endian.h>
c8fea79c
JR
43#include <common/consumer/consumer-metadata-cache.h>
44#include <common/consumer/consumer-stream.h>
45#include <common/consumer/consumer-timer.h>
fe4477ee 46#include <common/utils.h>
309167d2 47#include <common/index/index.h>
29d1a7ae 48#include <common/consumer/consumer.h>
d6ef77b3 49#include <common/optional.h>
10a8a223
DG
50
51#include "ust-consumer.h"
3bd1e081 52
45863397 53#define INT_MAX_STR_LEN 12 /* includes \0 */
4628484a 54
3bd1e081
MD
55extern struct lttng_consumer_global_data consumer_data;
56extern int consumer_poll_timeout;
57extern volatile int consumer_quit;
58
59/*
ffe60014
DG
60 * Free channel object and all streams associated with it. This MUST be used
61 * only and only if the channel has _NEVER_ been added to the global channel
62 * hash table.
3bd1e081 63 */
ffe60014 64static void destroy_channel(struct lttng_consumer_channel *channel)
3bd1e081 65{
ffe60014
DG
66 struct lttng_consumer_stream *stream, *stmp;
67
68 assert(channel);
69
70 DBG("UST consumer cleaning stream list");
71
72 cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
73 send_node) {
9ce5646a
MD
74
75 health_code_update();
76
ffe60014
DG
77 cds_list_del(&stream->send_node);
78 ustctl_destroy_stream(stream->ustream);
79 free(stream);
80 }
81
82 /*
83 * If a channel is available, meaning it was created before the streams
84 * were, delete it.
85 */
86 if (channel->uchan) {
87 lttng_ustconsumer_del_channel(channel);
b83e03c4 88 lttng_ustconsumer_free_channel(channel);
ffe60014
DG
89 }
90 free(channel);
91}
3bd1e081
MD
92
93/*
ffe60014 94 * Add channel to internal consumer state.
3bd1e081 95 *
ffe60014 96 * Returns 0 on success or else a negative value.
3bd1e081 97 */
ffe60014
DG
98static int add_channel(struct lttng_consumer_channel *channel,
99 struct lttng_consumer_local_data *ctx)
3bd1e081
MD
100{
101 int ret = 0;
102
ffe60014
DG
103 assert(channel);
104 assert(ctx);
105
106 if (ctx->on_recv_channel != NULL) {
107 ret = ctx->on_recv_channel(channel);
108 if (ret == 0) {
d8ef542d 109 ret = consumer_add_channel(channel, ctx);
ffe60014
DG
110 } else if (ret < 0) {
111 /* Most likely an ENOMEM. */
112 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
113 goto error;
114 }
115 } else {
d8ef542d 116 ret = consumer_add_channel(channel, ctx);
3bd1e081
MD
117 }
118
d88aee68 119 DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);
ffe60014
DG
120
121error:
3bd1e081
MD
122 return ret;
123}
124
ffe60014
DG
125/*
126 * Allocate and return a consumer stream object. If _alloc_ret is not NULL, the
127 * error value, if applicable, is set in it; otherwise it is left untouched.
3bd1e081 128 *
ffe60014 129 * Return NULL on error else the newly allocated stream object.
3bd1e081 130 */
ffe60014
DG
131static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
132 struct lttng_consumer_channel *channel,
133 struct lttng_consumer_local_data *ctx, int *_alloc_ret)
134{
135 int alloc_ret;
136 struct lttng_consumer_stream *stream = NULL;
137
138 assert(channel);
139 assert(ctx);
140
29d1a7ae 141 stream = consumer_stream_create(
59db0d42
JG
142 channel,
143 channel->key,
ffe60014
DG
144 key,
145 LTTNG_CONSUMER_ACTIVE_STREAM,
146 channel->name,
147 channel->uid,
148 channel->gid,
149 channel->relayd_id,
150 channel->session_id,
151 cpu,
152 &alloc_ret,
4891ece8
DG
153 channel->type,
154 channel->monitor);
ffe60014
DG
155 if (stream == NULL) {
156 switch (alloc_ret) {
157 case -ENOENT:
158 /*
159 * We could not find the channel. Can happen if cpu hotplug
160 * happens while tearing down.
161 */
162 DBG3("Could not find channel");
163 break;
164 case -ENOMEM:
165 case -EINVAL:
166 default:
167 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
168 break;
169 }
170 goto error;
171 }
172
ffe60014
DG
173error:
174 if (_alloc_ret) {
175 *_alloc_ret = alloc_ret;
176 }
177 return stream;
178}
179
180/*
181 * Send the given stream pointer to the corresponding thread.
182 *
183 * Returns 0 on success else a negative value.
184 */
185static int send_stream_to_thread(struct lttng_consumer_stream *stream,
186 struct lttng_consumer_local_data *ctx)
187{
dae10966
DG
188 int ret;
189 struct lttng_pipe *stream_pipe;
ffe60014
DG
190
191 /* Get the right pipe where the stream will be sent. */
192 if (stream->metadata_flag) {
5ab66908
MD
193 ret = consumer_add_metadata_stream(stream);
194 if (ret) {
195 ERR("Consumer add metadata stream %" PRIu64 " failed.",
196 stream->key);
197 goto error;
198 }
dae10966 199 stream_pipe = ctx->consumer_metadata_pipe;
ffe60014 200 } else {
5ab66908
MD
201 ret = consumer_add_data_stream(stream);
202 if (ret) {
203 ERR("Consumer add stream %" PRIu64 " failed.",
204 stream->key);
205 goto error;
206 }
dae10966 207 stream_pipe = ctx->consumer_data_pipe;
ffe60014
DG
208 }
209
5ab66908
MD
210 /*
211 * From this point on, the stream's ownership has been moved away from
212 * the channel and becomes globally visible.
213 */
214 stream->globally_visible = 1;
215
dae10966 216 ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
ffe60014 217 if (ret < 0) {
dae10966
DG
218 ERR("Consumer write %s stream to pipe %d",
219 stream->metadata_flag ? "metadata" : "data",
220 lttng_pipe_get_writefd(stream_pipe));
5ab66908
MD
221 if (stream->metadata_flag) {
222 consumer_del_stream_for_metadata(stream);
223 } else {
224 consumer_del_stream_for_data(stream);
225 }
ffe60014 226 }
5ab66908 227error:
ffe60014
DG
228 return ret;
229}
230
4628484a
MD
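/*
 * Build the shm path for a given stream by appending the CPU number
 * (no separator) to the channel's shm_path. Returns 0 on success, a
 * negative value on error.
 */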
231static
232int get_stream_shm_path(char *stream_shm_path, const char *shm_path, int cpu)
233{
45863397 234 char cpu_nr[INT_MAX_STR_LEN]; /* int max len */
4628484a
MD
235 int ret;
236
237 strncpy(stream_shm_path, shm_path, PATH_MAX);
238 stream_shm_path[PATH_MAX - 1] = '\0';
45863397 239 ret = snprintf(cpu_nr, INT_MAX_STR_LEN, "%i", cpu);
67f8cb8d
MD
240 if (ret < 0) {
241 PERROR("snprintf");
4628484a
MD
242 goto end;
243 }
244 strncat(stream_shm_path, cpu_nr,
245 PATH_MAX - strlen(stream_shm_path) - 1);
246 ret = 0;
247end:
248 return ret;
249}
250
d88aee68
DG
251/*
252 * Create streams for the given channel using liblttng-ust-ctl.
253 *
254 * Return 0 on success else a negative value.
255 */
ffe60014
DG
256static int create_ust_streams(struct lttng_consumer_channel *channel,
257 struct lttng_consumer_local_data *ctx)
258{
259 int ret, cpu = 0;
260 struct ustctl_consumer_stream *ustream;
261 struct lttng_consumer_stream *stream;
262
263 assert(channel);
264 assert(ctx);
265
266 /*
267 * Loop while a stream is available from ustctl. When NULL is returned,
268 * we've reached the end of the possible streams for this channel.
269 */
270 while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
271 int wait_fd;
04ef1097 272 int ust_metadata_pipe[2];
ffe60014 273
9ce5646a
MD
274 health_code_update();
275
04ef1097
MD
276 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
277 ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
278 if (ret < 0) {
279 ERR("Create ust metadata poll pipe");
280 goto error;
281 }
282 wait_fd = ust_metadata_pipe[0];
283 } else {
284 wait_fd = ustctl_stream_get_wait_fd(ustream);
285 }
ffe60014
DG
286
287 /* Allocate consumer stream object. */
288 stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
289 if (!stream) {
290 goto error_alloc;
291 }
292 stream->ustream = ustream;
293 /*
294 * Store it so we can save multiple function calls afterwards since
295 * this value is used heavily in the stream threads. This is UST
296 * specific, which is why it's done after allocation.
297 */
298 stream->wait_fd = wait_fd;
299
b31398bb
DG
300 /*
301 * Increment channel refcount since the channel reference has now been
302 * assigned in the allocation process above.
303 */
10a50311
JD
304 if (stream->chan->monitor) {
305 uatomic_inc(&stream->chan->refcount);
306 }
b31398bb 307
ffe60014
DG
308 /*
309 * Order is important; this is why a list is used. On error, the caller
310 * should clean this list.
311 */
312 cds_list_add_tail(&stream->send_node, &channel->streams.head);
313
314 ret = ustctl_get_max_subbuf_size(stream->ustream,
315 &stream->max_sb_size);
316 if (ret < 0) {
317 ERR("ustctl_get_max_subbuf_size failed for stream %s",
318 stream->name);
319 goto error;
320 }
321
322 /* Do actions once stream has been received. */
323 if (ctx->on_recv_stream) {
324 ret = ctx->on_recv_stream(stream);
325 if (ret < 0) {
326 goto error;
327 }
328 }
329
d88aee68 330 DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
ffe60014
DG
331 stream->name, stream->key, stream->relayd_stream_id);
332
333 /* Set next CPU stream. */
334 channel->streams.count = ++cpu;
d88aee68
DG
335
336 /* Keep stream reference when creating metadata. */
337 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
338 channel->metadata_stream = stream;
8de4f941
JG
339 if (channel->monitor) {
340 /* Set metadata poll pipe if we created one */
341 memcpy(stream->ust_metadata_poll_pipe,
342 ust_metadata_pipe,
343 sizeof(ust_metadata_pipe));
344 }
d88aee68 345 }
ffe60014
DG
346 }
347
348 return 0;
349
350error:
351error_alloc:
352 return ret;
353}
354
4628484a
MD
355/*
356 * create_posix_shm is never called concurrently within a process.
357 */
358static
359int create_posix_shm(void)
360{
361 char tmp_name[NAME_MAX];
362 int shmfd, ret;
363
364 ret = snprintf(tmp_name, NAME_MAX, "/ust-shm-consumer-%d", getpid());
365 if (ret < 0) {
366 PERROR("snprintf");
367 return -1;
368 }
369 /*
370 * Allocate shm, and immediately unlink its shm object, keeping
371 * only the file descriptor as a reference to the object.
372 * We specifically do _not_ use the / at the beginning of the
373 * pathname so that some OS implementations can keep it local to
374 * the process (POSIX leaves this implementation-defined).
375 */
376 shmfd = shm_open(tmp_name, O_CREAT | O_EXCL | O_RDWR, 0700);
377 if (shmfd < 0) {
378 PERROR("shm_open");
379 goto error_shm_open;
380 }
381 ret = shm_unlink(tmp_name);
382 if (ret < 0 && errno != ENOENT) {
383 PERROR("shm_unlink");
384 goto error_shm_release;
385 }
386 return shmfd;
387
388error_shm_release:
389 ret = close(shmfd);
390 if (ret) {
391 PERROR("close");
392 }
393error_shm_open:
394 return -1;
395}
396
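/*
 * Open the file descriptor backing a stream's buffer. When the channel has
 * no shm_path configured, an anonymous POSIX shared memory object is used;
 * otherwise a per-CPU file is created under the channel's shm_path with the
 * channel's credentials.
 */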
397static int open_ust_stream_fd(struct lttng_consumer_channel *channel,
398 struct ustctl_consumer_channel_attr *attr,
399 int cpu)
400{
401 char shm_path[PATH_MAX];
402 int ret;
403
404 if (!channel->shm_path[0]) {
405 return create_posix_shm();
406 }
407 ret = get_stream_shm_path(shm_path, channel->shm_path, cpu);
408 if (ret) {
409 goto error_shm_path;
410 }
411 return run_as_open(shm_path,
412 O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR,
413 channel->uid, channel->gid);
414
415error_shm_path:
416 return -1;
417}
418
ffe60014
DG
419/*
420 * Create an UST channel with the given attributes and send it to the session
421 * daemon using the ust ctl API.
422 *
423 * Return 0 on success or else a negative value.
424 */
4628484a
MD
425static int create_ust_channel(struct lttng_consumer_channel *channel,
426 struct ustctl_consumer_channel_attr *attr,
427 struct ustctl_consumer_channel **ust_chanp)
ffe60014 428{
4628484a
MD
429 int ret, nr_stream_fds, i, j;
430 int *stream_fds;
431 struct ustctl_consumer_channel *ust_channel;
ffe60014 432
4628484a 433 assert(channel);
ffe60014 434 assert(attr);
4628484a 435 assert(ust_chanp);
ffe60014
DG
436
437 DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
438 "subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
439 "switch_timer_interval: %u, read_timer_interval: %u, "
440 "output: %d, type: %d", attr->overwrite, attr->subbuf_size,
441 attr->num_subbuf, attr->switch_timer_interval,
442 attr->read_timer_interval, attr->output, attr->type);
443
4628484a
MD
444 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA)
445 nr_stream_fds = 1;
446 else
447 nr_stream_fds = ustctl_get_nr_stream_per_channel();
448 stream_fds = zmalloc(nr_stream_fds * sizeof(*stream_fds));
449 if (!stream_fds) {
450 ret = -1;
451 goto error_alloc;
452 }
453 for (i = 0; i < nr_stream_fds; i++) {
454 stream_fds[i] = open_ust_stream_fd(channel, attr, i);
455 if (stream_fds[i] < 0) {
456 ret = -1;
457 goto error_open;
458 }
459 }
460 ust_channel = ustctl_create_channel(attr, stream_fds, nr_stream_fds);
461 if (!ust_channel) {
ffe60014
DG
462 ret = -1;
463 goto error_create;
464 }
4628484a
MD
465 channel->nr_stream_fds = nr_stream_fds;
466 channel->stream_fds = stream_fds;
467 *ust_chanp = ust_channel;
ffe60014
DG
468
469 return 0;
470
471error_create:
4628484a
MD
472error_open:
473 for (j = i - 1; j >= 0; j--) {
474 int closeret;
475
476 closeret = close(stream_fds[j]);
477 if (closeret) {
478 PERROR("close");
479 }
480 if (channel->shm_path[0]) {
481 char shm_path[PATH_MAX];
482
483 closeret = get_stream_shm_path(shm_path,
484 channel->shm_path, j);
485 if (closeret) {
486 ERR("Cannot get stream shm path");
487 }
488 closeret = run_as_unlink(shm_path,
489 channel->uid, channel->gid);
490 if (closeret) {
4628484a
MD
491 PERROR("unlink %s", shm_path);
492 }
493 }
494 }
495 /* Try to rmdir all directories under shm_path root. */
496 if (channel->root_shm_path[0]) {
497 (void) run_as_recursive_rmdir(channel->root_shm_path,
498 channel->uid, channel->gid);
499 }
500 free(stream_fds);
501error_alloc:
ffe60014
DG
502 return ret;
503}
504
d88aee68
DG
505/*
506 * Send a single given stream to the session daemon using the sock.
507 *
508 * Return 0 on success else a negative value.
509 */
ffe60014
DG
510static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
511{
512 int ret;
513
514 assert(stream);
515 assert(sock >= 0);
516
3eb914c0 517 DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
ffe60014
DG
518
519 /* Send stream to session daemon. */
520 ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
521 if (ret < 0) {
522 goto error;
523 }
524
ffe60014
DG
525error:
526 return ret;
527}
528
529/*
530 * Send channel to sessiond.
531 *
d88aee68 532 * Return 0 on success or else a negative value.
ffe60014
DG
533 */
534static int send_sessiond_channel(int sock,
535 struct lttng_consumer_channel *channel,
536 struct lttng_consumer_local_data *ctx, int *relayd_error)
537{
0c759fc9 538 int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
ffe60014 539 struct lttng_consumer_stream *stream;
6d40f8fa 540 uint64_t relayd_id = -1ULL;
ffe60014
DG
541
542 assert(channel);
543 assert(ctx);
544 assert(sock >= 0);
545
546 DBG("UST consumer sending channel %s to sessiond", channel->name);
547
62285ea4
DG
548 if (channel->relayd_id != (uint64_t) -1ULL) {
549 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
9ce5646a
MD
550
551 health_code_update();
552
62285ea4
DG
553 /* Try to send the stream to the relayd if one is available. */
554 ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
555 if (ret < 0) {
556 /*
557 * Flag that the relayd was the problem here probably due to a
558 * communication error on the socket.
559 */
560 if (relayd_error) {
561 *relayd_error = 1;
562 }
725d28b2 563 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
ffe60014 564 }
6d40f8fa
JG
565 if (relayd_id == -1ULL) {
566 relayd_id = stream->relayd_id;
a4baae1b
JD
567 }
568 }
f2a444f1 569 }
ffe60014 570
f2a444f1
DG
571 /* Inform sessiond that we are about to send channel and streams. */
572 ret = consumer_send_status_msg(sock, ret_code);
0c759fc9 573 if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
f2a444f1
DG
574 /*
575 * Either the session daemon is not responding or the relayd died so we
576 * stop now.
577 */
578 goto error;
579 }
580
581 /* Send channel to sessiond. */
582 ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
583 if (ret < 0) {
584 goto error;
585 }
586
587 ret = ustctl_channel_close_wakeup_fd(channel->uchan);
588 if (ret < 0) {
589 goto error;
590 }
591
592 /* The channel was sent successfully to the sessiond at this point. */
593 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
9ce5646a
MD
594
595 health_code_update();
596
ffe60014
DG
597 /* Send stream to session daemon. */
598 ret = send_sessiond_stream(sock, stream);
599 if (ret < 0) {
600 goto error;
601 }
602 }
603
604 /* Tell sessiond there is no more stream. */
605 ret = ustctl_send_stream_to_sessiond(sock, NULL);
606 if (ret < 0) {
607 goto error;
608 }
609
610 DBG("UST consumer NULL stream sent to sessiond");
611
612 return 0;
613
614error:
0c759fc9 615 if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
f2a444f1
DG
616 ret = -1;
617 }
ffe60014
DG
618 return ret;
619}
620
621/*
622 * Creates a channel and streams, and adds the channel to the internal channel
623 * state. The created stream must ONLY be sent once the GET_CHANNEL command is
624 * received.
625 *
626 * Return 0 on success or else, a negative value is returned and the channel
627 * MUST be destroyed by consumer_del_channel().
628 */
629static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
630 struct lttng_consumer_channel *channel,
631 struct ustctl_consumer_channel_attr *attr)
3bd1e081
MD
632{
633 int ret;
634
ffe60014
DG
635 assert(ctx);
636 assert(channel);
637 assert(attr);
638
639 /*
640 * This value is still used by the kernel consumer since for the kernel,
641 * the stream ownership is not IN the consumer so we need to have the
642 * number of streams left to be initialized so we can know when
643 * to delete the channel (see consumer.c).
644 *
645 * As for the user space tracer, the consumer creates and sends the
646 * streams to the session daemon, which only sends them to the application
647 * once every stream of a channel has been received, making this value
648 * useless because the streams will be added to the poll thread before the
649 * application receives them. This ensures that a stream can not hang up
650 * during initialization of a channel.
651 */
652 channel->nb_init_stream_left = 0;
653
654 /* The reply msg status is handled in the following call. */
4628484a 655 ret = create_ust_channel(channel, attr, &channel->uchan);
ffe60014 656 if (ret < 0) {
10a50311 657 goto end;
3bd1e081
MD
658 }
659
d8ef542d
MD
660 channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);
661
10a50311
JD
662 /*
663 * For the snapshots (no monitor), we create the metadata streams
664 * on demand, not during the channel creation.
665 */
666 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
667 ret = 0;
668 goto end;
669 }
670
ffe60014
DG
671 /* Open all streams for this channel. */
672 ret = create_ust_streams(channel, ctx);
673 if (ret < 0) {
10a50311 674 goto end;
ffe60014
DG
675 }
676
10a50311 677end:
3bd1e081
MD
678 return ret;
679}
680
d88aee68
DG
681/*
682 * Send all streams of a channel to the right thread handling it.
683 *
684 * On error, return a negative value else 0 on success.
685 */
686static int send_streams_to_thread(struct lttng_consumer_channel *channel,
687 struct lttng_consumer_local_data *ctx)
688{
689 int ret = 0;
690 struct lttng_consumer_stream *stream, *stmp;
691
692 assert(channel);
693 assert(ctx);
694
695 /* Send streams to the corresponding thread. */
696 cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
697 send_node) {
9ce5646a
MD
698
699 health_code_update();
700
d88aee68
DG
701 /* Sending the stream to the thread. */
702 ret = send_stream_to_thread(stream, ctx);
703 if (ret < 0) {
704 /*
705 * If we are unable to send the stream to the thread, there is
706 * a big problem so just stop everything.
707 */
5ab66908
MD
708 /* Remove node from the channel stream list. */
709 cds_list_del(&stream->send_node);
d88aee68
DG
710 goto error;
711 }
712
713 /* Remove node from the channel stream list. */
714 cds_list_del(&stream->send_node);
4891ece8 715
d88aee68
DG
716 }
717
718error:
719 return ret;
720}
721
7972aab2
DG
722/*
723 * Flush channel's streams using the given key to retrieve the channel.
724 *
725 * Return 0 on success else an LTTng error code.
726 */
727static int flush_channel(uint64_t chan_key)
728{
729 int ret = 0;
730 struct lttng_consumer_channel *channel;
731 struct lttng_consumer_stream *stream;
732 struct lttng_ht *ht;
733 struct lttng_ht_iter iter;
734
8fd623e0 735 DBG("UST consumer flush channel key %" PRIu64, chan_key);
7972aab2 736
a500c257 737 rcu_read_lock();
7972aab2
DG
738 channel = consumer_find_channel(chan_key);
739 if (!channel) {
8fd623e0 740 ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
7972aab2
DG
741 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
742 goto error;
743 }
744
745 ht = consumer_data.stream_per_chan_id_ht;
746
747 /* For each stream of the channel id, flush it. */
7972aab2
DG
748 cds_lfht_for_each_entry_duplicate(ht->ht,
749 ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
750 &channel->key, &iter.iter, stream, node_channel_id.node) {
9ce5646a
MD
751
752 health_code_update();
753
0dd01979 754 pthread_mutex_lock(&stream->lock);
123fff97
JR
755
756 /*
757 * Protect against concurrent teardown of a stream.
758 */
759 if (cds_lfht_is_node_deleted(&stream->node.node)) {
760 goto next;
761 }
762
0dd01979
MD
763 if (!stream->quiescent) {
764 ustctl_flush_buffer(stream->ustream, 0);
765 stream->quiescent = true;
766 }
123fff97 767next:
0dd01979
MD
768 pthread_mutex_unlock(&stream->lock);
769 }
770error:
771 rcu_read_unlock();
772 return ret;
773}
774
775/*
776 * Clear quiescent state from channel's streams using the given key to
777 * retrieve the channel.
778 *
779 * Return 0 on success else an LTTng error code.
780 */
781static int clear_quiescent_channel(uint64_t chan_key)
782{
783 int ret = 0;
784 struct lttng_consumer_channel *channel;
785 struct lttng_consumer_stream *stream;
786 struct lttng_ht *ht;
787 struct lttng_ht_iter iter;
788
789 DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);
790
791 rcu_read_lock();
792 channel = consumer_find_channel(chan_key);
793 if (!channel) {
794 ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
795 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
796 goto error;
797 }
798
799 ht = consumer_data.stream_per_chan_id_ht;
800
801 /* For each stream of the channel id, clear quiescent state. */
802 cds_lfht_for_each_entry_duplicate(ht->ht,
803 ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
804 &channel->key, &iter.iter, stream, node_channel_id.node) {
805
806 health_code_update();
807
808 pthread_mutex_lock(&stream->lock);
809 stream->quiescent = false;
810 pthread_mutex_unlock(&stream->lock);
7972aab2 811 }
7972aab2 812error:
a500c257 813 rcu_read_unlock();
7972aab2
DG
814 return ret;
815}
816
d88aee68
DG
817/*
818 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
a500c257 819 * RCU read side lock MUST be acquired before calling this function.
d88aee68
DG
820 *
821 * Return 0 on success else an LTTng error code.
822 */
823static int close_metadata(uint64_t chan_key)
824{
ea88ca2a 825 int ret = 0;
d88aee68 826 struct lttng_consumer_channel *channel;
a1ca62da 827 unsigned int channel_monitor;
d88aee68 828
8fd623e0 829 DBG("UST consumer close metadata key %" PRIu64, chan_key);
d88aee68
DG
830
831 channel = consumer_find_channel(chan_key);
832 if (!channel) {
84cc9aa0
DG
833 /*
834 * This is possible if the metadata thread has issued a delete because
835 * the endpoint of the stream hung up. There is no way the
836 * session daemon can know about it thus use a DBG instead of an actual
837 * error.
838 */
839 DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
d88aee68
DG
840 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
841 goto error;
842 }
843
ea88ca2a 844 pthread_mutex_lock(&consumer_data.lock);
a9838785 845 pthread_mutex_lock(&channel->lock);
a1ca62da 846 channel_monitor = channel->monitor;
73811ecc
DG
847 if (cds_lfht_is_node_deleted(&channel->node.node)) {
848 goto error_unlock;
849 }
850
6d574024 851 lttng_ustconsumer_close_metadata(channel);
a1ca62da
JG
852 pthread_mutex_unlock(&channel->lock);
853 pthread_mutex_unlock(&consumer_data.lock);
d88aee68 854
a1ca62da
JG
855 /*
856 * The ownership of a metadata channel depends on the type of
857 * session to which it belongs. In effect, the monitor flag is checked
858 * to determine if this metadata channel is in "snapshot" mode or not.
859 *
860 * In the non-snapshot case, the metadata channel is created along with
861 * a single stream which will remain present until the metadata channel
862 * is destroyed (on the destruction of its session). In this case, the
863 * metadata stream is "monitored" by the metadata poll thread and holds
864 * the ownership of its channel.
865 *
866 * Closing the metadata will cause the metadata stream's "metadata poll
867 * pipe" to be closed. Closing this pipe will wake-up the metadata poll
868 * thread which will teardown the metadata stream which, in return,
869 * deletes the metadata channel.
870 *
871 * In the snapshot case, the metadata stream is created and destroyed
872 * on every snapshot record. Since the channel doesn't have an owner
873 * other than the session daemon, it is safe to destroy it immediately
874 * on reception of the CLOSE_METADATA command.
875 */
876 if (!channel_monitor) {
877 /*
878 * The channel and consumer_data locks must be
879 * released before this call since consumer_del_channel
880 * re-acquires the channel and consumer_data locks to teardown
881 * the channel and queue its reclamation by the "call_rcu"
882 * worker thread.
883 */
884 consumer_del_channel(channel);
885 }
886
887 return ret;
ea88ca2a 888error_unlock:
a9838785 889 pthread_mutex_unlock(&channel->lock);
ea88ca2a 890 pthread_mutex_unlock(&consumer_data.lock);
d88aee68
DG
891error:
892 return ret;
893}
894
895/*
896 * RCU read side lock MUST be acquired before calling this function.
897 *
898 * Return 0 on success else an LTTng error code.
899 */
900static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
901{
902 int ret;
903 struct lttng_consumer_channel *metadata;
904
8fd623e0 905 DBG("UST consumer setup metadata key %" PRIu64, key);
d88aee68
DG
906
907 metadata = consumer_find_channel(key);
908 if (!metadata) {
909 ERR("UST consumer push metadata %" PRIu64 " not found", key);
910 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
10a50311
JD
911 goto end;
912 }
913
914 /*
915 * In no monitor mode, the metadata channel has no stream(s) so skip the
916 * ownership transfer to the metadata thread.
917 */
918 if (!metadata->monitor) {
919 DBG("Metadata channel in no monitor");
920 ret = 0;
921 goto end;
d88aee68
DG
922 }
923
924 /*
925 * Send metadata stream to relayd if one is available. Availability is
926 * known if the stream is still in the list of the channel.
927 */
928 if (cds_list_empty(&metadata->streams.head)) {
929 ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
930 ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
f5a0c9cf 931 goto error_no_stream;
d88aee68
DG
932 }
933
934 /* Send metadata stream to relayd if needed. */
6d40f8fa 935 if (metadata->metadata_stream->relayd_id != (uint64_t) -1ULL) {
62285ea4
DG
936 ret = consumer_send_relayd_stream(metadata->metadata_stream,
937 metadata->pathname);
938 if (ret < 0) {
939 ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
940 goto error;
941 }
601262d6 942 ret = consumer_send_relayd_streams_sent(
6d40f8fa 943 metadata->metadata_stream->relayd_id);
601262d6
JD
944 if (ret < 0) {
945 ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
946 goto error;
947 }
d88aee68
DG
948 }
949
950 ret = send_streams_to_thread(metadata, ctx);
951 if (ret < 0) {
952 /*
953 * If we are unable to send the stream to the thread, there is
954 * a big problem so just stop everything.
955 */
956 ret = LTTCOMM_CONSUMERD_FATAL;
957 goto error;
958 }
959 /* List MUST be empty after or else it could be reused. */
960 assert(cds_list_empty(&metadata->streams.head));
961
10a50311
JD
962 ret = 0;
963 goto end;
d88aee68
DG
964
965error:
f2a444f1
DG
966 /*
967 * Delete metadata channel on error. At this point, the metadata stream can
968 * NOT be monitored by the metadata thread thus having the guarantee that
969 * the stream is still in the local stream list of the channel. This call
970 * will make sure to clean that list.
971 */
f5a0c9cf 972 consumer_stream_destroy(metadata->metadata_stream, NULL);
212d67a2
DG
973 cds_list_del(&metadata->metadata_stream->send_node);
974 metadata->metadata_stream = NULL;
f5a0c9cf 975error_no_stream:
10a50311
JD
976end:
977 return ret;
978}
979
980/*
981 * Snapshot the whole metadata.
982 *
983 * Returns 0 on success, < 0 on error
984 */
985static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
986 struct lttng_consumer_local_data *ctx)
987{
988 int ret = 0;
10a50311
JD
989 struct lttng_consumer_channel *metadata_channel;
990 struct lttng_consumer_stream *metadata_stream;
991
992 assert(path);
993 assert(ctx);
994
995 DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
996 key, path);
997
998 rcu_read_lock();
999
1000 metadata_channel = consumer_find_channel(key);
1001 if (!metadata_channel) {
6a00837f
MD
1002 ERR("UST snapshot metadata channel not found for key %" PRIu64,
1003 key);
10a50311
JD
1004 ret = -1;
1005 goto error;
1006 }
1007 assert(!metadata_channel->monitor);
1008
9ce5646a
MD
1009 health_code_update();
1010
10a50311
JD
1011 /*
1012 * Ask the sessiond if we have new metadata waiting and update the
1013 * consumer metadata cache.
1014 */
94d49140 1015 ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);
10a50311
JD
1016 if (ret < 0) {
1017 goto error;
1018 }
1019
9ce5646a
MD
1020 health_code_update();
1021
10a50311
JD
1022 /*
1023 * The metadata stream is NOT created in no monitor mode when the channel
1024 * is created on a sessiond ask channel command.
1025 */
1026 ret = create_ust_streams(metadata_channel, ctx);
1027 if (ret < 0) {
1028 goto error;
1029 }
1030
1031 metadata_stream = metadata_channel->metadata_stream;
1032 assert(metadata_stream);
1033
1034 if (relayd_id != (uint64_t) -1ULL) {
6d40f8fa 1035 metadata_stream->relayd_id = relayd_id;
10a50311
JD
1036 ret = consumer_send_relayd_stream(metadata_stream, path);
1037 if (ret < 0) {
1038 goto error_stream;
1039 }
1040 } else {
1041 ret = utils_create_stream_file(path, metadata_stream->name,
1042 metadata_stream->chan->tracefile_size,
1043 metadata_stream->tracefile_count_current,
309167d2 1044 metadata_stream->uid, metadata_stream->gid, NULL);
10a50311
JD
1045 if (ret < 0) {
1046 goto error_stream;
1047 }
1048 metadata_stream->out_fd = ret;
1049 metadata_stream->tracefile_size_current = 0;
1050 }
1051
04ef1097 1052 do {
9ce5646a
MD
1053 health_code_update();
1054
29d1a7ae 1055 ret = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
10a50311 1056 if (ret < 0) {
94d49140 1057 goto error_stream;
10a50311 1058 }
04ef1097 1059 } while (ret > 0);
10a50311 1060
10a50311
JD
1061error_stream:
1062 /*
1063 * Clean up the stream completely because the next snapshot will use a new
1064 * metadata stream.
1065 */
10a50311 1066 consumer_stream_destroy(metadata_stream, NULL);
212d67a2 1067 cds_list_del(&metadata_stream->send_node);
10a50311
JD
1068 metadata_channel->metadata_stream = NULL;
1069
1070error:
1071 rcu_read_unlock();
1072 return ret;
1073}
1074
1fdb9a78
JG
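/*
 * Compute the address of the current sub-buffer from the stream's mmap base
 * and the current mmap read offset. Returns 0 on success, a negative errno
 * value on error.
 */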
1075static
1076int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
1077 const char **addr)
1078{
1079 int ret;
1080 unsigned long mmap_offset;
1081 const char *mmap_base;
1082
1083 mmap_base = ustctl_get_mmap_base(stream->ustream);
1084 if (!mmap_base) {
1085 ERR("Failed to get mmap base for stream `%s`",
1086 stream->name);
1087 ret = -EPERM;
1088 goto error;
1089 }
1090
1091 ret = ustctl_get_mmap_read_offset(stream->ustream, &mmap_offset);
1092 if (ret != 0) {
1093 ERR("Failed to get mmap offset for stream `%s`", stream->name);
1094 ret = -EINVAL;
1095 goto error;
1096 }
1097
1098 *addr = mmap_base + mmap_offset;
1099error:
1100 return ret;
1101
1102}
1103
10a50311
JD
1104/*
1105 * Take a snapshot of all the stream of a channel.
1106 *
1107 * Returns 0 on success, < 0 on error
1108 */
1109static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
d07ceecd 1110 uint64_t nb_packets_per_stream, struct lttng_consumer_local_data *ctx)
10a50311
JD
1111{
1112 int ret;
1113 unsigned use_relayd = 0;
1114 unsigned long consumed_pos, produced_pos;
1115 struct lttng_consumer_channel *channel;
1116 struct lttng_consumer_stream *stream;
1117
1118 assert(path);
1119 assert(ctx);
1120
1121 rcu_read_lock();
1122
1123 if (relayd_id != (uint64_t) -1ULL) {
1124 use_relayd = 1;
1125 }
1126
1127 channel = consumer_find_channel(key);
1128 if (!channel) {
6a00837f 1129 ERR("UST snapshot channel not found for key %" PRIu64, key);
10a50311
JD
1130 ret = -1;
1131 goto error;
1132 }
1133 assert(!channel->monitor);
6a00837f 1134 DBG("UST consumer snapshot channel %" PRIu64, key);
10a50311
JD
1135
1136 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
9ce5646a
MD
1137 health_code_update();
1138
10a50311
JD
1139 /* Lock stream because we are about to change its state. */
1140 pthread_mutex_lock(&stream->lock);
6d40f8fa 1141 stream->relayd_id = relayd_id;
10a50311
JD
1142
1143 if (use_relayd) {
1144 ret = consumer_send_relayd_stream(stream, path);
1145 if (ret < 0) {
1146 goto error_unlock;
1147 }
1148 } else {
1149 ret = utils_create_stream_file(path, stream->name,
1150 stream->chan->tracefile_size,
1151 stream->tracefile_count_current,
309167d2 1152 stream->uid, stream->gid, NULL);
10a50311
JD
1153 if (ret < 0) {
1154 goto error_unlock;
1155 }
1156 stream->out_fd = ret;
1157 stream->tracefile_size_current = 0;
1158
1159 DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
1160 stream->name, stream->key);
1161 }
a4baae1b
JD
1162 if (relayd_id != -1ULL) {
1163 ret = consumer_send_relayd_streams_sent(relayd_id);
1164 if (ret < 0) {
1165 goto error_unlock;
1166 }
1167 }
10a50311 1168
d4d80f77
MD
1169 /*
1170 * If tracing is active, we want to perform a "full" buffer flush.
1171 * Else, if quiescent, it has already been done by the prior stop.
1172 */
1173 if (!stream->quiescent) {
1174 ustctl_flush_buffer(stream->ustream, 0);
1175 }
10a50311
JD
1176
1177 ret = lttng_ustconsumer_take_snapshot(stream);
1178 if (ret < 0) {
1179 ERR("Taking UST snapshot");
1180 goto error_unlock;
1181 }
1182
1183 ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
1184 if (ret < 0) {
1185 ERR("Produced UST snapshot position");
1186 goto error_unlock;
1187 }
1188
1189 ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
1190 if (ret < 0) {
1191 ERR("Consumerd UST snapshot position");
1192 goto error_unlock;
1193 }
1194
5c786ded
JD
1195 /*
1196 * The original value is sent back if max stream size is larger than
d07ceecd 1197 * the possible size of the snapshot. Also, we assume that the session
5c786ded
JD
1198 * daemon should never send a maximum stream size that is lower than
1199 * subbuffer size.
1200 */
d07ceecd
MD
1201 consumed_pos = consumer_get_consume_start_pos(consumed_pos,
1202 produced_pos, nb_packets_per_stream,
1203 stream->max_sb_size);
5c786ded 1204
10a50311
JD
1205 while (consumed_pos < produced_pos) {
1206 ssize_t read_len;
1207 unsigned long len, padded_len;
1fdb9a78 1208 const char *subbuf_addr;
ace0e591 1209 struct lttng_buffer_view subbuf_view;
10a50311 1210
9ce5646a
MD
1211 health_code_update();
1212
10a50311
JD
1213 DBG("UST consumer taking snapshot at pos %lu", consumed_pos);
1214
1215 ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
1216 if (ret < 0) {
1217 if (ret != -EAGAIN) {
1218 PERROR("ustctl_get_subbuf snapshot");
1219 goto error_close_stream;
1220 }
1221 DBG("UST consumer get subbuf failed. Skipping it.");
1222 consumed_pos += stream->max_sb_size;
6e1f2e92 1223 stream->chan->lost_packets++;
10a50311
JD
1224 continue;
1225 }
1226
1227 ret = ustctl_get_subbuf_size(stream->ustream, &len);
1228 if (ret < 0) {
1229 ERR("Snapshot ustctl_get_subbuf_size");
1230 goto error_put_subbuf;
1231 }
1232
1233 ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
1234 if (ret < 0) {
1235 ERR("Snapshot ustctl_get_padded_subbuf_size");
1236 goto error_put_subbuf;
1237 }
1238
1fdb9a78
JG
1239 ret = get_current_subbuf_addr(stream, &subbuf_addr);
1240 if (ret) {
1241 goto error_put_subbuf;
1242 }
1243
ace0e591
JG
1244 subbuf_view = lttng_buffer_view_init(
1245 subbuf_addr, 0, padded_len);
d6ef77b3 1246 read_len = lttng_consumer_on_read_subbuffer_mmap(
29d1a7ae 1247 stream, &subbuf_view, padded_len - len);
10a50311
JD
1248 if (use_relayd) {
1249 if (read_len != len) {
56591bac 1250 ret = -EPERM;
10a50311
JD
1251 goto error_put_subbuf;
1252 }
1253 } else {
1254 if (read_len != padded_len) {
56591bac 1255 ret = -EPERM;
10a50311
JD
1256 goto error_put_subbuf;
1257 }
1258 }
1259
1260 ret = ustctl_put_subbuf(stream->ustream);
1261 if (ret < 0) {
1262 ERR("Snapshot ustctl_put_subbuf");
1263 goto error_close_stream;
1264 }
1265 consumed_pos += stream->max_sb_size;
1266 }
1267
1268 /* Simply close the stream so we can use it on the next snapshot. */
1269 consumer_stream_close(stream);
1270 pthread_mutex_unlock(&stream->lock);
1271 }
1272
1273 rcu_read_unlock();
1274 return 0;
1275
1276error_put_subbuf:
1277 if (ustctl_put_subbuf(stream->ustream) < 0) {
1278 ERR("Snapshot ustctl_put_subbuf");
1279 }
1280error_close_stream:
1281 consumer_stream_close(stream);
1282error_unlock:
1283 pthread_mutex_unlock(&stream->lock);
1284error:
1285 rcu_read_unlock();
d88aee68
DG
1286 return ret;
1287}
1288
331744e3 1289/*
c585821b
MD
1290 * Receive the metadata updates from the sessiond. Supports receiving
1291 * overlapping metadata, but it needs to always belong to a contiguous
1292 * range starting from 0.
1293 * Be careful about the locks held when calling this function: it needs
1294 * the metadata cache flush to concurrently progress in order to
1295 * complete.
331744e3
JD
1296 */
1297int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
93ec662e
JD
1298 uint64_t len, uint64_t version,
1299 struct lttng_consumer_channel *channel, int timer, int wait)
331744e3 1300{
0c759fc9 1301 int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
331744e3
JD
1302 char *metadata_str;
1303
8fd623e0 1304 DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);
331744e3
JD
1305
1306 metadata_str = zmalloc(len * sizeof(char));
1307 if (!metadata_str) {
1308 PERROR("zmalloc metadata string");
1309 ret_code = LTTCOMM_CONSUMERD_ENOMEM;
1310 goto end;
1311 }
1312
9ce5646a
MD
1313 health_code_update();
1314
331744e3
JD
1315 /* Receive metadata string. */
1316 ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
1317 if (ret < 0) {
1318 /* Session daemon is dead so return gracefully. */
1319 ret_code = ret;
1320 goto end_free;
1321 }
1322
9ce5646a
MD
1323 health_code_update();
1324
331744e3 1325 pthread_mutex_lock(&channel->metadata_cache->lock);
93ec662e
JD
1326 ret = consumer_metadata_cache_write(channel, offset, len, version,
1327 metadata_str);
331744e3
JD
1328 if (ret < 0) {
1329 /* Unable to handle metadata. Notify session daemon. */
1330 ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
a32bd775
DG
1331 /*
1332 * Skip metadata flush on write error since the offset and len might
1333 * not have been updated which could create an infinite loop below when
1334 * waiting for the metadata cache to be flushed.
1335 */
1336 pthread_mutex_unlock(&channel->metadata_cache->lock);
a32bd775 1337 goto end_free;
331744e3
JD
1338 }
1339 pthread_mutex_unlock(&channel->metadata_cache->lock);
1340
94d49140
JD
1341 if (!wait) {
1342 goto end_free;
1343 }
5e41ebe1 1344 while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
331744e3 1345 DBG("Waiting for metadata to be flushed");
9ce5646a
MD
1346
1347 health_code_update();
1348
331744e3
JD
1349 usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
1350 }
1351
1352end_free:
1353 free(metadata_str);
1354end:
1355 return ret_code;
1356}
1357
4cbc1a04
DG
1358/*
1359 * Receive command from session daemon and process it.
1360 *
1361 * Return 1 on success else a negative value or 0.
1362 */
3bd1e081
MD
1363int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
1364 int sock, struct pollfd *consumer_sockpoll)
1365{
1366 ssize_t ret;
0c759fc9 1367 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
3bd1e081 1368 struct lttcomm_consumer_msg msg;
ffe60014 1369 struct lttng_consumer_channel *channel = NULL;
3bd1e081 1370
9ce5646a
MD
1371 health_code_update();
1372
3bd1e081
MD
1373 ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
1374 if (ret != sizeof(msg)) {
173af62f
DG
1375 DBG("Consumer received unexpected message size %zd (expects %zu)",
1376 ret, sizeof(msg));
3be74084
DG
1377 /*
1378 * The ret value might be 0, meaning an orderly shutdown, but this is ok
1379 * since the caller handles this.
1380 */
489f70e9 1381 if (ret > 0) {
c6857fcf 1382 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
489f70e9
MD
1383 ret = -1;
1384 }
3bd1e081
MD
1385 return ret;
1386 }
9ce5646a
MD
1387
1388 health_code_update();
1389
84382d49
MD
1390 /* deprecated */
1391 assert(msg.cmd_type != LTTNG_CONSUMER_STOP);
3bd1e081 1392
9ce5646a
MD
1393 health_code_update();
1394
3f8e211f 1395 /* relayd needs RCU read-side lock */
b0b335c8
MD
1396 rcu_read_lock();
1397
3bd1e081 1398 switch (msg.cmd_type) {
00e2e675
DG
1399 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
1400 {
f50f23d9 1401 /* Session daemon status message are handled in the following call. */
028ba707 1402 consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
7735ef9e 1403 msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
d3e2ba59
JD
1404 &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
1405 msg.u.relayd_sock.relayd_session_id);
00e2e675
DG
1406 goto end_nosignal;
1407 }
173af62f
DG
1408 case LTTNG_CONSUMER_DESTROY_RELAYD:
1409 {
a6ba4fe1 1410 uint64_t index = msg.u.destroy_relayd.net_seq_idx;
173af62f
DG
1411 struct consumer_relayd_sock_pair *relayd;
1412
a6ba4fe1 1413 DBG("UST consumer destroying relayd %" PRIu64, index);
173af62f
DG
1414
1415 /* Get relayd reference if exists. */
a6ba4fe1 1416 relayd = consumer_find_relayd(index);
173af62f 1417 if (relayd == NULL) {
3448e266 1418 DBG("Unable to find relayd %" PRIu64, index);
e462382a 1419 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
173af62f
DG
1420 }
1421
a6ba4fe1
DG
1422 /*
1423 * Each relayd socket pair has a refcount of streams attached to it
1424 * which tells if the relayd is still active or not depending on the
1425 * refcount value.
1426 *
1427 * This will set the destroy flag of the relayd object and destroy it
1428 * if the refcount reaches zero when called.
1429 *
1430 * The destroy can happen either here or when a stream fd hangs up.
1431 */
f50f23d9
DG
1432 if (relayd) {
1433 consumer_flag_relayd_for_destroy(relayd);
1434 }
1435
d88aee68 1436 goto end_msg_sessiond;
173af62f 1437 }
3bd1e081
MD
1438 case LTTNG_CONSUMER_UPDATE_STREAM:
1439 {
3f8e211f 1440 rcu_read_unlock();
7ad0a0cb 1441 return -ENOSYS;
3bd1e081 1442 }
6d805429 1443 case LTTNG_CONSUMER_DATA_PENDING:
53632229 1444 {
3be74084 1445 int ret, is_data_pending;
6d805429 1446 uint64_t id = msg.u.data_pending.session_id;
ca22feea 1447
6d805429 1448 DBG("UST consumer data pending command for id %" PRIu64, id);
ca22feea 1449
3be74084 1450 is_data_pending = consumer_data_pending(id);
ca22feea
DG
1451
1452 /* Send back returned value to session daemon */
3be74084
DG
1453 ret = lttcomm_send_unix_sock(sock, &is_data_pending,
1454 sizeof(is_data_pending));
ca22feea 1455 if (ret < 0) {
3be74084 1456 DBG("Error when sending the data pending ret code: %d", ret);
489f70e9 1457 goto error_fatal;
ca22feea 1458 }
f50f23d9
DG
1459
1460 /*
1461 * No need to send back a status message since the data pending
1462 * returned value is the response.
1463 */
ca22feea 1464 break;
53632229 1465 }
ffe60014
DG
1466 case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
1467 {
1468 int ret;
1469 struct ustctl_consumer_channel_attr attr;
1470
1471 /* Create a plain object and reserve a channel key. */
11785f65
JG
1472 channel = consumer_allocate_channel(
1473 msg.u.ask_channel.key,
1474 msg.u.ask_channel.session_id,
1475 msg.u.ask_channel.pathname,
1476 msg.u.ask_channel.name,
1477 msg.u.ask_channel.uid,
1478 msg.u.ask_channel.gid,
1479 msg.u.ask_channel.relayd_id,
1624d5b7
JD
1480 (enum lttng_event_output) msg.u.ask_channel.output,
1481 msg.u.ask_channel.tracefile_size,
2bba9e53 1482 msg.u.ask_channel.tracefile_count,
1950109e 1483 msg.u.ask_channel.session_id_per_pid,
ecc48a90 1484 msg.u.ask_channel.monitor,
d7ba1388 1485 msg.u.ask_channel.live_timer_interval,
11785f65 1486 msg.u.ask_channel.is_live,
3d071855 1487 msg.u.ask_channel.root_shm_path,
d7ba1388 1488 msg.u.ask_channel.shm_path);
ffe60014
DG
1489 if (!channel) {
1490 goto end_channel_error;
1491 }
1492
567eb353
DG
1493 /*
1494 * Assign UST application UID to the channel. This value is ignored for
1495 * per PID buffers. This is specific to UST, thus it is set after the
1496 * allocation.
1497 */
1498 channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;
1499
ffe60014
DG
1500 /* Build channel attributes from received message. */
1501 attr.subbuf_size = msg.u.ask_channel.subbuf_size;
1502 attr.num_subbuf = msg.u.ask_channel.num_subbuf;
1503 attr.overwrite = msg.u.ask_channel.overwrite;
1504 attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
1505 attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
7972aab2 1506 attr.chan_id = msg.u.ask_channel.chan_id;
ffe60014
DG
1507 memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));
1508
0c759fc9
DG
1509 /* Match channel buffer type to the UST abi. */
1510 switch (msg.u.ask_channel.output) {
1511 case LTTNG_EVENT_MMAP:
1512 default:
1513 attr.output = LTTNG_UST_MMAP;
1514 break;
1515 }
1516
ffe60014
DG
1517 /* Translate and save channel type. */
1518 switch (msg.u.ask_channel.type) {
1519 case LTTNG_UST_CHAN_PER_CPU:
1520 channel->type = CONSUMER_CHANNEL_TYPE_DATA;
1521 attr.type = LTTNG_UST_CHAN_PER_CPU;
8633d6e3
MD
1522 /*
1523 * Set refcount to 1 for owner. Below, we will
1524 * pass ownership to the
1525 * consumer_thread_channel_poll() thread.
1526 */
1527 channel->refcount = 1;
ffe60014
DG
1528 break;
1529 case LTTNG_UST_CHAN_METADATA:
1530 channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
1531 attr.type = LTTNG_UST_CHAN_METADATA;
1532 break;
1533 default:
1534 assert(0);
1535 goto error_fatal;
1536 };
1537
9ce5646a
MD
1538 health_code_update();
1539
ffe60014
DG
1540 ret = ask_channel(ctx, sock, channel, &attr);
1541 if (ret < 0) {
1542 goto end_channel_error;
1543 }
1544
fc643247
MD
1545 if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
1546 ret = consumer_metadata_cache_allocate(channel);
1547 if (ret < 0) {
1548 ERR("Allocating metadata cache");
1549 goto end_channel_error;
1550 }
1551 consumer_timer_switch_start(channel, attr.switch_timer_interval);
1552 attr.switch_timer_interval = 0;
94d49140
JD
1553 } else {
1554 consumer_timer_live_start(channel,
1555 msg.u.ask_channel.live_timer_interval);
fc643247
MD
1556 }
1557
9ce5646a
MD
1558 health_code_update();
1559
ffe60014
DG
1560 /*
1561 * Add the channel to the internal state AFTER all streams were created
1562 * and successfully sent to session daemon. This way, all streams must
1563 * be ready before this channel is visible to the threads.
fc643247
MD
1564 * If add_channel succeeds, ownership of the channel is
1565 * passed to consumer_thread_channel_poll().
ffe60014
DG
1566 */
1567 ret = add_channel(channel, ctx);
1568 if (ret < 0) {
ea88ca2a
MD
1569 if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
1570 if (channel->switch_timer_enabled == 1) {
1571 consumer_timer_switch_stop(channel);
1572 }
1573 consumer_metadata_cache_destroy(channel);
1574 }
d3e2ba59
JD
1575 if (channel->live_timer_enabled == 1) {
1576 consumer_timer_live_stop(channel);
1577 }
ffe60014
DG
1578 goto end_channel_error;
1579 }
1580
9ce5646a
MD
1581 health_code_update();
1582
ffe60014
DG
1583 /*
1584 * Channel and streams are now created. Inform the session daemon that
1585 * everything went well and that it should wait to receive the channel and
1586 * streams with ustctl API.
1587 */
1588 ret = consumer_send_status_channel(sock, channel);
1589 if (ret < 0) {
1590 /*
489f70e9 1591 * There is probably a problem on the socket.
ffe60014 1592 */
489f70e9 1593 goto error_fatal;
ffe60014
DG
1594 }
1595
1596 break;
1597 }
1598 case LTTNG_CONSUMER_GET_CHANNEL:
1599 {
1600 int ret, relayd_err = 0;
d88aee68 1601 uint64_t key = msg.u.get_channel.key;
ffe60014 1602 struct lttng_consumer_channel *channel;
ffe60014
DG
1603
1604 channel = consumer_find_channel(key);
1605 if (!channel) {
8fd623e0 1606 ERR("UST consumer get channel key %" PRIu64 " not found", key);
e462382a 1607 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
ffe60014
DG
1608 goto end_msg_sessiond;
1609 }
1610
9ce5646a
MD
1611 health_code_update();
1612
ffe60014
DG
1613 /* Send everything to sessiond. */
1614 ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
1615 if (ret < 0) {
1616 if (relayd_err) {
1617 /*
1618 * We were unable to send the stream to the relayd, so avoid
1619 * sending back a fatal error to the thread since this is OK
f2a444f1
DG
1620 * and the consumer can continue its work. The above call
1621 * has sent the error status message to the sessiond.
ffe60014 1622 */
f2a444f1 1623 goto end_nosignal;
ffe60014
DG
1624 }
1625 /*
1626 * The communication was broken, hence there is a bad state between
1627 * the consumer and sessiond so stop everything.
1628 */
1629 goto error_fatal;
1630 }
1631
9ce5646a
MD
1632 health_code_update();
1633
10a50311
JD
1634 /*
1635 * In no monitor mode, the streams ownership is kept inside the channel
1636 * so don't send them to the data thread.
1637 */
1638 if (!channel->monitor) {
1639 goto end_msg_sessiond;
1640 }
1641
d88aee68
DG
1642 ret = send_streams_to_thread(channel, ctx);
1643 if (ret < 0) {
1644 /*
1645 * If we are unable to send the stream to the thread, there is
1646 * a big problem so just stop everything.
1647 */
1648 goto error_fatal;
ffe60014 1649 }
ffe60014
DG
1650 /* List MUST be empty after or else it could be reused. */
1651 assert(cds_list_empty(&channel->streams.head));
d88aee68
DG
1652 goto end_msg_sessiond;
1653 }
d60962b2
JR
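/*
 * CUSTOM commands (see commit subject): let the session daemon stop and
 * restart a channel's live timer directly, e.g. around data-pending checks
 * and channel destruction.
 */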
1654 case LTTNG_CONSUMER_CHANNEL_STOP_LIVE_TIMER:
1655 {
1656 uint64_t key = msg.u.get_channel.key;
1657 struct lttng_consumer_channel *channel;
1658
1659 channel = consumer_find_channel(key);
1660 if (!channel) {
1661 ERR("UST consumer get channel key %" PRIu64 " not found", key);
1662 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1663 goto end_msg_sessiond;
1664 }
1665
1666 health_code_update();
1667
1668 if (channel->live_timer_enabled == 1) {
1669 consumer_timer_live_stop(channel);
1670 }
1671
1672 health_code_update();
1673
1674 goto end_msg_sessiond;
1675 }
1676 case LTTNG_CONSUMER_CHANNEL_START_LIVE_TIMER:
1677 {
1678 uint64_t key = msg.u.get_channel.key;
1679 struct lttng_consumer_channel *channel;
1680
1681 channel = consumer_find_channel(key);
1682 if (!channel) {
1683 ERR("UST consumer get channel key %" PRIu64 " not found", key);
1684 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1685 goto end_msg_sessiond;
1686 }
1687
1688 health_code_update();
1689
1690 if (channel->live_timer_enabled == 0) {
1691 consumer_timer_live_start(channel, channel->live_timer_interval);
1692 }
1693
1694 health_code_update();
1695
1696 goto end_msg_sessiond;
1697 }
1698
d88aee68
DG
1699 case LTTNG_CONSUMER_DESTROY_CHANNEL:
1700 {
1701 uint64_t key = msg.u.destroy_channel.key;
d88aee68 1702
a0cbdd2e
MD
1703 /*
1704 * Only called if streams have not been sent to stream
1705 * manager thread. However, channel has been sent to
1706 * channel manager thread.
1707 */
1708 notify_thread_del_channel(ctx, key);
d88aee68 1709 goto end_msg_sessiond;
ffe60014 1710 }
d88aee68
DG
1711 case LTTNG_CONSUMER_CLOSE_METADATA:
1712 {
1713 int ret;
1714
1715 ret = close_metadata(msg.u.close_metadata.key);
1716 if (ret != 0) {
1717 ret_code = ret;
1718 }
1719
1720 goto end_msg_sessiond;
1721 }
7972aab2
DG
1722 case LTTNG_CONSUMER_FLUSH_CHANNEL:
1723 {
1724 int ret;
1725
1726 ret = flush_channel(msg.u.flush_channel.key);
1727 if (ret != 0) {
1728 ret_code = ret;
1729 }
1730
1731 goto end_msg_sessiond;
1732 }
0dd01979
MD
1733 case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL:
1734 {
1735 int ret;
1736
1737 ret = clear_quiescent_channel(
1738 msg.u.clear_quiescent_channel.key);
1739 if (ret != 0) {
1740 ret_code = ret;
1741 }
1742
1743 goto end_msg_sessiond;
1744 }
d88aee68 1745 case LTTNG_CONSUMER_PUSH_METADATA:
ffe60014
DG
1746 {
1747 int ret;
d88aee68 1748 uint64_t len = msg.u.push_metadata.len;
d88aee68 1749 uint64_t key = msg.u.push_metadata.key;
331744e3 1750 uint64_t offset = msg.u.push_metadata.target_offset;
93ec662e 1751 uint64_t version = msg.u.push_metadata.version;
ffe60014
DG
1752 struct lttng_consumer_channel *channel;
1753
8fd623e0
DG
1754 DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
1755 len);
ffe60014
DG
1756
1757 channel = consumer_find_channel(key);
1758 if (!channel) {
000baf6a
DG
1759 /*
1760 * This is possible if the metadata creation on the consumer side
1761 * is in flight vis-a-vis a concurrent push metadata from the
1762 * session daemon. Simply return that the channel failed and the
1763 * session daemon will handle that message correctly considering
1764 * that this race is acceptable thus the DBG() statement here.
1765 */
1766 DBG("UST consumer push metadata %" PRIu64 " not found", key);
1767 ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
4a2eb0ca 1768 goto end_msg_sessiond;
d88aee68
DG
1769 }
1770
9ce5646a
MD
1771 health_code_update();
1772
c585821b
MD
1773 if (!len) {
1774 /*
1775 * There is nothing to receive. We have simply
1776 * checked whether the channel can be found.
1777 */
1778 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1779 goto end_msg_sessiond;
1780 }
1781
d88aee68 1782 /* Tell session daemon we are ready to receive the metadata. */
0c759fc9 1783 ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
ffe60014
DG
1784 if (ret < 0) {
1785 /* Somehow, the session daemon is not responding anymore. */
d88aee68
DG
1786 goto error_fatal;
1787 }
1788
9ce5646a
MD
1789 health_code_update();
1790
d88aee68 1791 /* Wait for more data. */
9ce5646a
MD
1792 health_poll_entry();
1793 ret = lttng_consumer_poll_socket(consumer_sockpoll);
1794 health_poll_exit();
84382d49 1795 if (ret) {
489f70e9 1796 goto error_fatal;
d88aee68
DG
1797 }
1798
9ce5646a
MD
1799 health_code_update();
1800
331744e3 1801 ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
93ec662e 1802 len, version, channel, 0, 1);
d88aee68 1803 if (ret < 0) {
331744e3 1804 /* error receiving from sessiond */
489f70e9 1805 goto error_fatal;
331744e3
JD
1806 } else {
1807 ret_code = ret;
d88aee68
DG
1808 goto end_msg_sessiond;
1809 }
d88aee68
DG
1810 }
1811 case LTTNG_CONSUMER_SETUP_METADATA:
1812 {
1813 int ret;
1814
1815 ret = setup_metadata(ctx, msg.u.setup_metadata.key);
1816 if (ret) {
1817 ret_code = ret;
1818 }
1819 goto end_msg_sessiond;
ffe60014 1820 }
6dc3064a
DG
1821 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
1822 {
10a50311
JD
1823 if (msg.u.snapshot_channel.metadata) {
1824 ret = snapshot_metadata(msg.u.snapshot_channel.key,
1825 msg.u.snapshot_channel.pathname,
1826 msg.u.snapshot_channel.relayd_id,
1827 ctx);
1828 if (ret < 0) {
1829 ERR("Snapshot metadata failed");
e462382a 1830 ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
10a50311
JD
1831 }
1832 } else {
1833 ret = snapshot_channel(msg.u.snapshot_channel.key,
1834 msg.u.snapshot_channel.pathname,
1835 msg.u.snapshot_channel.relayd_id,
d07ceecd 1836 msg.u.snapshot_channel.nb_packets_per_stream,
10a50311
JD
1837 ctx);
1838 if (ret < 0) {
1839 ERR("Snapshot channel failed");
e462382a 1840 ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
10a50311
JD
1841 }
1842 }
1843
9ce5646a 1844 health_code_update();
6dc3064a
DG
1845 ret = consumer_send_status_msg(sock, ret_code);
1846 if (ret < 0) {
1847 /* Somehow, the session daemon is not responding anymore. */
1848 goto end_nosignal;
1849 }
9ce5646a 1850 health_code_update();
6dc3064a
DG
1851 break;
1852 }
fb83fe64
JD
1853 case LTTNG_CONSUMER_DISCARDED_EVENTS:
1854 {
beb59458
MJ
1855 int ret = 0;
1856 uint64_t discarded_events;
fb83fe64
JD
1857 struct lttng_ht_iter iter;
1858 struct lttng_ht *ht;
1859 struct lttng_consumer_stream *stream;
1860 uint64_t id = msg.u.discarded_events.session_id;
1861 uint64_t key = msg.u.discarded_events.channel_key;
1862
1863 DBG("UST consumer discarded events command for session id %"
1864 PRIu64, id);
1865 rcu_read_lock();
1866 pthread_mutex_lock(&consumer_data.lock);
1867
1868 ht = consumer_data.stream_list_ht;
1869
1870 /*
1871 * We only need a reference to the channel, but they are not
1872 * directly indexed, so we just use the first matching stream
1873 * to extract the information we need; we default to 0 if not
1874 * found (no events are dropped if the channel is not yet in
1875 * use).
1876 */
beb59458 1877 discarded_events = 0;
fb83fe64
JD
1878 cds_lfht_for_each_entry_duplicate(ht->ht,
1879 ht->hash_fct(&id, lttng_ht_seed),
1880 ht->match_fct, &id,
1881 &iter.iter, stream, node_session_id.node) {
1882 if (stream->chan->key == key) {
beb59458 1883 discarded_events = stream->chan->discarded_events;
fb83fe64
JD
1884 break;
1885 }
1886 }
1887 pthread_mutex_unlock(&consumer_data.lock);
1888 rcu_read_unlock();
1889
1890 DBG("UST consumer discarded events command for session id %"
1891 PRIu64 ", channel key %" PRIu64, id, key);
1892
1893 health_code_update();
1894
1895 /* Send back returned value to session daemon */
beb59458 1896 ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events));
fb83fe64
JD
1897 if (ret < 0) {
1898 PERROR("send discarded events");
1899 goto error_fatal;
1900 }
1901
1902 break;
1903 }
1904 case LTTNG_CONSUMER_LOST_PACKETS:
1905 {
9a06e8d4
JG
1906 int ret;
1907 uint64_t lost_packets;
fb83fe64
JD
1908 struct lttng_ht_iter iter;
1909 struct lttng_ht *ht;
1910 struct lttng_consumer_stream *stream;
1911 uint64_t id = msg.u.lost_packets.session_id;
1912 uint64_t key = msg.u.lost_packets.channel_key;
1913
1914 DBG("UST consumer lost packets command for session id %"
1915 PRIu64, id);
1916 rcu_read_lock();
1917 pthread_mutex_lock(&consumer_data.lock);
1918
1919 ht = consumer_data.stream_list_ht;
1920
1921 /*
1922 * We only need a reference to the channel, but they are not
1923 * directly indexed, so we just use the first matching stream
1924 * to extract the information we need; we default to 0 if not
1925 * found (no packets lost if the channel is not yet in use).
1926 */
9a06e8d4 1927 lost_packets = 0;
fb83fe64
JD
1928 cds_lfht_for_each_entry_duplicate(ht->ht,
1929 ht->hash_fct(&id, lttng_ht_seed),
1930 ht->match_fct, &id,
1931 &iter.iter, stream, node_session_id.node) {
1932 if (stream->chan->key == key) {
9a06e8d4 1933 lost_packets = stream->chan->lost_packets;
fb83fe64
JD
1934 break;
1935 }
1936 }
1937 pthread_mutex_unlock(&consumer_data.lock);
1938 rcu_read_unlock();
1939
1940 DBG("UST consumer lost packets command for session id %"
1941 PRIu64 ", channel key %" PRIu64, id, key);
1942
1943 health_code_update();
1944
1945 /* Send back returned value to session daemon */
9a06e8d4
JG
1946 ret = lttcomm_send_unix_sock(sock, &lost_packets,
1947 sizeof(lost_packets));
fb83fe64
JD
1948 if (ret < 0) {
1949 PERROR("send lost packets");
1950 goto error_fatal;
1951 }
1952
1953 break;
1954 }
3bd1e081
MD
1955 default:
1956 break;
1957 }
3f8e211f 1958
3bd1e081 1959end_nosignal:
b0b335c8 1960 rcu_read_unlock();
4cbc1a04 1961
9ce5646a
MD
1962 health_code_update();
1963
4cbc1a04
DG
1964 /*
1965 * Return 1 to indicate success since the 0 value can be a socket
1966 * shutdown during the recv() or send() call.
1967 */
1968 return 1;
ffe60014
DG
1969
1970end_msg_sessiond:
1971 /*
1972 * The returned value here is not useful since either way we'll return 1 to
1973 * the caller because the session daemon socket management is done
1974 * elsewhere. Returning a negative code or 0 would shut down the consumer.
1975 */
489f70e9
MD
1976 ret = consumer_send_status_msg(sock, ret_code);
1977 if (ret < 0) {
1978 goto error_fatal;
1979 }
ffe60014 1980 rcu_read_unlock();
9ce5646a
MD
1981
1982 health_code_update();
1983
ffe60014
DG
1984 return 1;
1985end_channel_error:
1986 if (channel) {
1987 /*
1988 * Free channel here since no one has a reference to it. We don't
1989 * free after that because a stream can store this pointer.
1990 */
1991 destroy_channel(channel);
1992 }
1993 /* We have to send a status channel message indicating an error. */
1994 ret = consumer_send_status_channel(sock, NULL);
1995 if (ret < 0) {
1996 /* Stop everything if the session daemon cannot be notified. */
1997 goto error_fatal;
1998 }
1999 rcu_read_unlock();
9ce5646a
MD
2000
2001 health_code_update();
2002
ffe60014
DG
2003 return 1;
2004error_fatal:
2005 rcu_read_unlock();
2006 /* This will issue a consumer stop. */
2007 return -1;
3bd1e081
MD
2008}
2009
1fdb9a78
JG
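/*
 * Flush a stream's ring buffer by delegating to ustctl_flush_buffer(). The
 * producer_active flag is forwarded as-is to the tracer.
 */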
2010void lttng_ustctl_flush_buffer(struct lttng_consumer_stream *stream,
2011 int producer_active)
3bd1e081 2012{
ffe60014
DG
2013 assert(stream);
2014 assert(stream->ustream);
b5c5fc29 2015
1fdb9a78 2016 ustctl_flush_buffer(stream->ustream, producer_active);
d056b477
MD
2017}
2018
ffe60014
DG
2019/*
2020 * Take a snapshot for a specific fd
2021 *
2022 * Returns 0 on success, < 0 on error
2023 */
2024int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
3bd1e081 2025{
ffe60014
DG
2026 assert(stream);
2027 assert(stream->ustream);
2028
2029 return ustctl_snapshot(stream->ustream);
3bd1e081
MD
2030}
2031
ffe60014
DG
2032/*
2033 * Get the produced position
2034 *
2035 * Returns 0 on success, < 0 on error
2036 */
2037int lttng_ustconsumer_get_produced_snapshot(
2038 struct lttng_consumer_stream *stream, unsigned long *pos)
3bd1e081 2039{
ffe60014
DG
2040 assert(stream);
2041 assert(stream->ustream);
2042 assert(pos);
7a57cf92 2043
ffe60014
DG
2044 return ustctl_snapshot_get_produced(stream->ustream, pos);
2045}
7a57cf92 2046
10a50311
JD
2047/*
2048 * Get the consumed position
2049 *
2050 * Returns 0 on success, < 0 on error
2051 */
2052int lttng_ustconsumer_get_consumed_snapshot(
2053 struct lttng_consumer_stream *stream, unsigned long *pos)
2054{
2055 assert(stream);
2056 assert(stream->ustream);
2057 assert(pos);
2058
2059 return ustctl_snapshot_get_consumed(stream->ustream, pos);
2060}
2061
84a182ce
DG
2062void lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream,
2063 int producer)
2064{
2065 assert(stream);
2066 assert(stream->ustream);
2067
2068 ustctl_flush_buffer(stream->ustream, producer);
2069}
2070
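/*
 * Read the stream's current timestamp into *ts (delegates to
 * ustctl_get_current_timestamp()).
 */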
2071int lttng_ustconsumer_get_current_timestamp(
2072 struct lttng_consumer_stream *stream, uint64_t *ts)
2073{
2074 assert(stream);
2075 assert(stream->ustream);
2076 assert(ts);
2077
2078 return ustctl_get_current_timestamp(stream->ustream, ts);
2079}
2080
fb83fe64
JD
2081int lttng_ustconsumer_get_sequence_number(
2082 struct lttng_consumer_stream *stream, uint64_t *seq)
2083{
2084 assert(stream);
2085 assert(stream->ustream);
2086 assert(seq);
2087
2088 return ustctl_get_sequence_number(stream->ustream, seq);
2089}
2090
ffe60014 2091/*
0dd01979 2092 * Called when the stream signals the consumer that it has hung up.
ffe60014
DG
2093 */
2094void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
2095{
2096 assert(stream);
2097 assert(stream->ustream);
2c1dd183 2098
0dd01979
MD
2099 pthread_mutex_lock(&stream->lock);
2100 if (!stream->quiescent) {
2101 ustctl_flush_buffer(stream->ustream, 0);
2102 stream->quiescent = true;
2103 }
2104 pthread_mutex_unlock(&stream->lock);
ffe60014
DG
2105 stream->hangup_flush_done = 1;
2106}
ee77a7b0 2107
ffe60014
DG
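/*
 * Tear down the consumer-side resources of a channel: stop its switch timer
 * if it is still running, close the per-stream shm file descriptors and
 * unlink the shm files when a shm_path was configured.
 */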
2108void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
2109{
4628484a
MD
2110 int i;
2111
ffe60014
DG
2112 assert(chan);
2113 assert(chan->uchan);
e316aad5 2114
ea88ca2a
MD
2115 if (chan->switch_timer_enabled == 1) {
2116 consumer_timer_switch_stop(chan);
2117 }
4628484a
MD
2118 for (i = 0; i < chan->nr_stream_fds; i++) {
2119 int ret;
2120
2121 ret = close(chan->stream_fds[i]);
2122 if (ret) {
2123 PERROR("close");
2124 }
2125 if (chan->shm_path[0]) {
2126 char shm_path[PATH_MAX];
2127
2128 ret = get_stream_shm_path(shm_path, chan->shm_path, i);
2129 if (ret) {
2130 ERR("Cannot get stream shm path");
2131 }
2132 ret = run_as_unlink(shm_path, chan->uid, chan->gid);
2133 if (ret) {
4628484a
MD
2134 PERROR("unlink %s", shm_path);
2135 }
2136 }
2137 }
3bd1e081
MD
2138}
2139
b83e03c4
MD
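/*
 * Free the tracer objects owned by a channel: destroy the metadata cache and
 * the ustctl channel, attempt to remove the directories under the shm path
 * root, and free the stream fd array.
 */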
2140void lttng_ustconsumer_free_channel(struct lttng_consumer_channel *chan)
2141{
2142 assert(chan);
2143 assert(chan->uchan);
2144
2145 consumer_metadata_cache_destroy(chan);
2146 ustctl_destroy_channel(chan->uchan);
ea853771
JR
2147 /* Try to rmdir all directories under shm_path root. */
2148 if (chan->root_shm_path[0]) {
2149 (void) run_as_recursive_rmdir(chan->root_shm_path,
2150 chan->uid, chan->gid);
2151 }
b83e03c4
MD
2152 free(chan->stream_fds);
2153}
2154
3bd1e081
MD
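/*
 * Destroy the tracer-side stream object, stopping the owning channel's
 * switch timer first if it is still enabled.
 */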
2155void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
2156{
ffe60014
DG
2157 assert(stream);
2158 assert(stream->ustream);
d41f73b7 2159
ea88ca2a
MD
2160 if (stream->chan->switch_timer_enabled == 1) {
2161 consumer_timer_switch_stop(stream->chan);
2162 }
ffe60014
DG
2163 ustctl_destroy_stream(stream->ustream);
2164}
d41f73b7 2165
6d574024
DG
2166int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream)
2167{
2168 assert(stream);
2169 assert(stream->ustream);
2170
2171 return ustctl_stream_get_wakeup_fd(stream->ustream);
2172}
2173
2174int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream)
2175{
2176 assert(stream);
2177 assert(stream->ustream);
2178
2179 return ustctl_stream_close_wakeup_fd(stream->ustream);
2180}
2181
93ec662e 2182static
3910d1ea
JG
2183void metadata_stream_reset_cache_consumed_position(
2184 struct lttng_consumer_stream *stream)
93ec662e 2185{
29d1a7ae
JG
2186 DBG("Reset metadata cache of session %" PRIu64,
2187 stream->chan->session_id);
93ec662e 2188 stream->ust_metadata_pushed = 0;
93ec662e
JD
2189}
2190
94d49140
JD
2191/*
2192 * Write up to one packet from the metadata cache to the channel.
2193 *
2194 * Returns the number of bytes pushed from the cache into the ring buffer,
2195 * or a negative value on error.
2196 */
2197static
2198int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
2199{
2200 ssize_t write_len;
2201 int ret;
2202
2203 pthread_mutex_lock(&stream->chan->metadata_cache->lock);
3910d1ea
JG
2204 if (stream->chan->metadata_cache->max_offset ==
2205 stream->ust_metadata_pushed) {
2206 /*
2207 * In the context of a user space metadata channel, a
2208 * change in version can be detected in two ways:
2209 * 1) During the pre-consume of the `read_subbuffer` loop,
2210 * 2) When populating the metadata ring buffer (i.e. here).
2211 *
2212 * This function is invoked when there is no metadata
2213 * available in the ring-buffer. If all data was consumed
2214 * up to the size of the metadata cache, there is no metadata
2215 * to insert in the ring-buffer.
2216 *
2217 * However, the metadata version could still have changed (a
2218 * regeneration without any new data will yield the same cache
2219 * size).
2220 *
2221 * The cache's version is checked for a version change and the
2222 * consumed position is reset if one occurred.
2223 *
2224 * This check is only necessary for the user space domain as
2225 * it has to manage the cache explicitly. If this reset was not
2226 * performed, no metadata would be consumed (and no reset would
2227 * occur as part of the pre-consume) until the metadata size
2228 * exceeded the cache size.
2229 */
2230 if (stream->metadata_version !=
2231 stream->chan->metadata_cache->version) {
2232 metadata_stream_reset_cache_consumed_position(stream);
2233 consumer_stream_metadata_set_version(stream,
2234 stream->chan->metadata_cache->version);
2235 } else {
2236 ret = 0;
2237 goto end;
2238 }
94d49140
JD
2239 }
2240
2241 write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
2242 &stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
c585821b 2243 stream->chan->metadata_cache->max_offset
94d49140
JD
2244 - stream->ust_metadata_pushed);
2245 assert(write_len != 0);
2246 if (write_len < 0) {
2247 ERR("Writing one metadata packet");
d6ef77b3 2248 ret = write_len;
94d49140
JD
2249 goto end;
2250 }
2251 stream->ust_metadata_pushed += write_len;
2252
c585821b 2253 assert(stream->chan->metadata_cache->max_offset >=
94d49140
JD
2254 stream->ust_metadata_pushed);
2255 ret = write_len;
2256
29d1a7ae
JG
2257 /*
2258 * Switch packet (but don't open the next one) on every commit of
2259 * a metadata packet. Since the subbuffer is fully filled (with padding,
2260 * if needed), the stream is "quiescent" after this commit.
2261 */
2262 ustctl_flush_buffer(stream->ustream, 1);
94d49140
JD
2263end:
2264 pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
2265 return ret;
2266}
2267
309167d2 2268
94d49140
JD
2269/*
2270 * Sync metadata: request it from the session daemon and take a snapshot so
2271 * that the metadata thread can consume it.
2272 *
c585821b
MD
2273 * Metadata stream lock is held here, but we need to release it when
2274 * interacting with sessiond, else we cause a deadlock with live
2275 * awaiting on metadata to be pushed out.
94d49140
JD
2276 *
2277 * Return 0 if new metadata is available, EAGAIN if the metadata stream
2278 * is empty or a negative value on error.
2279 */
2280int lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
2281 struct lttng_consumer_stream *metadata)
2282{
2283 int ret;
2284 int retry = 0;
2285
2286 assert(ctx);
2287 assert(metadata);
2288
c585821b 2289 pthread_mutex_unlock(&metadata->lock);
94d49140
JD
2290 /*
2291 * Request metadata from the sessiond, but don't wait for the flush
2292 * because we locked the metadata thread.
2293 */
2294 ret = lttng_ustconsumer_request_metadata(ctx, metadata->chan, 0, 0);
87f05398 2295 pthread_mutex_lock(&metadata->lock);
94d49140
JD
2296 if (ret < 0) {
2297 goto end;
2298 }
2299
2300 ret = commit_one_metadata_packet(metadata);
2301 if (ret <= 0) {
2302 goto end;
2303 } else if (ret > 0) {
2304 retry = 1;
2305 }
2306
2307 ustctl_flush_buffer(metadata->ustream, 1);
2308 ret = ustctl_snapshot(metadata->ustream);
2309 if (ret < 0) {
2310 if (errno != EAGAIN) {
2311 ERR("Sync metadata, taking UST snapshot");
2312 goto end;
2313 }
2314 DBG("No new metadata when syncing them.");
2315 /* No new metadata, exit. */
2316 ret = ENODATA;
2317 goto end;
2318 }
2319
2320 /*
2321 * After this flush, we still need to extract metadata.
2322 */
2323 if (retry) {
2324 ret = EAGAIN;
2325 }
2326
2327end:
2328 return ret;
2329}
2330
02b3d176
DG
2331/*
2332 * Return 0 on success else a negative value.
2333 */
2334static int notify_if_more_data(struct lttng_consumer_stream *stream,
2335 struct lttng_consumer_local_data *ctx)
2336{
2337 int ret;
2338 struct ustctl_consumer_stream *ustream;
2339
2340 assert(stream);
2341 assert(ctx);
2342
2343 ustream = stream->ustream;
2344
2345 /*
2346 * First, we are going to check if there is a new subbuffer available
2347 * before reading the stream wait_fd.
2348 */
2349 /* Get the next subbuffer */
2350 ret = ustctl_get_next_subbuf(ustream);
2351 if (ret) {
2352 /* No more data found, flag the stream. */
2353 stream->has_data = 0;
2354 ret = 0;
2355 goto end;
2356 }
2357
5420e5db 2358 ret = ustctl_put_subbuf(ustream);
02b3d176
DG
2359 assert(!ret);
2360
2361 /* This stream still has data. Flag it and wake up the data thread. */
2362 stream->has_data = 1;
2363
2364 if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) {
2365 ssize_t writelen;
2366
2367 writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1);
2368 if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
2369 ret = writelen;
2370 goto end;
2371 }
2372
2373 /* The wake up pipe has been notified. */
2374 ctx->has_wakeup = 1;
2375 }
2376 ret = 0;
2377
2378end:
2379 return ret;
2380}
2381
29d1a7ae 2382static int consumer_stream_ust_on_wake_up(struct lttng_consumer_stream *stream)
fb83fe64 2383{
29d1a7ae 2384 int ret = 0;
fb83fe64 2385
fb83fe64 2386 /*
29d1a7ae
JG
2387 * We can consume the 1 byte written into the wait_fd by
2388 * UST. Don't trigger error if we cannot read this one byte
2389 * (read returns 0), or if the error is EAGAIN or EWOULDBLOCK.
2390 *
2391 * This is only done when the stream is monitored by a thread,
2392 * before the flush is done after a hangup and if the stream
2393 * is not flagged with data since there might be nothing to
2394 * consume in the wait fd but still have data available
2395 * flagged by the consumer wake up pipe.
fb83fe64 2396 */
29d1a7ae
JG
2397 if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) {
2398 char dummy;
2399 ssize_t readlen;
2400
2401 readlen = lttng_read(stream->wait_fd, &dummy, 1);
2402 if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
2403 ret = readlen;
2404 }
fb83fe64 2405 }
fb83fe64 2406
29d1a7ae
JG
2407 return ret;
2408}
2409
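/*
 * Extract the sub-buffer sizes (unpadded and padded) that are common to both
 * data and metadata streams.
 */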
2410static int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
2411 struct stream_subbuffer *subbuf)
2412{
2413 int ret;
2414
2415 ret = ustctl_get_subbuf_size(
2416 stream->ustream, &subbuf->info.data.subbuf_size);
2417 if (ret) {
fb83fe64
JD
2418 goto end;
2419 }
29d1a7ae
JG
2420
2421 ret = ustctl_get_padded_subbuf_size(
2422 stream->ustream, &subbuf->info.data.padded_subbuf_size);
2423 if (ret) {
2424 goto end;
fb83fe64 2425 }
fb83fe64
JD
2426
2427end:
2428 return ret;
2429}
2430
29d1a7ae
JG
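/*
 * Extract the sub-buffer information of a metadata stream: the common sizes
 * plus the current metadata version.
 */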
2431static int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
2432 struct stream_subbuffer *subbuf)
d41f73b7 2433{
29d1a7ae 2434 int ret;
ffe60014 2435
29d1a7ae
JG
2436 ret = extract_common_subbuffer_info(stream, subbuf);
2437 if (ret) {
2438 goto end;
2439 }
d41f73b7 2440
3910d1ea 2441 subbuf->info.metadata.version = stream->metadata_version;
ffe60014 2442
29d1a7ae
JG
2443end:
2444 return ret;
2445}
d41f73b7 2446
29d1a7ae
JG
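/*
 * Extract the full sub-buffer information of a data stream: common sizes,
 * packet and content sizes, begin/end timestamps, discarded event count and,
 * when supported by the tracer, the sequence number and stream instance id.
 */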
2447static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
2448 struct stream_subbuffer *subbuf)
2449{
2450 int ret;
c617c0c6 2451
29d1a7ae
JG
2452 ret = extract_common_subbuffer_info(stream, subbuf);
2453 if (ret) {
2454 goto end;
d41f73b7
MD
2455 }
2456
29d1a7ae
JG
2457 ret = ustctl_get_packet_size(
2458 stream->ustream, &subbuf->info.data.packet_size);
2459 if (ret < 0) {
2460 PERROR("Failed to get sub-buffer packet size");
2461 goto end;
2462 }
04ef1097 2463
29d1a7ae
JG
2464 ret = ustctl_get_content_size(
2465 stream->ustream, &subbuf->info.data.content_size);
2466 if (ret < 0) {
2467 PERROR("Failed to get sub-buffer content size");
d41f73b7
MD
2468 goto end;
2469 }
309167d2 2470
29d1a7ae
JG
2471 ret = ustctl_get_timestamp_begin(
2472 stream->ustream, &subbuf->info.data.timestamp_begin);
2473 if (ret < 0) {
2474 PERROR("Failed to get sub-buffer begin timestamp");
2475 goto end;
2476 }
fb83fe64 2477
29d1a7ae
JG
2478 ret = ustctl_get_timestamp_end(
2479 stream->ustream, &subbuf->info.data.timestamp_end);
2480 if (ret < 0) {
2481 PERROR("Failed to get sub-buffer end timestamp");
2482 goto end;
2483 }
2484
2485 ret = ustctl_get_events_discarded(
2486 stream->ustream, &subbuf->info.data.events_discarded);
2487 if (ret) {
2488 PERROR("Failed to get sub-buffer events discarded count");
2489 goto end;
2490 }
2491
2492 ret = ustctl_get_sequence_number(stream->ustream,
2493 &subbuf->info.data.sequence_number.value);
2494 if (ret) {
2495 /* May not be supported by older LTTng-modules. */
2496 if (ret != -ENOTTY) {
2497 PERROR("Failed to get sub-buffer sequence number");
fb83fe64
JD
2498 goto end;
2499 }
1c20f0e2 2500 } else {
29d1a7ae 2501 subbuf->info.data.sequence_number.is_set = true;
309167d2
JD
2502 }
2503
29d1a7ae
JG
2504 ret = ustctl_get_stream_id(
2505 stream->ustream, &subbuf->info.data.stream_id);
2506 if (ret < 0) {
2507 PERROR("Failed to get stream id");
2508 goto end;
2509 }
1d4dfdef 2510
29d1a7ae
JG
2511 ret = ustctl_get_instance_id(stream->ustream,
2512 &subbuf->info.data.stream_instance_id.value);
2513 if (ret) {
2514 /* May not be supported by older LTTng-modules. */
2515 if (ret != -ENOTTY) {
2516 PERROR("Failed to get stream instance id");
2517 goto end;
2518 }
2519 } else {
2520 subbuf->info.data.stream_instance_id.is_set = true;
2521 }
2522end:
2523 return ret;
2524}
1d4dfdef 2525
29d1a7ae
JG
2526static int get_next_subbuffer_common(struct lttng_consumer_stream *stream,
2527 struct stream_subbuffer *subbuffer)
2528{
2529 int ret;
2530 const char *addr;
1d4dfdef 2531
29d1a7ae
JG
2532 ret = stream->read_subbuffer_ops.extract_subbuffer_info(
2533 stream, subbuffer);
2534 if (ret) {
2535 goto end;
2536 }
1fdb9a78 2537
29d1a7ae 2538 ret = get_current_subbuf_addr(stream, &addr);
1fdb9a78 2539 if (ret) {
29d1a7ae 2540 goto end;
1fdb9a78
JG
2541 }
2542
29d1a7ae
JG
2543 subbuffer->buffer.buffer = lttng_buffer_view_init(
2544 addr, 0, subbuffer->info.data.padded_subbuf_size);
2545 assert(subbuffer->buffer.buffer.data != NULL);
2546end:
2547 return ret;
2548}
ace0e591 2549
29d1a7ae
JG
2550static int get_next_subbuffer(struct lttng_consumer_stream *stream,
2551 struct stream_subbuffer *subbuffer)
2552{
2553 int ret;
331744e3 2554
29d1a7ae
JG
2555 ret = ustctl_get_next_subbuf(stream->ustream);
2556 if (ret) {
2557 goto end;
02b3d176
DG
2558 }
2559
29d1a7ae
JG
2560 ret = get_next_subbuffer_common(stream, subbuffer);
2561 if (ret) {
1c20f0e2
JD
2562 goto end;
2563 }
29d1a7ae
JG
2564end:
2565 return ret;
2566}
1c20f0e2 2567
29d1a7ae
JG
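/*
 * Get the next metadata sub-buffer, committing cached metadata to the ring
 * buffer when none is immediately available, and record whether the
 * resulting sub-buffer can be considered "coherent".
 */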
2568static int get_next_subbuffer_metadata(struct lttng_consumer_stream *stream,
2569 struct stream_subbuffer *subbuffer)
2570{
2571 int ret;
d6ef77b3
JG
2572 bool cache_empty;
2573 bool got_subbuffer;
2574 bool coherent;
2575 bool buffer_empty;
2576 unsigned long consumed_pos, produced_pos;
29d1a7ae 2577
d6ef77b3
JG
2578 do {
2579 ret = ustctl_get_next_subbuf(stream->ustream);
2580 if (ret == 0) {
2581 got_subbuffer = true;
2582 } else {
2583 got_subbuffer = false;
2584 if (ret != -EAGAIN) {
2585 /* Fatal error. */
2586 goto end;
2587 }
c585821b
MD
2588 }
2589
d6ef77b3
JG
2590 /*
2591 * Determine if the cache is empty and ensure that a sub-buffer
2592 * is made available if the cache is not empty.
2593 */
2594 if (!got_subbuffer) {
2595 ret = commit_one_metadata_packet(stream);
2596 if (ret < 0 && ret != -ENOBUFS) {
2597 goto end;
2598 } else if (ret == 0) {
2599 /* Not an error, the cache is empty. */
2600 cache_empty = true;
2601 ret = -ENODATA;
2602 goto end;
2603 } else {
2604 cache_empty = false;
2605 }
2606 } else {
2607 pthread_mutex_lock(&stream->chan->metadata_cache->lock);
2608 cache_empty = stream->chan->metadata_cache->max_offset ==
2609 stream->ust_metadata_pushed;
2610 pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
94d49140 2611 }
d6ef77b3 2612 } while (!got_subbuffer);
94d49140 2613
d6ef77b3 2614 /* Populate sub-buffer infos and view. */
29d1a7ae
JG
2615 ret = get_next_subbuffer_common(stream, subbuffer);
2616 if (ret) {
1c20f0e2 2617 goto end;
309167d2 2618 }
d6ef77b3
JG
2619
2620 ret = lttng_ustconsumer_take_snapshot(stream);
2621 if (ret < 0) {
2622 /*
2623 * -EAGAIN is not expected since we got a sub-buffer and haven't
2624 * pushed the consumption position yet (on put_next).
2625 */
2626 PERROR("Failed to take a snapshot of metadata buffer positions");
2627 goto end;
2628 }
2629
2630 ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
2631 if (ret) {
2632 PERROR("Failed to get metadata consumed position");
2633 goto end;
2634 }
2635
2636 ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
2637 if (ret) {
2638 PERROR("Failed to get metadata produced position");
2639 goto end;
2640 }
2641
2642 /* Last sub-buffer of the ring buffer? */
2643 buffer_empty = (consumed_pos + stream->max_sb_size) == produced_pos;
2644
2645 /*
2646 * The sessiond registry lock ensures that coherent units of metadata
2647 * are pushed to the consumer daemon at once. Hence, if a sub-buffer is
2648 * acquired, the cache is empty, and it is the only sub-buffer
2649 * available, it is safe to assume that it is "coherent".
2650 */
2651 coherent = got_subbuffer && cache_empty && buffer_empty;
2652
2653 LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);
d41f73b7
MD
2654end:
2655 return ret;
2656}
2657
29d1a7ae
JG
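/*
 * Release the currently held sub-buffer back to the tracer.
 */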
2658static int put_next_subbuffer(struct lttng_consumer_stream *stream,
2659 struct stream_subbuffer *subbuffer)
2660{
2661 const int ret = ustctl_put_next_subbuf(stream->ustream);
2662
2663 assert(ret == 0);
2664 return ret;
2665}
2666
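/*
 * Wake up any thread waiting on the stream's metadata rendez-vous point;
 * only installed as the on_sleep hook of live metadata streams.
 */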
2667static int signal_metadata(struct lttng_consumer_stream *stream,
2668 struct lttng_consumer_local_data *ctx)
2669{
2670 return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
2671}
2672
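/*
 * Install the UST-specific read_subbuffer_ops callbacks on a stream,
 * selecting the metadata or data variants and the live-specific hooks.
 */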
d6ef77b3 2673static int lttng_ustconsumer_set_stream_ops(
29d1a7ae
JG
2674 struct lttng_consumer_stream *stream)
2675{
d6ef77b3
JG
2676 int ret = 0;
2677
29d1a7ae
JG
2678 stream->read_subbuffer_ops.on_wake_up = consumer_stream_ust_on_wake_up;
2679 if (stream->metadata_flag) {
2680 stream->read_subbuffer_ops.get_next_subbuffer =
2681 get_next_subbuffer_metadata;
2682 stream->read_subbuffer_ops.extract_subbuffer_info =
2683 extract_metadata_subbuffer_info;
2684 stream->read_subbuffer_ops.reset_metadata =
3910d1ea 2685 metadata_stream_reset_cache_consumed_position;
d6ef77b3
JG
2686 if (stream->chan->is_live) {
2687 stream->read_subbuffer_ops.on_sleep = signal_metadata;
2688 ret = consumer_stream_enable_metadata_bucketization(
2689 stream);
2690 if (ret) {
2691 goto end;
2692 }
2693 }
29d1a7ae
JG
2694 } else {
2695 stream->read_subbuffer_ops.get_next_subbuffer =
2696 get_next_subbuffer;
2697 stream->read_subbuffer_ops.extract_subbuffer_info =
2698 extract_data_subbuffer_info;
2699 stream->read_subbuffer_ops.on_sleep = notify_if_more_data;
2700 if (stream->chan->is_live) {
2701 stream->read_subbuffer_ops.send_live_beacon =
2702 consumer_flush_ust_index;
2703 }
2704 }
2705
2706 stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
d6ef77b3
JG
2707end:
2708 return ret;
29d1a7ae
JG
2709}
2710
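/*
 * A rough sketch of how the callbacks installed above are expected to be
 * driven by the generic read-subbuffer path (simplified assumption; the real
 * loop, lttng_consumer_read_subbuffer() in the common consumer code, also
 * handles locking, error reporting and index writing):
 *
 *	struct stream_subbuffer subbuf = {};
 *
 *	if (ops->on_wake_up && ops->on_wake_up(stream)) {
 *		goto error;
 *	}
 *	if (!ops->get_next_subbuffer(stream, &subbuf)) {
 *		// consume subbuf.buffer.buffer here
 *		ops->put_next_subbuffer(stream, &subbuf);
 *	}
 *	if (ops->on_sleep) {
 *		ops->on_sleep(stream, ctx);
 *	}
 */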
ffe60014
DG
2711/*
2712 * Called when a stream is created.
fe4477ee
JD
2713 *
2714 * Return 0 on success or else a negative value.
ffe60014 2715 */
d41f73b7
MD
2716int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
2717{
fe4477ee
JD
2718 int ret;
2719
10a50311
JD
2720 assert(stream);
2721
fe4477ee 2722 /* Don't create anything if this is set for streaming. */
6d40f8fa 2723 if (stream->relayd_id == (uint64_t) -1ULL && stream->chan->monitor) {
fe4477ee
JD
2724 ret = utils_create_stream_file(stream->chan->pathname, stream->name,
2725 stream->chan->tracefile_size, stream->tracefile_count_current,
309167d2 2726 stream->uid, stream->gid, NULL);
fe4477ee
JD
2727 if (ret < 0) {
2728 goto error;
2729 }
2730 stream->out_fd = ret;
2731 stream->tracefile_size_current = 0;
309167d2
JD
2732
2733 if (!stream->metadata_flag) {
e0547b83
MD
2734 struct lttng_index_file *index_file;
2735
2736 index_file = lttng_index_file_create(stream->chan->pathname,
309167d2
JD
2737 stream->name, stream->uid, stream->gid,
2738 stream->chan->tracefile_size,
e0547b83
MD
2739 stream->tracefile_count_current,
2740 CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
2741 if (!index_file) {
309167d2
JD
2742 goto error;
2743 }
e0547b83 2744 stream->index_file = index_file;
309167d2 2745 }
fe4477ee 2746 }
29d1a7ae
JG
2747
2748 lttng_ustconsumer_set_stream_ops(stream);
fe4477ee
JD
2749 ret = 0;
2750
2751error:
2752 return ret;
d41f73b7 2753}
ca22feea
DG
2754
2755/*
2756 * Check if data is still being extracted from the buffers for a specific
4e9a4686
DG
2757 * stream. The consumer data lock and the stream lock MUST be acquired
2758 * before calling this function.
ca22feea 2759 *
6d805429 2760 * Return 1 if the traced data is still being read, else 0 meaning that the
ca22feea
DG
2761 * data is available for trace viewer reading.
2762 */
6d805429 2763int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
ca22feea
DG
2764{
2765 int ret;
2766
2767 assert(stream);
ffe60014 2768 assert(stream->ustream);
ca22feea 2769
6d805429 2770 DBG("UST consumer checking data pending");
c8f59ee5 2771
ca6b395f
MD
2772 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
2773 ret = 0;
2774 goto end;
2775 }
2776
04ef1097 2777 if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
e6ee4eab
DG
2778 uint64_t contiguous, pushed;
2779
2780 /* Ease our life a bit. */
c585821b 2781 contiguous = stream->chan->metadata_cache->max_offset;
e6ee4eab
DG
2782 pushed = stream->ust_metadata_pushed;
2783
04ef1097
MD
2784 /*
2785 * We can simply check whether all contiguously available data
2786 * has been pushed to the ring buffer, since the push operation
2787 * is performed within get_next_subbuf(), and because both
2788 * get_next_subbuf() and put_next_subbuf() are issued atomically
2789 * thanks to the stream lock within
2790 * lttng_ustconsumer_read_subbuffer(). This basically means that
2791 * whenever ust_metadata_pushed is incremented, the associated
2792 * metadata has been consumed from the metadata stream.
2793 */
2794 DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
e6ee4eab 2795 contiguous, pushed);
aa01b94c 2796 assert(((int64_t) (contiguous - pushed)) >= 0);
e6ee4eab 2797 if ((contiguous != pushed) ||
6acdf328 2798 (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
04ef1097
MD
2799 ret = 1; /* Data is pending */
2800 goto end;
2801 }
2802 } else {
2803 ret = ustctl_get_next_subbuf(stream->ustream);
2804 if (ret == 0) {
2805 /*
2806 * There is still data so let's put back this
2807 * subbuffer.
2808 */
2809 ret = ustctl_put_subbuf(stream->ustream);
2810 assert(ret == 0);
2811 ret = 1; /* Data is pending */
2812 goto end;
2813 }
ca22feea
DG
2814 }
2815
6d805429
DG
2816 /* Data is NOT pending so ready to be read. */
2817 ret = 0;
ca22feea 2818
6efae65e
DG
2819end:
2820 return ret;
ca22feea 2821}
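/*
 * Minimal usage sketch (illustrative only): the caller is expected to hold
 * both locks mentioned above, e.g.:
 *
 *	pthread_mutex_lock(&consumer_data.lock);
 *	pthread_mutex_lock(&stream->lock);
 *	data_pending = lttng_ustconsumer_data_pending(stream);
 *	pthread_mutex_unlock(&stream->lock);
 *	pthread_mutex_unlock(&consumer_data.lock);
 */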
d88aee68 2822
6d574024
DG
2823/*
2824 * Stop a given metadata channel timer if enabled and close the wait fd which
2825 * is the poll pipe of the metadata stream.
2826 *
2827 * This MUST be called with the metadata channel acquired.
2828 */
2829void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
2830{
2831 int ret;
2832
2833 assert(metadata);
2834 assert(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA);
2835
2836 DBG("Closing metadata channel key %" PRIu64, metadata->key);
2837
2838 if (metadata->switch_timer_enabled == 1) {
2839 consumer_timer_switch_stop(metadata);
2840 }
2841
2842 if (!metadata->metadata_stream) {
2843 goto end;
2844 }
2845
2846 /*
2847 * Close the write side so that the thread monitoring the stream, if any,
2848 * wakes up and cleans up the metadata stream.
2849 */
2850 if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) {
2851 ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]);
2852 if (ret < 0) {
2853 PERROR("closing metadata pipe write side");
2854 }
2855 metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1;
2856 }
2857
2858end:
2859 return;
2860}
2861
d88aee68
DG
2862/*
2863 * Close every metadata stream wait fd of the metadata hash table. This
2864 * function MUST be used very carefully so as not to run into a race between the
2865 * metadata thread handling streams and this function closing their wait fd.
2866 *
2867 * For UST, this is used when the session daemon hangs up. It is the metadata
2868 * producer, so calling this is safe because we are assured that no state change
2869 * can occur in the metadata thread for the streams in the hash table.
2870 */
6d574024 2871void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
d88aee68 2872{
d88aee68
DG
2873 struct lttng_ht_iter iter;
2874 struct lttng_consumer_stream *stream;
2875
2876 assert(metadata_ht);
2877 assert(metadata_ht->ht);
2878
2879 DBG("UST consumer closing all metadata streams");
2880
2881 rcu_read_lock();
2882 cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
2883 node.node) {
9ce5646a
MD
2884
2885 health_code_update();
2886
be2b50c7 2887 pthread_mutex_lock(&stream->chan->lock);
6d574024 2888 lttng_ustconsumer_close_metadata(stream->chan);
be2b50c7
DG
2889 pthread_mutex_unlock(&stream->chan->lock);
2890
d88aee68
DG
2891 }
2892 rcu_read_unlock();
2893}
d8ef542d
MD
2894
2895void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
2896{
2897 int ret;
2898
2899 ret = ustctl_stream_close_wakeup_fd(stream->ustream);
2900 if (ret < 0) {
2901 ERR("Unable to close wakeup fd");
2902 }
2903}
331744e3 2904
f666ae70
MD
2905/*
2906 * Please refer to consumer-timer.c before adding any lock within this
2907 * function or any of its callees. Timers have a very strict locking
2908 * semantic with respect to teardown. Failure to respect this semantic
2909 * introduces deadlocks.
c585821b
MD
2910 *
2911 * DON'T hold the metadata lock when calling this function, else this
2912 * can cause deadlock involving consumer awaiting for metadata to be
2913 * pushed out due to concurrent interaction with the session daemon.
f666ae70 2914 */
331744e3 2915int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
94d49140 2916 struct lttng_consumer_channel *channel, int timer, int wait)
331744e3
JD
2917{
2918 struct lttcomm_metadata_request_msg request;
2919 struct lttcomm_consumer_msg msg;
0c759fc9 2920 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
93ec662e 2921 uint64_t len, key, offset, version;
331744e3
JD
2922 int ret;
2923
2924 assert(channel);
2925 assert(channel->metadata_cache);
2926
53efb85a
MD
2927 memset(&request, 0, sizeof(request));
2928
331744e3
JD
2929 /* send the metadata request to sessiond */
2930 switch (consumer_data.type) {
2931 case LTTNG_CONSUMER64_UST:
2932 request.bits_per_long = 64;
2933 break;
2934 case LTTNG_CONSUMER32_UST:
2935 request.bits_per_long = 32;
2936 break;
2937 default:
2938 request.bits_per_long = 0;
2939 break;
2940 }
2941
2942 request.session_id = channel->session_id;
1950109e 2943 request.session_id_per_pid = channel->session_id_per_pid;
567eb353
DG
2944 /*
2945 * Request the application UID here so the metadata of that application can
2946 * be sent back. The channel UID corresponds to the user UID of the session
2947 * used for the rights on the stream file(s).
2948 */
2949 request.uid = channel->ust_app_uid;
331744e3 2950 request.key = channel->key;
567eb353 2951
1950109e 2952 DBG("Sending metadata request to sessiond, session id %" PRIu64
cc84d37b 2953 ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
567eb353
DG
2954 request.session_id, request.session_id_per_pid, request.uid,
2955 request.key);
331744e3 2956
75d83e50 2957 pthread_mutex_lock(&ctx->metadata_socket_lock);
9ce5646a
MD
2958
2959 health_code_update();
2960
331744e3
JD
2961 ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
2962 sizeof(request));
2963 if (ret < 0) {
2964 ERR("Asking metadata to sessiond");
2965 goto end;
2966 }
2967
9ce5646a
MD
2968 health_code_update();
2969
331744e3
JD
2970 /* Receive the metadata from sessiond */
2971 ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
2972 sizeof(msg));
2973 if (ret != sizeof(msg)) {
8fd623e0 2974 DBG("Consumer received unexpected message size %d (expects %zu)",
331744e3
JD
2975 ret, sizeof(msg));
2976 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
2977 /*
2978 * The ret value might be 0, meaning an orderly shutdown, but this is ok
2979 * since the caller handles this.
2980 */
2981 goto end;
2982 }
2983
9ce5646a
MD
2984 health_code_update();
2985
331744e3
JD
2986 if (msg.cmd_type == LTTNG_ERR_UND) {
2987 /* No registry found */
2988 (void) consumer_send_status_msg(ctx->consumer_metadata_socket,
2989 ret_code);
2990 ret = 0;
2991 goto end;
2992 } else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
2993 ERR("Unexpected cmd_type received %d", msg.cmd_type);
2994 ret = -1;
2995 goto end;
2996 }
2997
2998 len = msg.u.push_metadata.len;
2999 key = msg.u.push_metadata.key;
3000 offset = msg.u.push_metadata.target_offset;
93ec662e 3001 version = msg.u.push_metadata.version;
331744e3
JD
3002
3003 assert(key == channel->key);
3004 if (len == 0) {
3005 DBG("No new metadata to receive for key %" PRIu64, key);
3006 }
3007
9ce5646a
MD
3008 health_code_update();
3009
331744e3
JD
3010 /* Tell session daemon we are ready to receive the metadata. */
3011 ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
0c759fc9 3012 LTTCOMM_CONSUMERD_SUCCESS);
331744e3
JD
3013 if (ret < 0 || len == 0) {
3014 /*
3015 * Somehow, the session daemon is not responding anymore or there is
3016 * nothing to receive.
3017 */
3018 goto end;
3019 }
3020
9ce5646a
MD
3021 health_code_update();
3022
1eb682be 3023 ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
93ec662e 3024 key, offset, len, version, channel, timer, wait);
1eb682be 3025 if (ret >= 0) {
f2a444f1
DG
3026 /*
3027 * Only send the status msg if the sessiond is alive meaning a positive
3028 * ret code.
3029 */
1eb682be 3030 (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret);
f2a444f1 3031 }
331744e3
JD
3032 ret = 0;
3033
3034end:
9ce5646a
MD
3035 health_code_update();
3036
75d83e50 3037 pthread_mutex_unlock(&ctx->metadata_socket_lock);
331744e3
JD
3038 return ret;
3039}
70190e1c
DG
3040
3041/*
3042 * Return the ustctl call for the get stream id.
3043 */
3044int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream,
3045 uint64_t *stream_id)
3046{
3047 assert(stream);
3048 assert(stream_id);
3049
3050 return ustctl_get_stream_id(stream->ustream, stream_id);
3051}