CUSTOM: live timer: immediate live timer control on data pending and destroy
[lttng-tools.git] / src / common / ust-consumer / ust-consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _LGPL_SOURCE
20 #include <assert.h>
21 #include <lttng/ust-ctl.h>
22 #include <poll.h>
23 #include <pthread.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/mman.h>
27 #include <sys/socket.h>
28 #include <sys/stat.h>
29 #include <sys/types.h>
30 #include <inttypes.h>
31 #include <unistd.h>
32 #include <urcu/list.h>
33 #include <signal.h>
34 #include <stdbool.h>
35 #include <stdint.h>
36
37 #include <bin/lttng-consumerd/health-consumerd.h>
38 #include <common/common.h>
39 #include <common/sessiond-comm/sessiond-comm.h>
40 #include <common/relayd/relayd.h>
41 #include <common/compat/fcntl.h>
42 #include <common/compat/endian.h>
43 #include <common/consumer/consumer-metadata-cache.h>
44 #include <common/consumer/consumer-stream.h>
45 #include <common/consumer/consumer-timer.h>
46 #include <common/utils.h>
47 #include <common/index/index.h>
48 #include <common/consumer/consumer.h>
49 #include <common/optional.h>
50
51 #include "ust-consumer.h"
52
53 #define INT_MAX_STR_LEN 12	/* "-2147483648" is 11 chars, plus \0 */
54
55 extern struct lttng_consumer_global_data consumer_data;
56 extern int consumer_poll_timeout;
57 extern volatile int consumer_quit;
58
59 /*
60 * Free the channel object and all streams associated with it. This MUST be
61 * used if and only if the channel has _NEVER_ been added to the global
62 * channel hash table.
63 */
64 static void destroy_channel(struct lttng_consumer_channel *channel)
65 {
66 struct lttng_consumer_stream *stream, *stmp;
67
68 assert(channel);
69
70 DBG("UST consumer cleaning stream list");
71
72 cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
73 send_node) {
74
75 health_code_update();
76
77 cds_list_del(&stream->send_node);
78 ustctl_destroy_stream(stream->ustream);
79 free(stream);
80 }
81
82 /*
83 * If a channel is available, meaning it was created before the streams
84 * were, delete it.
85 */
86 if (channel->uchan) {
87 lttng_ustconsumer_del_channel(channel);
88 lttng_ustconsumer_free_channel(channel);
89 }
90 free(channel);
91 }
92
93 /*
94 * Add channel to internal consumer state.
95 *
96 * Returns 0 on success or else a negative value.
97 */
98 static int add_channel(struct lttng_consumer_channel *channel,
99 struct lttng_consumer_local_data *ctx)
100 {
101 int ret = 0;
102
103 assert(channel);
104 assert(ctx);
105
106 if (ctx->on_recv_channel != NULL) {
107 ret = ctx->on_recv_channel(channel);
108 if (ret == 0) {
109 ret = consumer_add_channel(channel, ctx);
110 } else if (ret < 0) {
111 /* Most likely an ENOMEM. */
112 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
113 goto error;
114 }
115 } else {
116 ret = consumer_add_channel(channel, ctx);
117 }
118
119 DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);
120
121 error:
122 return ret;
123 }
124
125 /*
126 * Allocate and return a consumer stream object. If _alloc_ret is not NULL, it
127 * is set to the error value when applicable; otherwise it is left untouched.
128 *
129 * Return NULL on error, else the newly allocated stream object.
130 */
131 static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
132 struct lttng_consumer_channel *channel,
133 struct lttng_consumer_local_data *ctx, int *_alloc_ret)
134 {
135 int alloc_ret;
136 struct lttng_consumer_stream *stream = NULL;
137
138 assert(channel);
139 assert(ctx);
140
141 stream = consumer_stream_create(
142 channel,
143 channel->key,
144 key,
145 LTTNG_CONSUMER_ACTIVE_STREAM,
146 channel->name,
147 channel->uid,
148 channel->gid,
149 channel->relayd_id,
150 channel->session_id,
151 cpu,
152 &alloc_ret,
153 channel->type,
154 channel->monitor);
155 if (stream == NULL) {
156 switch (alloc_ret) {
157 case -ENOENT:
158 /*
159 * We could not find the channel. Can happen if cpu hotplug
160 * happens while tearing down.
161 */
162 DBG3("Could not find channel");
163 break;
164 case -ENOMEM:
165 case -EINVAL:
166 default:
167 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
168 break;
169 }
170 goto error;
171 }
172
173 error:
174 if (_alloc_ret) {
175 *_alloc_ret = alloc_ret;
176 }
177 return stream;
178 }
179
180 /*
181 * Send the given stream pointer to the corresponding thread.
182 *
183 * Returns 0 on success else a negative value.
184 */
185 static int send_stream_to_thread(struct lttng_consumer_stream *stream,
186 struct lttng_consumer_local_data *ctx)
187 {
188 int ret;
189 struct lttng_pipe *stream_pipe;
190
191 /* Get the right pipe where the stream will be sent. */
192 if (stream->metadata_flag) {
193 ret = consumer_add_metadata_stream(stream);
194 if (ret) {
195 ERR("Consumer add metadata stream %" PRIu64 " failed.",
196 stream->key);
197 goto error;
198 }
199 stream_pipe = ctx->consumer_metadata_pipe;
200 } else {
201 ret = consumer_add_data_stream(stream);
202 if (ret) {
203 ERR("Consumer add stream %" PRIu64 " failed.",
204 stream->key);
205 goto error;
206 }
207 stream_pipe = ctx->consumer_data_pipe;
208 }
209
210 /*
211 * From this point on, the stream's ownership has been moved away from
212 * the channel and becomes globally visible.
213 */
214 stream->globally_visible = 1;
215
216 ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
217 if (ret < 0) {
218 ERR("Consumer write %s stream to pipe %d",
219 stream->metadata_flag ? "metadata" : "data",
220 lttng_pipe_get_writefd(stream_pipe));
221 if (stream->metadata_flag) {
222 consumer_del_stream_for_metadata(stream);
223 } else {
224 consumer_del_stream_for_data(stream);
225 }
226 }
227 error:
228 return ret;
229 }
230
231 static
232 int get_stream_shm_path(char *stream_shm_path, const char *shm_path, int cpu)
233 {
234 char cpu_nr[INT_MAX_STR_LEN]; /* int max len */
235 int ret;
236
237 strncpy(stream_shm_path, shm_path, PATH_MAX);
238 stream_shm_path[PATH_MAX - 1] = '\0';
239 ret = snprintf(cpu_nr, INT_MAX_STR_LEN, "%i", cpu);
240 if (ret < 0) {
241 PERROR("snprintf");
242 goto end;
243 }
244 strncat(stream_shm_path, cpu_nr,
245 PATH_MAX - strlen(stream_shm_path) - 1);
246 ret = 0;
247 end:
248 return ret;
249 }
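/*
 * Example (hypothetical values): with shm_path "/dev/shm/sess" and cpu 3,
 * get_stream_shm_path() yields "/dev/shm/sess3". A minimal caller sketch,
 * assuming only the declarations already present in this file:
 */
#if 0	/* illustrative sketch, not compiled */
static int example_stream_shm_path(void)
{
	char stream_path[PATH_MAX];

	return get_stream_shm_path(stream_path, "/dev/shm/sess", 3);
}
#endif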
250
251 /*
252 * Create streams for the given channel using liblttng-ust-ctl.
253 *
254 * Return 0 on success else a negative value.
255 */
256 static int create_ust_streams(struct lttng_consumer_channel *channel,
257 struct lttng_consumer_local_data *ctx)
258 {
259 int ret, cpu = 0;
260 struct ustctl_consumer_stream *ustream;
261 struct lttng_consumer_stream *stream;
262
263 assert(channel);
264 assert(ctx);
265
266 /*
267 * Loop while a stream is available from ustctl. When NULL is returned,
268 * we have reached the end of the possible streams for the channel.
269 */
270 while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
271 int wait_fd;
272 int ust_metadata_pipe[2];
273
274 health_code_update();
275
276 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
277 ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
278 if (ret < 0) {
279 ERR("Create ust metadata poll pipe");
280 goto error;
281 }
282 wait_fd = ust_metadata_pipe[0];
283 } else {
284 wait_fd = ustctl_stream_get_wait_fd(ustream);
285 }
286
287 /* Allocate consumer stream object. */
288 stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
289 if (!stream) {
290 goto error_alloc;
291 }
292 stream->ustream = ustream;
293 /*
294 * Store it so we avoid multiple function calls afterwards since this
295 * value is used heavily in the stream threads. This is UST specific,
296 * which is why it's done after allocation.
297 */
298 stream->wait_fd = wait_fd;
299
300 /*
301 * Increment channel refcount since the channel reference has now been
302 * assigned in the allocation process above.
303 */
304 if (stream->chan->monitor) {
305 uatomic_inc(&stream->chan->refcount);
306 }
307
308 /*
309 * Order is important, which is why a list is used. On error, the caller
310 * should clean this list.
311 */
312 cds_list_add_tail(&stream->send_node, &channel->streams.head);
313
314 ret = ustctl_get_max_subbuf_size(stream->ustream,
315 &stream->max_sb_size);
316 if (ret < 0) {
317 ERR("ustctl_get_max_subbuf_size failed for stream %s",
318 stream->name);
319 goto error;
320 }
321
322 /* Do actions once stream has been received. */
323 if (ctx->on_recv_stream) {
324 ret = ctx->on_recv_stream(stream);
325 if (ret < 0) {
326 goto error;
327 }
328 }
329
330 DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
331 stream->name, stream->key, stream->relayd_stream_id);
332
333 /* Set next CPU stream. */
334 channel->streams.count = ++cpu;
335
336 /* Keep stream reference when creating metadata. */
337 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
338 channel->metadata_stream = stream;
339 if (channel->monitor) {
340 /* Set metadata poll pipe if we created one */
341 memcpy(stream->ust_metadata_poll_pipe,
342 ust_metadata_pipe,
343 sizeof(ust_metadata_pipe));
344 }
345 }
346 }
347
348 return 0;
349
350 error:
351 error_alloc:
352 return ret;
353 }
354
355 /*
356 * create_posix_shm is never called concurrently within a process.
357 */
358 static
359 int create_posix_shm(void)
360 {
361 char tmp_name[NAME_MAX];
362 int shmfd, ret;
363
364 ret = snprintf(tmp_name, NAME_MAX, "/ust-shm-consumer-%d", getpid());
365 if (ret < 0) {
366 PERROR("snprintf");
367 return -1;
368 }
369 /*
370 * Allocate shm, and immediately unlink its shm object, keeping
371 * only the file descriptor as a reference to the object.
372 * We specifically do _not_ use the / at the beginning of the
373 * pathname so that some OS implementations can keep it local to
374 * the process (POSIX leaves this implementation-defined).
375 */
376 shmfd = shm_open(tmp_name, O_CREAT | O_EXCL | O_RDWR, 0700);
377 if (shmfd < 0) {
378 PERROR("shm_open");
379 goto error_shm_open;
380 }
381 ret = shm_unlink(tmp_name);
382 if (ret < 0 && errno != ENOENT) {
383 PERROR("shm_unlink");
384 goto error_shm_release;
385 }
386 return shmfd;
387
388 error_shm_release:
389 ret = close(shmfd);
390 if (ret) {
391 PERROR("close");
392 }
393 error_shm_open:
394 return -1;
395 }
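/*
 * The returned fd refers to an already-unlinked shm object. A minimal
 * sketch of how such an fd is typically used, for illustration only
 * (the actual sizing and mapping are performed by liblttng-ust-ctl):
 */
#if 0	/* illustrative sketch, not compiled */
static void *example_map_anonymous_shm(size_t len)
{
	void *p = NULL;
	int fd = create_posix_shm();

	if (fd < 0) {
		return NULL;
	}
	if (ftruncate(fd, (off_t) len) == 0) {
		p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED) {
			p = NULL;
		}
	}
	(void) close(fd);	/* The mapping keeps the object alive. */
	return p;
}
#endif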
396
397 static int open_ust_stream_fd(struct lttng_consumer_channel *channel,
398 struct ustctl_consumer_channel_attr *attr,
399 int cpu)
400 {
401 char shm_path[PATH_MAX];
402 int ret;
403
404 if (!channel->shm_path[0]) {
405 return create_posix_shm();
406 }
407 ret = get_stream_shm_path(shm_path, channel->shm_path, cpu);
408 if (ret) {
409 goto error_shm_path;
410 }
411 return run_as_open(shm_path,
412 O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR,
413 channel->uid, channel->gid);
414
415 error_shm_path:
416 return -1;
417 }
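/*
 * Note on the two backing modes above: without a configured shm_path,
 * the stream is backed by an anonymous, already-unlinked POSIX shm
 * object (create_posix_shm); with one, it is backed by a per-CPU file
 * created under the channel's shm_path with the channel's credentials
 * (run_as_open).
 */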
418
419 /*
420 * Create an UST channel with the given attributes and send it to the session
421 * daemon using the ust ctl API.
422 *
423 * Return 0 on success or else a negative value.
424 */
425 static int create_ust_channel(struct lttng_consumer_channel *channel,
426 struct ustctl_consumer_channel_attr *attr,
427 struct ustctl_consumer_channel **ust_chanp)
428 {
429 int ret, nr_stream_fds, i, j;
430 int *stream_fds;
431 struct ustctl_consumer_channel *ust_channel;
432
433 assert(channel);
434 assert(attr);
435 assert(ust_chanp);
436
437 DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
438 "subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
439 "switch_timer_interval: %u, read_timer_interval: %u, "
440 "output: %d, type: %d", attr->overwrite, attr->subbuf_size,
441 attr->num_subbuf, attr->switch_timer_interval,
442 attr->read_timer_interval, attr->output, attr->type);
443
444 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA)
445 nr_stream_fds = 1;
446 else
447 nr_stream_fds = ustctl_get_nr_stream_per_channel();
448 stream_fds = zmalloc(nr_stream_fds * sizeof(*stream_fds));
449 if (!stream_fds) {
450 ret = -1;
451 goto error_alloc;
452 }
453 for (i = 0; i < nr_stream_fds; i++) {
454 stream_fds[i] = open_ust_stream_fd(channel, attr, i);
455 if (stream_fds[i] < 0) {
456 ret = -1;
457 goto error_open;
458 }
459 }
460 ust_channel = ustctl_create_channel(attr, stream_fds, nr_stream_fds);
461 if (!ust_channel) {
462 ret = -1;
463 goto error_create;
464 }
465 channel->nr_stream_fds = nr_stream_fds;
466 channel->stream_fds = stream_fds;
467 *ust_chanp = ust_channel;
468
469 return 0;
470
471 error_create:
472 error_open:
473 for (j = i - 1; j >= 0; j--) {
474 int closeret;
475
476 closeret = close(stream_fds[j]);
477 if (closeret) {
478 PERROR("close");
479 }
480 if (channel->shm_path[0]) {
481 char shm_path[PATH_MAX];
482
483 closeret = get_stream_shm_path(shm_path,
484 channel->shm_path, j);
485 if (closeret) {
486 ERR("Cannot get stream shm path");
487 }
488 closeret = run_as_unlink(shm_path,
489 channel->uid, channel->gid);
490 if (closeret) {
491 PERROR("unlink %s", shm_path);
492 }
493 }
494 }
495 /* Try to rmdir all directories under shm_path root. */
496 if (channel->root_shm_path[0]) {
497 (void) run_as_recursive_rmdir(channel->root_shm_path,
498 channel->uid, channel->gid);
499 }
500 free(stream_fds);
501 error_alloc:
502 return ret;
503 }
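/*
 * A minimal sketch of the attribute set-up a caller performs before
 * create_ust_channel(). The field values below are hypothetical; the
 * field names match the uses of struct ustctl_consumer_channel_attr
 * elsewhere in this file:
 */
#if 0	/* illustrative sketch, not compiled */
static int example_create_channel(struct lttng_consumer_channel *channel)
{
	struct ustctl_consumer_channel_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.subbuf_size = 4096;	/* hypothetical */
	attr.num_subbuf = 4;		/* hypothetical */
	attr.overwrite = 0;
	attr.switch_timer_interval = 0;
	attr.read_timer_interval = 0;
	attr.output = LTTNG_UST_MMAP;
	attr.type = LTTNG_UST_CHAN_PER_CPU;

	return create_ust_channel(channel, &attr, &channel->uchan);
}
#endif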
504
505 /*
506 * Send a single given stream to the session daemon using the sock.
507 *
508 * Return 0 on success else a negative value.
509 */
510 static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
511 {
512 int ret;
513
514 assert(stream);
515 assert(sock >= 0);
516
517 DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
518
519 /* Send stream to session daemon. */
520 ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
521 if (ret < 0) {
522 goto error;
523 }
524
525 error:
526 return ret;
527 }
528
529 /*
530 * Send channel to sessiond.
531 *
532 * Return 0 on success or else a negative value.
533 */
534 static int send_sessiond_channel(int sock,
535 struct lttng_consumer_channel *channel,
536 struct lttng_consumer_local_data *ctx, int *relayd_error)
537 {
538 int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
539 struct lttng_consumer_stream *stream;
540 uint64_t relayd_id = -1ULL;
541
542 assert(channel);
543 assert(ctx);
544 assert(sock >= 0);
545
546 DBG("UST consumer sending channel %s to sessiond", channel->name);
547
548 if (channel->relayd_id != (uint64_t) -1ULL) {
549 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
550
551 health_code_update();
552
553 /* Try to send the stream to the relayd if one is available. */
554 ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
555 if (ret < 0) {
556 /*
557 * Flag that the relayd was the problem here probably due to a
558 * communicaton error on the socket.
559 */
560 if (relayd_error) {
561 *relayd_error = 1;
562 }
563 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
564 }
565 if (relayd_id == -1ULL) {
566 relayd_id = stream->relayd_id;
567 }
568 }
569 }
570
571 /* Inform sessiond that we are about to send channel and streams. */
572 ret = consumer_send_status_msg(sock, ret_code);
573 if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
574 /*
575 * Either the session daemon is not responding or the relayd died so we
576 * stop now.
577 */
578 goto error;
579 }
580
581 /* Send channel to sessiond. */
582 ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
583 if (ret < 0) {
584 goto error;
585 }
586
587 ret = ustctl_channel_close_wakeup_fd(channel->uchan);
588 if (ret < 0) {
589 goto error;
590 }
591
592 /* The channel was sent successfully to the sessiond at this point. */
593 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
594
595 health_code_update();
596
597 /* Send stream to session daemon. */
598 ret = send_sessiond_stream(sock, stream);
599 if (ret < 0) {
600 goto error;
601 }
602 }
603
604 /* Tell sessiond there is no more stream. */
605 ret = ustctl_send_stream_to_sessiond(sock, NULL);
606 if (ret < 0) {
607 goto error;
608 }
609
610 DBG("UST consumer NULL stream sent to sessiond");
611
612 return 0;
613
614 error:
615 if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
616 ret = -1;
617 }
618 return ret;
619 }
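/*
 * Wire sequence of the exchange above, for reference: one status message
 * (consumer_send_status_msg), then the channel object
 * (ustctl_send_channel_to_sessiond), then one message per stream
 * (send_sessiond_stream), terminated by a NULL stream acting as an
 * end-of-list sentinel.
 */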
620
621 /*
622 * Creates a channel and its streams and adds the channel to the internal
623 * consumer state. The created streams must ONLY be sent once the
624 * GET_CHANNEL command is received.
625 *
626 * Return 0 on success; otherwise a negative value is returned and the
627 * channel MUST be destroyed by consumer_del_channel().
628 */
629 static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
630 struct lttng_consumer_channel *channel,
631 struct ustctl_consumer_channel_attr *attr)
632 {
633 int ret;
634
635 assert(ctx);
636 assert(channel);
637 assert(attr);
638
639 /*
640 * This value is still used by the kernel consumer since, for the kernel,
641 * the stream ownership is not IN the consumer, so we need to know the
642 * number of streams left to initialize in order to know when to delete
643 * the channel (see consumer.c).
644 *
645 * As for the user space tracer, the consumer creates and sends the
646 * streams to the session daemon, which only sends them to the application
647 * once every stream of a channel has been received, making this value
648 * useless because they will be added to the poll thread before the
649 * application receives them. This ensures that a stream cannot hang up
650 * during initialization of a channel.
651 */
652 channel->nb_init_stream_left = 0;
653
654 /* The reply msg status is handled in the following call. */
655 ret = create_ust_channel(channel, attr, &channel->uchan);
656 if (ret < 0) {
657 goto end;
658 }
659
660 channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);
661
662 /*
663 * For the snapshots (no monitor), we create the metadata streams
664 * on demand, not during the channel creation.
665 */
666 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
667 ret = 0;
668 goto end;
669 }
670
671 /* Open all streams for this channel. */
672 ret = create_ust_streams(channel, ctx);
673 if (ret < 0) {
674 goto end;
675 }
676
677 end:
678 return ret;
679 }
680
681 /*
682 * Send all streams of a channel to the right thread handling them.
683 *
684 * On error, return a negative value else 0 on success.
685 */
686 static int send_streams_to_thread(struct lttng_consumer_channel *channel,
687 struct lttng_consumer_local_data *ctx)
688 {
689 int ret = 0;
690 struct lttng_consumer_stream *stream, *stmp;
691
692 assert(channel);
693 assert(ctx);
694
695 /* Send streams to the corresponding thread. */
696 cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
697 send_node) {
698
699 health_code_update();
700
701 /* Sending the stream to the thread. */
702 ret = send_stream_to_thread(stream, ctx);
703 if (ret < 0) {
704 /*
705 * If we are unable to send the stream to the thread, there is
706 * a big problem so just stop everything.
707 */
708 /* Remove node from the channel stream list. */
709 cds_list_del(&stream->send_node);
710 goto error;
711 }
712
713 /* Remove node from the channel stream list. */
714 cds_list_del(&stream->send_node);
715
716 }
717
718 error:
719 return ret;
720 }
721
722 /*
723 * Flush channel's streams using the given key to retrieve the channel.
724 *
725 * Return 0 on success else an LTTng error code.
726 */
727 static int flush_channel(uint64_t chan_key)
728 {
729 int ret = 0;
730 struct lttng_consumer_channel *channel;
731 struct lttng_consumer_stream *stream;
732 struct lttng_ht *ht;
733 struct lttng_ht_iter iter;
734
735 DBG("UST consumer flush channel key %" PRIu64, chan_key);
736
737 rcu_read_lock();
738 channel = consumer_find_channel(chan_key);
739 if (!channel) {
740 ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
741 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
742 goto error;
743 }
744
745 ht = consumer_data.stream_per_chan_id_ht;
746
747 /* For each stream of the channel id, flush it. */
748 cds_lfht_for_each_entry_duplicate(ht->ht,
749 ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
750 &channel->key, &iter.iter, stream, node_channel_id.node) {
751
752 health_code_update();
753
754 pthread_mutex_lock(&stream->lock);
755
756 /*
757 * Protect against concurrent teardown of a stream.
758 */
759 if (cds_lfht_is_node_deleted(&stream->node.node)) {
760 goto next;
761 }
762
763 if (!stream->quiescent) {
764 ustctl_flush_buffer(stream->ustream, 0);
765 stream->quiescent = true;
766 }
767 next:
768 pthread_mutex_unlock(&stream->lock);
769 }
770 error:
771 rcu_read_unlock();
772 return ret;
773 }
774
775 /*
776 * Clear quiescent state from channel's streams using the given key to
777 * retrieve the channel.
778 *
779 * Return 0 on success else an LTTng error code.
780 */
781 static int clear_quiescent_channel(uint64_t chan_key)
782 {
783 int ret = 0;
784 struct lttng_consumer_channel *channel;
785 struct lttng_consumer_stream *stream;
786 struct lttng_ht *ht;
787 struct lttng_ht_iter iter;
788
789 DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);
790
791 rcu_read_lock();
792 channel = consumer_find_channel(chan_key);
793 if (!channel) {
794 ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
795 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
796 goto error;
797 }
798
799 ht = consumer_data.stream_per_chan_id_ht;
800
801 /* For each stream of the channel id, clear quiescent state. */
802 cds_lfht_for_each_entry_duplicate(ht->ht,
803 ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
804 &channel->key, &iter.iter, stream, node_channel_id.node) {
805
806 health_code_update();
807
808 pthread_mutex_lock(&stream->lock);
809 stream->quiescent = false;
810 pthread_mutex_unlock(&stream->lock);
811 }
812 error:
813 rcu_read_unlock();
814 return ret;
815 }
816
817 /*
818 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
819 * RCU read side lock MUST be acquired before calling this function.
820 *
821 * Return 0 on success else an LTTng error code.
822 */
823 static int close_metadata(uint64_t chan_key)
824 {
825 int ret = 0;
826 struct lttng_consumer_channel *channel;
827 unsigned int channel_monitor;
828
829 DBG("UST consumer close metadata key %" PRIu64, chan_key);
830
831 channel = consumer_find_channel(chan_key);
832 if (!channel) {
833 /*
834 * This is possible if the metadata thread has issued a delete because
835 * the endpoint of the stream hung up. There is no way the session
836 * daemon can know about it, thus use a DBG instead of an actual
837 * error.
838 */
839 DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
840 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
841 goto error;
842 }
843
844 pthread_mutex_lock(&consumer_data.lock);
845 pthread_mutex_lock(&channel->lock);
846 channel_monitor = channel->monitor;
847 if (cds_lfht_is_node_deleted(&channel->node.node)) {
848 goto error_unlock;
849 }
850
851 lttng_ustconsumer_close_metadata(channel);
852 pthread_mutex_unlock(&channel->lock);
853 pthread_mutex_unlock(&consumer_data.lock);
854
855 /*
856 * The ownership of a metadata channel depends on the type of
857 * session to which it belongs. In effect, the monitor flag is checked
858 * to determine if this metadata channel is in "snapshot" mode or not.
859 *
860 * In the non-snapshot case, the metadata channel is created along with
861 * a single stream which will remain present until the metadata channel
862 * is destroyed (on the destruction of its session). In this case, the
863 * metadata stream is monitored by the metadata poll thread and holds
864 * the ownership of its channel.
865 *
866 * Closing the metadata will cause the metadata stream's "metadata poll
867 * pipe" to be closed. Closing this pipe will wake-up the metadata poll
868 * thread which will teardown the metadata stream which, in return,
869 * deletes the metadata channel.
870 *
871 * In the snapshot case, the metadata stream is created and destroyed
872 * on every snapshot record. Since the channel doesn't have an owner
873 * other than the session daemon, it is safe to destroy it immediately
874 * on reception of the CLOSE_METADATA command.
875 */
876 if (!channel_monitor) {
877 /*
878 * The channel and consumer_data locks must be
879 * released before this call since consumer_del_channel
880 * re-acquires the channel and consumer_data locks to teardown
881 * the channel and queue its reclamation by the "call_rcu"
882 * worker thread.
883 */
884 consumer_del_channel(channel);
885 }
886
887 return ret;
888 error_unlock:
889 pthread_mutex_unlock(&channel->lock);
890 pthread_mutex_unlock(&consumer_data.lock);
891 error:
892 return ret;
893 }
894
895 /*
896 * RCU read side lock MUST be acquired before calling this function.
897 *
898 * Return 0 on success else an LTTng error code.
899 */
900 static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
901 {
902 int ret;
903 struct lttng_consumer_channel *metadata;
904
905 DBG("UST consumer setup metadata key %" PRIu64, key);
906
907 metadata = consumer_find_channel(key);
908 if (!metadata) {
909 ERR("UST consumer push metadata %" PRIu64 " not found", key);
910 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
911 goto end;
912 }
913
914 /*
915 * In no monitor mode, the metadata channel has no stream(s) so skip the
916 * ownership transfer to the metadata thread.
917 */
918 if (!metadata->monitor) {
919 DBG("Metadata channel in no monitor");
920 ret = 0;
921 goto end;
922 }
923
924 /*
925 * Send metadata stream to relayd if one is available. Availability is
926 * known if the stream is still in the list of the channel.
927 */
928 if (cds_list_empty(&metadata->streams.head)) {
929 ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
930 ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
931 goto error_no_stream;
932 }
933
934 /* Send metadata stream to relayd if needed. */
935 if (metadata->metadata_stream->relayd_id != (uint64_t) -1ULL) {
936 ret = consumer_send_relayd_stream(metadata->metadata_stream,
937 metadata->pathname);
938 if (ret < 0) {
939 ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
940 goto error;
941 }
942 ret = consumer_send_relayd_streams_sent(
943 metadata->metadata_stream->relayd_id);
944 if (ret < 0) {
945 ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
946 goto error;
947 }
948 }
949
950 ret = send_streams_to_thread(metadata, ctx);
951 if (ret < 0) {
952 /*
953 * If we are unable to send the stream to the thread, there is
954 * a big problem so just stop everything.
955 */
956 ret = LTTCOMM_CONSUMERD_FATAL;
957 goto error;
958 }
959 /* List MUST be empty after or else it could be reused. */
960 assert(cds_list_empty(&metadata->streams.head));
961
962 ret = 0;
963 goto end;
964
965 error:
966 /*
967 * Delete metadata channel on error. At this point, the metadata stream can
968 * NOT be monitored by the metadata thread thus having the guarantee that
969 * the stream is still in the local stream list of the channel. This call
970 * will make sure to clean that list.
971 */
972 consumer_stream_destroy(metadata->metadata_stream, NULL);
973 cds_list_del(&metadata->metadata_stream->send_node);
974 metadata->metadata_stream = NULL;
975 error_no_stream:
976 end:
977 return ret;
978 }
979
980 /*
981 * Snapshot the whole metadata.
982 *
983 * Returns 0 on success, < 0 on error
984 */
985 static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
986 struct lttng_consumer_local_data *ctx)
987 {
988 int ret = 0;
989 struct lttng_consumer_channel *metadata_channel;
990 struct lttng_consumer_stream *metadata_stream;
991
992 assert(path);
993 assert(ctx);
994
995 DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
996 key, path);
997
998 rcu_read_lock();
999
1000 metadata_channel = consumer_find_channel(key);
1001 if (!metadata_channel) {
1002 ERR("UST snapshot metadata channel not found for key %" PRIu64,
1003 key);
1004 ret = -1;
1005 goto error;
1006 }
1007 assert(!metadata_channel->monitor);
1008
1009 health_code_update();
1010
1011 /*
1012 * Ask the sessiond if we have new metadata waiting and update the
1013 * consumer metadata cache.
1014 */
1015 ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);
1016 if (ret < 0) {
1017 goto error;
1018 }
1019
1020 health_code_update();
1021
1022 /*
1023 * The metadata stream is NOT created in no monitor mode when the channel
1024 * is created on a sessiond ask channel command.
1025 */
1026 ret = create_ust_streams(metadata_channel, ctx);
1027 if (ret < 0) {
1028 goto error;
1029 }
1030
1031 metadata_stream = metadata_channel->metadata_stream;
1032 assert(metadata_stream);
1033
1034 if (relayd_id != (uint64_t) -1ULL) {
1035 metadata_stream->relayd_id = relayd_id;
1036 ret = consumer_send_relayd_stream(metadata_stream, path);
1037 if (ret < 0) {
1038 goto error_stream;
1039 }
1040 } else {
1041 ret = utils_create_stream_file(path, metadata_stream->name,
1042 metadata_stream->chan->tracefile_size,
1043 metadata_stream->tracefile_count_current,
1044 metadata_stream->uid, metadata_stream->gid, NULL);
1045 if (ret < 0) {
1046 goto error_stream;
1047 }
1048 metadata_stream->out_fd = ret;
1049 metadata_stream->tracefile_size_current = 0;
1050 }
1051
1052 do {
1053 health_code_update();
1054
1055 ret = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
1056 if (ret < 0) {
1057 goto error_stream;
1058 }
1059 } while (ret > 0);
1060
1061 error_stream:
1062 /*
1063 * Clean up the stream completely because the next snapshot will use a new
1064 * metadata stream.
1065 */
1066 consumer_stream_destroy(metadata_stream, NULL);
1067 cds_list_del(&metadata_stream->send_node);
1068 metadata_channel->metadata_stream = NULL;
1069
1070 error:
1071 rcu_read_unlock();
1072 return ret;
1073 }
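/*
 * Summary of the sequence above: request the latest metadata from the
 * sessiond, create a transient metadata stream (no monitor mode), send
 * it to the relayd or create a local stream file, drain it with
 * lttng_consumer_read_subbuffer(), then destroy the stream so that the
 * next snapshot starts from a fresh one.
 */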
1074
1075 static
1076 int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
1077 const char **addr)
1078 {
1079 int ret;
1080 unsigned long mmap_offset;
1081 const char *mmap_base;
1082
1083 mmap_base = ustctl_get_mmap_base(stream->ustream);
1084 if (!mmap_base) {
1085 ERR("Failed to get mmap base for stream `%s`",
1086 stream->name);
1087 ret = -EPERM;
1088 goto error;
1089 }
1090
1091 ret = ustctl_get_mmap_read_offset(stream->ustream, &mmap_offset);
1092 if (ret != 0) {
1093 ERR("Failed to get mmap offset for stream `%s`", stream->name);
1094 ret = -EINVAL;
1095 goto error;
1096 }
1097
1098 *addr = mmap_base + mmap_offset;
1099 error:
1100 return ret;
1101
1102 }
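/*
 * A minimal caller sketch for get_current_subbuf_addr(): the returned
 * address points at the data of the sub-buffer currently held via
 * ustctl_get_subbuf(), and the padded sub-buffer size bounds how much
 * of it may be read. Illustrative only:
 */
#if 0	/* illustrative sketch, not compiled */
static int example_read_current_subbuf(struct lttng_consumer_stream *stream)
{
	int ret;
	const char *addr;
	unsigned long padded_len;

	ret = get_current_subbuf_addr(stream, &addr);
	if (ret) {
		return ret;
	}
	ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
	if (ret < 0) {
		return ret;
	}
	/* addr[0 .. padded_len - 1] may now be read. */
	return 0;
}
#endif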
1103
1104 /*
1105 * Take a snapshot of all the stream of a channel.
1106 *
1107 * Returns 0 on success, < 0 on error
1108 */
1109 static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
1110 uint64_t nb_packets_per_stream, struct lttng_consumer_local_data *ctx)
1111 {
1112 int ret;
1113 unsigned use_relayd = 0;
1114 unsigned long consumed_pos, produced_pos;
1115 struct lttng_consumer_channel *channel;
1116 struct lttng_consumer_stream *stream;
1117
1118 assert(path);
1119 assert(ctx);
1120
1121 rcu_read_lock();
1122
1123 if (relayd_id != (uint64_t) -1ULL) {
1124 use_relayd = 1;
1125 }
1126
1127 channel = consumer_find_channel(key);
1128 if (!channel) {
1129 ERR("UST snapshot channel not found for key %" PRIu64, key);
1130 ret = -1;
1131 goto error;
1132 }
1133 assert(!channel->monitor);
1134 DBG("UST consumer snapshot channel %" PRIu64, key);
1135
1136 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
1137 health_code_update();
1138
1139 /* Lock stream because we are about to change its state. */
1140 pthread_mutex_lock(&stream->lock);
1141 stream->relayd_id = relayd_id;
1142
1143 if (use_relayd) {
1144 ret = consumer_send_relayd_stream(stream, path);
1145 if (ret < 0) {
1146 goto error_unlock;
1147 }
1148 } else {
1149 ret = utils_create_stream_file(path, stream->name,
1150 stream->chan->tracefile_size,
1151 stream->tracefile_count_current,
1152 stream->uid, stream->gid, NULL);
1153 if (ret < 0) {
1154 goto error_unlock;
1155 }
1156 stream->out_fd = ret;
1157 stream->tracefile_size_current = 0;
1158
1159 DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
1160 stream->name, stream->key);
1161 }
1162 if (relayd_id != -1ULL) {
1163 ret = consumer_send_relayd_streams_sent(relayd_id);
1164 if (ret < 0) {
1165 goto error_unlock;
1166 }
1167 }
1168
1169 /*
1170 * If tracing is active, we want to perform a "full" buffer flush.
1171 * Else, if quiescent, it has already been done by the prior stop.
1172 */
1173 if (!stream->quiescent) {
1174 ustctl_flush_buffer(stream->ustream, 0);
1175 }
1176
1177 ret = lttng_ustconsumer_take_snapshot(stream);
1178 if (ret < 0) {
1179 ERR("Taking UST snapshot");
1180 goto error_unlock;
1181 }
1182
1183 ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
1184 if (ret < 0) {
1185 ERR("Produced UST snapshot position");
1186 goto error_unlock;
1187 }
1188
1189 ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
1190 if (ret < 0) {
1191 ERR("Consumerd UST snapshot position");
1192 goto error_unlock;
1193 }
1194
1195 /*
1196 * The original value is sent back if max stream size is larger than
1197 * the possible size of the snapshot. Also, we assume that the session
1198 * daemon should never send a maximum stream size that is lower than
1199 * subbuffer size.
1200 */
1201 consumed_pos = consumer_get_consume_start_pos(consumed_pos,
1202 produced_pos, nb_packets_per_stream,
1203 stream->max_sb_size);
1204
1205 while (consumed_pos < produced_pos) {
1206 ssize_t read_len;
1207 unsigned long len, padded_len;
1208 const char *subbuf_addr;
1209 struct lttng_buffer_view subbuf_view;
1210
1211 health_code_update();
1212
1213 DBG("UST consumer taking snapshot at pos %lu", consumed_pos);
1214
1215 ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
1216 if (ret < 0) {
1217 if (ret != -EAGAIN) {
1218 PERROR("ustctl_get_subbuf snapshot");
1219 goto error_close_stream;
1220 }
1221 DBG("UST consumer get subbuf failed. Skipping it.");
1222 consumed_pos += stream->max_sb_size;
1223 stream->chan->lost_packets++;
1224 continue;
1225 }
1226
1227 ret = ustctl_get_subbuf_size(stream->ustream, &len);
1228 if (ret < 0) {
1229 ERR("Snapshot ustctl_get_subbuf_size");
1230 goto error_put_subbuf;
1231 }
1232
1233 ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
1234 if (ret < 0) {
1235 ERR("Snapshot ustctl_get_padded_subbuf_size");
1236 goto error_put_subbuf;
1237 }
1238
1239 ret = get_current_subbuf_addr(stream, &subbuf_addr);
1240 if (ret) {
1241 goto error_put_subbuf;
1242 }
1243
1244 subbuf_view = lttng_buffer_view_init(
1245 subbuf_addr, 0, padded_len);
1246 read_len = lttng_consumer_on_read_subbuffer_mmap(
1247 stream, &subbuf_view, padded_len - len);
1248 if (use_relayd) {
1249 if (read_len != len) {
1250 ret = -EPERM;
1251 goto error_put_subbuf;
1252 }
1253 } else {
1254 if (read_len != padded_len) {
1255 ret = -EPERM;
1256 goto error_put_subbuf;
1257 }
1258 }
1259
1260 ret = ustctl_put_subbuf(stream->ustream);
1261 if (ret < 0) {
1262 ERR("Snapshot ustctl_put_subbuf");
1263 goto error_close_stream;
1264 }
1265 consumed_pos += stream->max_sb_size;
1266 }
1267
1268 /* Simply close the stream so we can use it on the next snapshot. */
1269 consumer_stream_close(stream);
1270 pthread_mutex_unlock(&stream->lock);
1271 }
1272
1273 rcu_read_unlock();
1274 return 0;
1275
1276 error_put_subbuf:
1277 if (ustctl_put_subbuf(stream->ustream) < 0) {
1278 ERR("Snapshot ustctl_put_subbuf");
1279 }
1280 error_close_stream:
1281 consumer_stream_close(stream);
1282 error_unlock:
1283 pthread_mutex_unlock(&stream->lock);
1284 error:
1285 rcu_read_unlock();
1286 return ret;
1287 }
1288
1289 /*
1290 * Receive the metadata updates from the sessiond. Supports receiving
1291 * overlapping metadata, but it always needs to belong to a contiguous
1292 * range starting from 0 (e.g. [0, 64) then [32, 128) is valid; a gap is not).
1293 * Be careful about the locks held when calling this function: it needs
1294 * the metadata cache flush to concurrently progress in order to
1295 * complete.
1296 */
1297 int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
1298 uint64_t len, uint64_t version,
1299 struct lttng_consumer_channel *channel, int timer, int wait)
1300 {
1301 int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1302 char *metadata_str;
1303
1304 DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);
1305
1306 metadata_str = zmalloc(len * sizeof(char));
1307 if (!metadata_str) {
1308 PERROR("zmalloc metadata string");
1309 ret_code = LTTCOMM_CONSUMERD_ENOMEM;
1310 goto end;
1311 }
1312
1313 health_code_update();
1314
1315 /* Receive metadata string. */
1316 ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
1317 if (ret < 0) {
1318 /* Session daemon is dead so return gracefully. */
1319 ret_code = ret;
1320 goto end_free;
1321 }
1322
1323 health_code_update();
1324
1325 pthread_mutex_lock(&channel->metadata_cache->lock);
1326 ret = consumer_metadata_cache_write(channel, offset, len, version,
1327 metadata_str);
1328 if (ret < 0) {
1329 /* Unable to handle metadata. Notify session daemon. */
1330 ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
1331 /*
1332 * Skip metadata flush on write error since the offset and len might
1333 * not have been updated which could create an infinite loop below when
1334 * waiting for the metadata cache to be flushed.
1335 */
1336 pthread_mutex_unlock(&channel->metadata_cache->lock);
1337 goto end_free;
1338 }
1339 pthread_mutex_unlock(&channel->metadata_cache->lock);
1340
1341 if (!wait) {
1342 goto end_free;
1343 }
1344 while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
1345 DBG("Waiting for metadata to be flushed");
1346
1347 health_code_update();
1348
1349 usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
1350 }
1351
1352 end_free:
1353 free(metadata_str);
1354 end:
1355 return ret_code;
1356 }
1357
1358 /*
1359 * Receive command from session daemon and process it.
1360 *
1361 * Return 1 on success else a negative value or 0.
1362 */
1363 int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
1364 int sock, struct pollfd *consumer_sockpoll)
1365 {
1366 ssize_t ret;
1367 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1368 struct lttcomm_consumer_msg msg;
1369 struct lttng_consumer_channel *channel = NULL;
1370
1371 health_code_update();
1372
1373 ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
1374 if (ret != sizeof(msg)) {
1375 DBG("Consumer received unexpected message size %zd (expects %zu)",
1376 ret, sizeof(msg));
1377 /*
1378 * The ret value might be 0, meaning an orderly shutdown, but this is ok
1379 * since the caller handles this.
1380 */
1381 if (ret > 0) {
1382 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
1383 ret = -1;
1384 }
1385 return ret;
1386 }
1387
1388 health_code_update();
1389
1390 /* deprecated */
1391 assert(msg.cmd_type != LTTNG_CONSUMER_STOP);
1392
1393 health_code_update();
1394
1395 /* relayd needs RCU read-side lock */
1396 rcu_read_lock();
1397
1398 switch (msg.cmd_type) {
1399 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
1400 {
1401 /* Session daemon status messages are handled in the following call. */
1402 consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
1403 msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
1404 &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
1405 msg.u.relayd_sock.relayd_session_id);
1406 goto end_nosignal;
1407 }
1408 case LTTNG_CONSUMER_DESTROY_RELAYD:
1409 {
1410 uint64_t index = msg.u.destroy_relayd.net_seq_idx;
1411 struct consumer_relayd_sock_pair *relayd;
1412
1413 DBG("UST consumer destroying relayd %" PRIu64, index);
1414
1415 /* Get relayd reference if exists. */
1416 relayd = consumer_find_relayd(index);
1417 if (relayd == NULL) {
1418 DBG("Unable to find relayd %" PRIu64, index);
1419 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
1420 }
1421
1422 /*
1423 * Each relayd socket pair has a refcount of streams attached to it
1424 * which tells if the relayd is still active or not depending on the
1425 * refcount value.
1426 *
1427 * This will set the destroy flag of the relayd object and destroy it
1428 * if the refcount reaches zero when called.
1429 *
1430 * The destroy can happen either here or when a stream fd hangs up.
1431 */
1432 if (relayd) {
1433 consumer_flag_relayd_for_destroy(relayd);
1434 }
1435
1436 goto end_msg_sessiond;
1437 }
1438 case LTTNG_CONSUMER_UPDATE_STREAM:
1439 {
1440 rcu_read_unlock();
1441 return -ENOSYS;
1442 }
1443 case LTTNG_CONSUMER_DATA_PENDING:
1444 {
1445 int ret, is_data_pending;
1446 uint64_t id = msg.u.data_pending.session_id;
1447
1448 DBG("UST consumer data pending command for id %" PRIu64, id);
1449
1450 is_data_pending = consumer_data_pending(id);
1451
1452 /* Send back returned value to session daemon */
1453 ret = lttcomm_send_unix_sock(sock, &is_data_pending,
1454 sizeof(is_data_pending));
1455 if (ret < 0) {
1456 DBG("Error when sending the data pending ret code: %d", ret);
1457 goto error_fatal;
1458 }
1459
1460 /*
1461 * No need to send back a status message since the data pending
1462 * returned value is the response.
1463 */
1464 break;
1465 }
1466 case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
1467 {
1468 int ret;
1469 struct ustctl_consumer_channel_attr attr;
1470
1471 /* Create a plain object and reserve a channel key. */
1472 channel = consumer_allocate_channel(
1473 msg.u.ask_channel.key,
1474 msg.u.ask_channel.session_id,
1475 msg.u.ask_channel.pathname,
1476 msg.u.ask_channel.name,
1477 msg.u.ask_channel.uid,
1478 msg.u.ask_channel.gid,
1479 msg.u.ask_channel.relayd_id,
1480 (enum lttng_event_output) msg.u.ask_channel.output,
1481 msg.u.ask_channel.tracefile_size,
1482 msg.u.ask_channel.tracefile_count,
1483 msg.u.ask_channel.session_id_per_pid,
1484 msg.u.ask_channel.monitor,
1485 msg.u.ask_channel.live_timer_interval,
1486 msg.u.ask_channel.is_live,
1487 msg.u.ask_channel.root_shm_path,
1488 msg.u.ask_channel.shm_path);
1489 if (!channel) {
1490 goto end_channel_error;
1491 }
1492
1493 /*
1494 * Assign UST application UID to the channel. This value is ignored for
1495 * per-PID buffers. This is specific to UST, thus setting this after the
1496 * allocation.
1497 */
1498 channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;
1499
1500 /* Build channel attributes from received message. */
1501 attr.subbuf_size = msg.u.ask_channel.subbuf_size;
1502 attr.num_subbuf = msg.u.ask_channel.num_subbuf;
1503 attr.overwrite = msg.u.ask_channel.overwrite;
1504 attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
1505 attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
1506 attr.chan_id = msg.u.ask_channel.chan_id;
1507 memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));
1508
1509 /* Match channel buffer type to the UST abi. */
1510 switch (msg.u.ask_channel.output) {
1511 case LTTNG_EVENT_MMAP:
1512 default:
1513 attr.output = LTTNG_UST_MMAP;
1514 break;
1515 }
1516
1517 /* Translate and save channel type. */
1518 switch (msg.u.ask_channel.type) {
1519 case LTTNG_UST_CHAN_PER_CPU:
1520 channel->type = CONSUMER_CHANNEL_TYPE_DATA;
1521 attr.type = LTTNG_UST_CHAN_PER_CPU;
1522 /*
1523 * Set refcount to 1 for owner. Below, we will
1524 * pass ownership to the
1525 * consumer_thread_channel_poll() thread.
1526 */
1527 channel->refcount = 1;
1528 break;
1529 case LTTNG_UST_CHAN_METADATA:
1530 channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
1531 attr.type = LTTNG_UST_CHAN_METADATA;
1532 break;
1533 default:
1534 assert(0);
1535 goto error_fatal;
1536 };
1537
1538 health_code_update();
1539
1540 ret = ask_channel(ctx, sock, channel, &attr);
1541 if (ret < 0) {
1542 goto end_channel_error;
1543 }
1544
1545 if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
1546 ret = consumer_metadata_cache_allocate(channel);
1547 if (ret < 0) {
1548 ERR("Allocating metadata cache");
1549 goto end_channel_error;
1550 }
1551 consumer_timer_switch_start(channel, attr.switch_timer_interval);
1552 attr.switch_timer_interval = 0;
1553 } else {
1554 consumer_timer_live_start(channel,
1555 msg.u.ask_channel.live_timer_interval);
1556 }
1557
1558 health_code_update();
1559
1560 /*
1561 * Add the channel to the internal state AFTER all streams were created
1562 * and successfully sent to session daemon. This way, all streams must
1563 * be ready before this channel is visible to the threads.
1564 * If add_channel succeeds, ownership of the channel is
1565 * passed to consumer_thread_channel_poll().
1566 */
1567 ret = add_channel(channel, ctx);
1568 if (ret < 0) {
1569 if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
1570 if (channel->switch_timer_enabled == 1) {
1571 consumer_timer_switch_stop(channel);
1572 }
1573 consumer_metadata_cache_destroy(channel);
1574 }
1575 if (channel->live_timer_enabled == 1) {
1576 consumer_timer_live_stop(channel);
1577 }
1578 goto end_channel_error;
1579 }
1580
1581 health_code_update();
1582
1583 /*
1584 * Channel and streams are now created. Inform the session daemon that
1585 * everything went well and should wait to receive the channel and
1586 * streams with ustctl API.
1587 */
1588 ret = consumer_send_status_channel(sock, channel);
1589 if (ret < 0) {
1590 /*
1591 * There is probably a problem on the socket.
1592 */
1593 goto error_fatal;
1594 }
1595
1596 break;
1597 }
1598 case LTTNG_CONSUMER_GET_CHANNEL:
1599 {
1600 int ret, relayd_err = 0;
1601 uint64_t key = msg.u.get_channel.key;
1602 struct lttng_consumer_channel *channel;
1603
1604 channel = consumer_find_channel(key);
1605 if (!channel) {
1606 ERR("UST consumer get channel key %" PRIu64 " not found", key);
1607 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1608 goto end_msg_sessiond;
1609 }
1610
1611 health_code_update();
1612
1613 /* Send everything to sessiond. */
1614 ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
1615 if (ret < 0) {
1616 if (relayd_err) {
1617 /*
1618 * We were unable to send to the relayd the stream so avoid
1619 * sending back a fatal error to the thread since this is OK
1620 * and the consumer can continue its work. The above call
1621 * has sent the error status message to the sessiond.
1622 */
1623 goto end_nosignal;
1624 }
1625 /*
1626 * The communication was broken, hence there is a bad state between
1627 * the consumer and sessiond so stop everything.
1628 */
1629 goto error_fatal;
1630 }
1631
1632 health_code_update();
1633
1634 /*
1635 * In no monitor mode, the streams ownership is kept inside the channel
1636 * so don't send them to the data thread.
1637 */
1638 if (!channel->monitor) {
1639 goto end_msg_sessiond;
1640 }
1641
1642 ret = send_streams_to_thread(channel, ctx);
1643 if (ret < 0) {
1644 /*
1645 * If we are unable to send the stream to the thread, there is
1646 * a big problem so just stop everything.
1647 */
1648 goto error_fatal;
1649 }
1650 /* List MUST be empty after or else it could be reused. */
1651 assert(cds_list_empty(&channel->streams.head));
1652 goto end_msg_sessiond;
1653 }
1654 case LTTNG_CONSUMER_CHANNEL_STOP_LIVE_TIMER:
1655 {
1656 uint64_t key = msg.u.get_channel.key;
1657 struct lttng_consumer_channel *channel;
1658
1659 channel = consumer_find_channel(key);
1660 if (!channel) {
1661 ERR("UST consumer get channel key %" PRIu64 " not found", key);
1662 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1663 goto end_msg_sessiond;
1664 }
1665
1666 health_code_update();
1667
1668 if (channel->live_timer_enabled == 1) {
1669 consumer_timer_live_stop(channel);
1670 }
1671
1672 health_code_update();
1673
1674 goto end_msg_sessiond;
1675 }
1676 case LTTNG_CONSUMER_CHANNEL_START_LIVE_TIMER:
1677 {
1678 uint64_t key = msg.u.get_channel.key;
1679 struct lttng_consumer_channel *channel;
1680
1681 channel = consumer_find_channel(key);
1682 if (!channel) {
1683 ERR("UST consumer get channel key %" PRIu64 " not found", key);
1684 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1685 goto end_msg_sessiond;
1686 }
1687
1688 health_code_update();
1689
1690 if (channel->live_timer_enabled == 0) {
1691 consumer_timer_live_start(channel, channel->live_timer_interval);
1692 }
1693
1694 health_code_update();
1695
1696 goto end_msg_sessiond;
1697 }
1698
1699 case LTTNG_CONSUMER_DESTROY_CHANNEL:
1700 {
1701 uint64_t key = msg.u.destroy_channel.key;
1702
1703 /*
1704 * Only called if streams have not been sent to the stream
1705 * manager thread. However, the channel has been sent to the
1706 * channel manager thread.
1707 */
1708 notify_thread_del_channel(ctx, key);
1709 goto end_msg_sessiond;
1710 }
1711 case LTTNG_CONSUMER_CLOSE_METADATA:
1712 {
1713 int ret;
1714
1715 ret = close_metadata(msg.u.close_metadata.key);
1716 if (ret != 0) {
1717 ret_code = ret;
1718 }
1719
1720 goto end_msg_sessiond;
1721 }
1722 case LTTNG_CONSUMER_FLUSH_CHANNEL:
1723 {
1724 int ret;
1725
1726 ret = flush_channel(msg.u.flush_channel.key);
1727 if (ret != 0) {
1728 ret_code = ret;
1729 }
1730
1731 goto end_msg_sessiond;
1732 }
1733 case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL:
1734 {
1735 int ret;
1736
1737 ret = clear_quiescent_channel(
1738 msg.u.clear_quiescent_channel.key);
1739 if (ret != 0) {
1740 ret_code = ret;
1741 }
1742
1743 goto end_msg_sessiond;
1744 }
1745 case LTTNG_CONSUMER_PUSH_METADATA:
1746 {
1747 int ret;
1748 uint64_t len = msg.u.push_metadata.len;
1749 uint64_t key = msg.u.push_metadata.key;
1750 uint64_t offset = msg.u.push_metadata.target_offset;
1751 uint64_t version = msg.u.push_metadata.version;
1752 struct lttng_consumer_channel *channel;
1753
1754 DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
1755 len);
1756
1757 channel = consumer_find_channel(key);
1758 if (!channel) {
1759 /*
1760 * This is possible if the metadata creation on the consumer side
1761 * is in flight vis-a-vis a concurrent push metadata from the
1762 * session daemon. Simply return that the channel failed and the
1763 * session daemon will handle that message correctly considering
1764 * that this race is acceptable thus the DBG() statement here.
1765 */
1766 DBG("UST consumer push metadata %" PRIu64 " not found", key);
1767 ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
1768 goto end_msg_sessiond;
1769 }
1770
1771 health_code_update();
1772
1773 if (!len) {
1774 /*
1775 * There is nothing to receive. We have simply
1776 * checked whether the channel can be found.
1777 */
1778 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1779 goto end_msg_sessiond;
1780 }
1781
1782 /* Tell session daemon we are ready to receive the metadata. */
1783 ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
1784 if (ret < 0) {
1785 /* Somehow, the session daemon is not responding anymore. */
1786 goto error_fatal;
1787 }
1788
1789 health_code_update();
1790
1791 /* Wait for more data. */
1792 health_poll_entry();
1793 ret = lttng_consumer_poll_socket(consumer_sockpoll);
1794 health_poll_exit();
1795 if (ret) {
1796 goto error_fatal;
1797 }
1798
1799 health_code_update();
1800
1801 ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
1802 len, version, channel, 0, 1);
1803 if (ret < 0) {
1804 /* error receiving from sessiond */
1805 goto error_fatal;
1806 } else {
1807 ret_code = ret;
1808 goto end_msg_sessiond;
1809 }
1810 }
1811 case LTTNG_CONSUMER_SETUP_METADATA:
1812 {
1813 int ret;
1814
1815 ret = setup_metadata(ctx, msg.u.setup_metadata.key);
1816 if (ret) {
1817 ret_code = ret;
1818 }
1819 goto end_msg_sessiond;
1820 }
1821 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
1822 {
1823 if (msg.u.snapshot_channel.metadata) {
1824 ret = snapshot_metadata(msg.u.snapshot_channel.key,
1825 msg.u.snapshot_channel.pathname,
1826 msg.u.snapshot_channel.relayd_id,
1827 ctx);
1828 if (ret < 0) {
1829 ERR("Snapshot metadata failed");
1830 ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
1831 }
1832 } else {
1833 ret = snapshot_channel(msg.u.snapshot_channel.key,
1834 msg.u.snapshot_channel.pathname,
1835 msg.u.snapshot_channel.relayd_id,
1836 msg.u.snapshot_channel.nb_packets_per_stream,
1837 ctx);
1838 if (ret < 0) {
1839 ERR("Snapshot channel failed");
1840 ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
1841 }
1842 }
1843
1844 health_code_update();
1845 ret = consumer_send_status_msg(sock, ret_code);
1846 if (ret < 0) {
1847 /* Somehow, the session daemon is not responding anymore. */
1848 goto end_nosignal;
1849 }
1850 health_code_update();
1851 break;
1852 }
1853 case LTTNG_CONSUMER_DISCARDED_EVENTS:
1854 {
1855 int ret = 0;
1856 uint64_t discarded_events;
1857 struct lttng_ht_iter iter;
1858 struct lttng_ht *ht;
1859 struct lttng_consumer_stream *stream;
1860 uint64_t id = msg.u.discarded_events.session_id;
1861 uint64_t key = msg.u.discarded_events.channel_key;
1862
1863 DBG("UST consumer discarded events command for session id %"
1864 PRIu64, id);
1865 rcu_read_lock();
1866 pthread_mutex_lock(&consumer_data.lock);
1867
1868 ht = consumer_data.stream_list_ht;
1869
1870 /*
1871 * We only need a reference to the channel, but they are not
1872 * directly indexed, so we just use the first matching stream
1873 * to extract the information we need; we default to 0 if not
1874 * found (no events are dropped if the channel is not yet in
1875 * use).
1876 */
1877 discarded_events = 0;
1878 cds_lfht_for_each_entry_duplicate(ht->ht,
1879 ht->hash_fct(&id, lttng_ht_seed),
1880 ht->match_fct, &id,
1881 &iter.iter, stream, node_session_id.node) {
1882 if (stream->chan->key == key) {
1883 discarded_events = stream->chan->discarded_events;
1884 break;
1885 }
1886 }
1887 pthread_mutex_unlock(&consumer_data.lock);
1888 rcu_read_unlock();
1889
1890 DBG("UST consumer discarded events command for session id %"
1891 PRIu64 ", channel key %" PRIu64, id, key);
1892
1893 health_code_update();
1894
1895 /* Send back returned value to session daemon */
1896 ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events));
1897 if (ret < 0) {
1898 PERROR("send discarded events");
1899 goto error_fatal;
1900 }
1901
1902 break;
1903 }
1904 case LTTNG_CONSUMER_LOST_PACKETS:
1905 {
1906 int ret;
1907 uint64_t lost_packets;
1908 struct lttng_ht_iter iter;
1909 struct lttng_ht *ht;
1910 struct lttng_consumer_stream *stream;
1911 uint64_t id = msg.u.lost_packets.session_id;
1912 uint64_t key = msg.u.lost_packets.channel_key;
1913
1914 DBG("UST consumer lost packets command for session id %"
1915 PRIu64, id);
1916 rcu_read_lock();
1917 pthread_mutex_lock(&consumer_data.lock);
1918
1919 ht = consumer_data.stream_list_ht;
1920
1921 /*
1922 * We only need a reference to the channel, but they are not
1923 * directly indexed, so we just use the first matching stream
1924 * to extract the information we need; we default to 0 if not
1925 * found (no packets lost if the channel is not yet in use).
1926 */
1927 lost_packets = 0;
1928 cds_lfht_for_each_entry_duplicate(ht->ht,
1929 ht->hash_fct(&id, lttng_ht_seed),
1930 ht->match_fct, &id,
1931 &iter.iter, stream, node_session_id.node) {
1932 if (stream->chan->key == key) {
1933 lost_packets = stream->chan->lost_packets;
1934 break;
1935 }
1936 }
1937 pthread_mutex_unlock(&consumer_data.lock);
1938 rcu_read_unlock();
1939
1940 DBG("UST consumer lost packets command for session id %"
1941 PRIu64 ", channel key %" PRIu64, id, key);
1942
1943 health_code_update();
1944
1945 /* Send back returned value to session daemon */
1946 ret = lttcomm_send_unix_sock(sock, &lost_packets,
1947 sizeof(lost_packets));
1948 if (ret < 0) {
1949 PERROR("send lost packets");
1950 goto error_fatal;
1951 }
1952
1953 break;
1954 }
1955 default:
1956 break;
1957 }
1958
1959 end_nosignal:
1960 rcu_read_unlock();
1961
1962 health_code_update();
1963
1964 /*
1965 * Return 1 to indicate success since the 0 value can be a socket
1966 * shutdown during the recv() or send() call.
1967 */
1968 return 1;
1969
1970 end_msg_sessiond:
1971 /*
1972 * The returned value here is not useful since either way we'll return 1 to
1973 * the caller because the session daemon socket management is done
1974 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
1975 */
1976 ret = consumer_send_status_msg(sock, ret_code);
1977 if (ret < 0) {
1978 goto error_fatal;
1979 }
1980 rcu_read_unlock();
1981
1982 health_code_update();
1983
1984 return 1;
1985 end_channel_error:
1986 if (channel) {
1987 /*
1988 * Free channel here since no one has a reference to it. We don't
1989 * free after that because a stream can store this pointer.
1990 */
1991 destroy_channel(channel);
1992 }
1993 /* We have to send a status channel message indicating an error. */
1994 ret = consumer_send_status_channel(sock, NULL);
1995 if (ret < 0) {
1996 		/* Stop everything if the session daemon cannot be notified. */
1997 goto error_fatal;
1998 }
1999 rcu_read_unlock();
2000
2001 health_code_update();
2002
2003 return 1;
2004 error_fatal:
2005 rcu_read_unlock();
2006 /* This will issue a consumer stop. */
2007 return -1;
2008 }
2009
2010 void lttng_ustctl_flush_buffer(struct lttng_consumer_stream *stream,
2011 int producer_active)
2012 {
2013 assert(stream);
2014 assert(stream->ustream);
2015
2016 ustctl_flush_buffer(stream->ustream, producer_active);
2017 }
2018
2019 /*
2020  * Take a snapshot of a specific stream's ring buffer.
2021 *
2022 * Returns 0 on success, < 0 on error
2023 */
2024 int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
2025 {
2026 assert(stream);
2027 assert(stream->ustream);
2028
2029 return ustctl_snapshot(stream->ustream);
2030 }
2031
2032 /*
2033 * Get the produced position
2034 *
2035 * Returns 0 on success, < 0 on error
2036 */
2037 int lttng_ustconsumer_get_produced_snapshot(
2038 struct lttng_consumer_stream *stream, unsigned long *pos)
2039 {
2040 assert(stream);
2041 assert(stream->ustream);
2042 assert(pos);
2043
2044 return ustctl_snapshot_get_produced(stream->ustream, pos);
2045 }
2046
2047 /*
2048 * Get the consumed position
2049 *
2050 * Returns 0 on success, < 0 on error
2051 */
2052 int lttng_ustconsumer_get_consumed_snapshot(
2053 struct lttng_consumer_stream *stream, unsigned long *pos)
2054 {
2055 assert(stream);
2056 assert(stream->ustream);
2057 assert(pos);
2058
2059 return ustctl_snapshot_get_consumed(stream->ustream, pos);
2060 }
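/*
 * Editorial sketch (not part of the original file): the two snapshot
 * getters above can be combined to estimate how many bytes are sitting
 * unconsumed in a stream's ring buffer. The helper name is hypothetical;
 * a snapshot must be taken first so both positions refer to the same
 * instant.
 */
static int lttng_ustconsumer_estimate_backlog(
		struct lttng_consumer_stream *stream, unsigned long *backlog)
{
	int ret;
	unsigned long consumed_pos, produced_pos;

	ret = lttng_ustconsumer_take_snapshot(stream);
	if (ret < 0) {
		return ret;
	}
	ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
	if (ret < 0) {
		return ret;
	}
	ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
	if (ret < 0) {
		return ret;
	}
	/* Positions are monotonically increasing byte offsets. */
	*backlog = produced_pos - consumed_pos;
	return 0;
}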
2061
2062 void lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream,
2063 int producer)
2064 {
2065 assert(stream);
2066 assert(stream->ustream);
2067
2068 ustctl_flush_buffer(stream->ustream, producer);
2069 }
2070
2071 int lttng_ustconsumer_get_current_timestamp(
2072 struct lttng_consumer_stream *stream, uint64_t *ts)
2073 {
2074 assert(stream);
2075 assert(stream->ustream);
2076 assert(ts);
2077
2078 return ustctl_get_current_timestamp(stream->ustream, ts);
2079 }
2080
2081 int lttng_ustconsumer_get_sequence_number(
2082 struct lttng_consumer_stream *stream, uint64_t *seq)
2083 {
2084 assert(stream);
2085 assert(stream->ustream);
2086 assert(seq);
2087
2088 return ustctl_get_sequence_number(stream->ustream, seq);
2089 }
2090
2091 /*
2092 * Called when the stream signals the consumer that it has hung up.
2093 */
2094 void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
2095 {
2096 assert(stream);
2097 assert(stream->ustream);
2098
2099 pthread_mutex_lock(&stream->lock);
2100 if (!stream->quiescent) {
2101 ustctl_flush_buffer(stream->ustream, 0);
2102 stream->quiescent = true;
2103 }
2104 pthread_mutex_unlock(&stream->lock);
2105 stream->hangup_flush_done = 1;
2106 }
2107
2108 void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
2109 {
2110 int i;
2111
2112 assert(chan);
2113 assert(chan->uchan);
2114
2115 if (chan->switch_timer_enabled == 1) {
2116 consumer_timer_switch_stop(chan);
2117 }
2118 for (i = 0; i < chan->nr_stream_fds; i++) {
2119 int ret;
2120
2121 ret = close(chan->stream_fds[i]);
2122 if (ret) {
2123 PERROR("close");
2124 }
2125 if (chan->shm_path[0]) {
2126 char shm_path[PATH_MAX];
2127
2128 ret = get_stream_shm_path(shm_path, chan->shm_path, i);
2129 if (ret) {
2130 ERR("Cannot get stream shm path");
2131 }
2132 ret = run_as_unlink(shm_path, chan->uid, chan->gid);
2133 if (ret) {
2134 PERROR("unlink %s", shm_path);
2135 }
2136 }
2137 }
2138 }
2139
2140 void lttng_ustconsumer_free_channel(struct lttng_consumer_channel *chan)
2141 {
2142 assert(chan);
2143 assert(chan->uchan);
2144
2145 consumer_metadata_cache_destroy(chan);
2146 ustctl_destroy_channel(chan->uchan);
2147 /* Try to rmdir all directories under shm_path root. */
2148 if (chan->root_shm_path[0]) {
2149 (void) run_as_recursive_rmdir(chan->root_shm_path,
2150 chan->uid, chan->gid);
2151 }
2152 free(chan->stream_fds);
2153 }
2154
2155 void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
2156 {
2157 assert(stream);
2158 assert(stream->ustream);
2159
2160 if (stream->chan->switch_timer_enabled == 1) {
2161 consumer_timer_switch_stop(stream->chan);
2162 }
2163 ustctl_destroy_stream(stream->ustream);
2164 }
2165
2166 int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream)
2167 {
2168 assert(stream);
2169 assert(stream->ustream);
2170
2171 return ustctl_stream_get_wakeup_fd(stream->ustream);
2172 }
2173
2174 int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream)
2175 {
2176 assert(stream);
2177 assert(stream->ustream);
2178
2179 return ustctl_stream_close_wakeup_fd(stream->ustream);
2180 }
2181
2182 static
2183 void metadata_stream_reset_cache_consumed_position(
2184 struct lttng_consumer_stream *stream)
2185 {
2186 DBG("Reset metadata cache of session %" PRIu64,
2187 stream->chan->session_id);
2188 stream->ust_metadata_pushed = 0;
2189 }
2190
2191 /*
2192 * Write up to one packet from the metadata cache to the channel.
2193 *
2194  * Returns the number of bytes pushed from the cache into the ring
2195  * buffer, or a negative value on error.
2196 */
2197 static
2198 int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
2199 {
2200 ssize_t write_len;
2201 int ret;
2202
2203 pthread_mutex_lock(&stream->chan->metadata_cache->lock);
2204 if (stream->chan->metadata_cache->max_offset ==
2205 stream->ust_metadata_pushed) {
2206 /*
2207 * In the context of a user space metadata channel, a
2208 * change in version can be detected in two ways:
2209 * 1) During the pre-consume of the `read_subbuffer` loop,
2210 * 2) When populating the metadata ring buffer (i.e. here).
2211 *
2212 * This function is invoked when there is no metadata
2213 * available in the ring-buffer. If all data was consumed
2214 * up to the size of the metadata cache, there is no metadata
2215 * to insert in the ring-buffer.
2216 *
2217 * However, the metadata version could still have changed (a
2218 * regeneration without any new data will yield the same cache
2219 * size).
2220 *
2221 * The cache's version is checked for a version change and the
2222 * consumed position is reset if one occurred.
2223 *
2224 * This check is only necessary for the user space domain as
2225 * it has to manage the cache explicitly. If this reset was not
2226 * performed, no metadata would be consumed (and no reset would
2227 * occur as part of the pre-consume) until the metadata size
2228 * exceeded the cache size.
2229 */
2230 if (stream->metadata_version !=
2231 stream->chan->metadata_cache->version) {
2232 metadata_stream_reset_cache_consumed_position(stream);
2233 consumer_stream_metadata_set_version(stream,
2234 stream->chan->metadata_cache->version);
2235 } else {
2236 ret = 0;
2237 goto end;
2238 }
2239 }
2240
2241 write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
2242 &stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
2243 stream->chan->metadata_cache->max_offset
2244 - stream->ust_metadata_pushed);
2245 assert(write_len != 0);
2246 if (write_len < 0) {
2247 ERR("Writing one metadata packet");
2248 ret = write_len;
2249 goto end;
2250 }
2251 stream->ust_metadata_pushed += write_len;
2252
2253 assert(stream->chan->metadata_cache->max_offset >=
2254 stream->ust_metadata_pushed);
2255 ret = write_len;
2256
2257 /*
2258 * Switch packet (but don't open the next one) on every commit of
2259 * a metadata packet. Since the subbuffer is fully filled (with padding,
2260 * if needed), the stream is "quiescent" after this commit.
2261 */
2262 ustctl_flush_buffer(stream->ustream, 1);
2263 end:
2264 pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
2265 return ret;
2266 }
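/*
 * Editorial sketch (hypothetical helper, not in the original file):
 * commit_one_metadata_packet() writes at most one packet per call, so a
 * caller wanting to push the whole cache to the ring buffer would loop
 * until nothing is written (0) or an error occurs; -ENOBUFS simply means
 * the ring buffer is full and a consumption pass is needed first.
 */
static int commit_all_metadata_packets(struct lttng_consumer_stream *stream)
{
	int ret;

	do {
		ret = commit_one_metadata_packet(stream);
	} while (ret > 0);

	/* 0: cache fully pushed; < 0: error (or -ENOBUFS, ring full). */
	return ret;
}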
2267
2268
2269 /*
2270  * Sync metadata, meaning request it from the session daemon and take a
2271  * snapshot so the metadata thread can consume it.
2272  *
2273  * The metadata stream lock is held here, but we need to release it when
2274  * interacting with the sessiond, else we cause a deadlock with live
2275  * sessions awaiting metadata to be pushed out.
2276 *
2277  * Return 0 if new metadata is available, EAGAIN if more remains to be
2278  * extracted, ENODATA if the stream is empty, or a negative value on error.
2279 */
2280 int lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
2281 struct lttng_consumer_stream *metadata)
2282 {
2283 int ret;
2284 int retry = 0;
2285
2286 assert(ctx);
2287 assert(metadata);
2288
2289 pthread_mutex_unlock(&metadata->lock);
2290 /*
2291 	 * Request metadata from the sessiond, but don't wait for the flush:
2292 	 * this thread is the one performing it, so waiting would deadlock.
2293 */
2294 ret = lttng_ustconsumer_request_metadata(ctx, metadata->chan, 0, 0);
2295 pthread_mutex_lock(&metadata->lock);
2296 if (ret < 0) {
2297 goto end;
2298 }
2299
2300 ret = commit_one_metadata_packet(metadata);
2301 if (ret <= 0) {
2302 goto end;
2303 	} else {
2304 retry = 1;
2305 }
2306
2307 ustctl_flush_buffer(metadata->ustream, 1);
2308 ret = ustctl_snapshot(metadata->ustream);
2309 if (ret < 0) {
2310 if (errno != EAGAIN) {
2311 ERR("Sync metadata, taking UST snapshot");
2312 goto end;
2313 }
2314 		DBG("No new metadata when syncing.");
2315 /* No new metadata, exit. */
2316 ret = ENODATA;
2317 goto end;
2318 }
2319
2320 /*
2321 * After this flush, we still need to extract metadata.
2322 */
2323 if (retry) {
2324 ret = EAGAIN;
2325 }
2326
2327 end:
2328 return ret;
2329 }
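/*
 * Editorial sketch: one way a metadata thread could interpret the return
 * codes of lttng_ustconsumer_sync_metadata() documented above. The
 * helper name and the 1/0 convention are illustrative; the stream lock
 * is taken here since the callee expects it held.
 */
static int sync_metadata_once(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *metadata)
{
	int ret;

	pthread_mutex_lock(&metadata->lock);
	ret = lttng_ustconsumer_sync_metadata(ctx, metadata);
	pthread_mutex_unlock(&metadata->lock);
	if (ret == 0 || ret == EAGAIN) {
		/* New metadata available, or more left to extract: read it. */
		return 1;
	}
	if (ret == ENODATA) {
		/* Cache and ring buffer are in sync: nothing to read. */
		return 0;
	}
	return ret;	/* Negative value on error. */
}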
2330
2331 /*
2332 * Return 0 on success else a negative value.
2333 */
2334 static int notify_if_more_data(struct lttng_consumer_stream *stream,
2335 struct lttng_consumer_local_data *ctx)
2336 {
2337 int ret;
2338 struct ustctl_consumer_stream *ustream;
2339
2340 assert(stream);
2341 assert(ctx);
2342
2343 ustream = stream->ustream;
2344
2345 /*
2346 	 * First, check whether a new subbuffer is available before
2347 	 * reading the stream wait_fd.
2348 */
2349 /* Get the next subbuffer */
2350 ret = ustctl_get_next_subbuf(ustream);
2351 if (ret) {
2352 /* No more data found, flag the stream. */
2353 stream->has_data = 0;
2354 ret = 0;
2355 goto end;
2356 }
2357
2358 ret = ustctl_put_subbuf(ustream);
2359 assert(!ret);
2360
2361 /* This stream still has data. Flag it and wake up the data thread. */
2362 stream->has_data = 1;
2363
2364 if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) {
2365 ssize_t writelen;
2366
2367 writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1);
2368 if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
2369 ret = writelen;
2370 goto end;
2371 }
2372
2373 /* The wake up pipe has been notified. */
2374 ctx->has_wakeup = 1;
2375 }
2376 ret = 0;
2377
2378 end:
2379 return ret;
2380 }
2381
2382 static int consumer_stream_ust_on_wake_up(struct lttng_consumer_stream *stream)
2383 {
2384 int ret = 0;
2385
2386 /*
2387 * We can consume the 1 byte written into the wait_fd by
2388 * UST. Don't trigger error if we cannot read this one byte
2389 * (read returns 0), or if the error is EAGAIN or EWOULDBLOCK.
2390 *
2391 * This is only done when the stream is monitored by a thread,
2392 	 * before the hangup flush is done, and if the stream is not
2393 	 * flagged as having data, since there might be nothing to
2394 	 * consume in the wait fd even though data may still be
2395 	 * available, as flagged by the consumer wake up pipe.
2396 */
2397 if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) {
2398 char dummy;
2399 ssize_t readlen;
2400
2401 readlen = lttng_read(stream->wait_fd, &dummy, 1);
2402 if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
2403 ret = readlen;
2404 }
2405 }
2406
2407 return ret;
2408 }
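/*
 * Editorial sketch: notify_if_more_data() and
 * consumer_stream_ust_on_wake_up() form the two ends of the wake-up
 * protocol. The writer leaves a byte in the wake-up pipe when a
 * sub-buffer remains unread; the reader drains the stream wait_fd on the
 * next iteration. A simplified, hypothetical wait step could look like
 * this.
 */
static int wait_for_stream_data(struct lttng_consumer_stream *stream)
{
	int ret;
	struct pollfd pfd = {
		.fd = stream->wait_fd,
		.events = POLLIN,
	};

	ret = poll(&pfd, 1, consumer_poll_timeout);
	if (ret < 0) {
		return ret;
	}
	if (ret > 0 && (pfd.revents & POLLIN)) {
		/* Drain the wake-up byte before reading sub-buffers. */
		ret = consumer_stream_ust_on_wake_up(stream);
	}
	return ret;
}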
2409
2410 static int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
2411 struct stream_subbuffer *subbuf)
2412 {
2413 int ret;
2414
2415 ret = ustctl_get_subbuf_size(
2416 stream->ustream, &subbuf->info.data.subbuf_size);
2417 if (ret) {
2418 goto end;
2419 }
2420
2421 ret = ustctl_get_padded_subbuf_size(
2422 stream->ustream, &subbuf->info.data.padded_subbuf_size);
2423 if (ret) {
2424 goto end;
2425 }
2426
2427 end:
2428 return ret;
2429 }
2430
2431 static int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
2432 struct stream_subbuffer *subbuf)
2433 {
2434 int ret;
2435
2436 ret = extract_common_subbuffer_info(stream, subbuf);
2437 if (ret) {
2438 goto end;
2439 }
2440
2441 subbuf->info.metadata.version = stream->metadata_version;
2442
2443 end:
2444 return ret;
2445 }
2446
2447 static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
2448 struct stream_subbuffer *subbuf)
2449 {
2450 int ret;
2451
2452 ret = extract_common_subbuffer_info(stream, subbuf);
2453 if (ret) {
2454 goto end;
2455 }
2456
2457 ret = ustctl_get_packet_size(
2458 stream->ustream, &subbuf->info.data.packet_size);
2459 if (ret < 0) {
2460 PERROR("Failed to get sub-buffer packet size");
2461 goto end;
2462 }
2463
2464 ret = ustctl_get_content_size(
2465 stream->ustream, &subbuf->info.data.content_size);
2466 if (ret < 0) {
2467 PERROR("Failed to get sub-buffer content size");
2468 goto end;
2469 }
2470
2471 ret = ustctl_get_timestamp_begin(
2472 stream->ustream, &subbuf->info.data.timestamp_begin);
2473 if (ret < 0) {
2474 PERROR("Failed to get sub-buffer begin timestamp");
2475 goto end;
2476 }
2477
2478 ret = ustctl_get_timestamp_end(
2479 stream->ustream, &subbuf->info.data.timestamp_end);
2480 if (ret < 0) {
2481 PERROR("Failed to get sub-buffer end timestamp");
2482 goto end;
2483 }
2484
2485 ret = ustctl_get_events_discarded(
2486 stream->ustream, &subbuf->info.data.events_discarded);
2487 if (ret) {
2488 PERROR("Failed to get sub-buffer events discarded count");
2489 goto end;
2490 }
2491
2492 ret = ustctl_get_sequence_number(stream->ustream,
2493 &subbuf->info.data.sequence_number.value);
2494 if (ret) {
2495 		/* May not be supported by older LTTng-UST. */
2496 if (ret != -ENOTTY) {
2497 PERROR("Failed to get sub-buffer sequence number");
2498 goto end;
2499 }
2500 } else {
2501 subbuf->info.data.sequence_number.is_set = true;
2502 }
2503
2504 ret = ustctl_get_stream_id(
2505 stream->ustream, &subbuf->info.data.stream_id);
2506 if (ret < 0) {
2507 PERROR("Failed to get stream id");
2508 goto end;
2509 }
2510
2511 ret = ustctl_get_instance_id(stream->ustream,
2512 &subbuf->info.data.stream_instance_id.value);
2513 if (ret) {
2514 		/* May not be supported by older LTTng-UST. */
2515 if (ret != -ENOTTY) {
2516 PERROR("Failed to get stream instance id");
2517 goto end;
2518 }
2519 } else {
2520 subbuf->info.data.stream_instance_id.is_set = true;
2521 }
2522 end:
2523 return ret;
2524 }
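/*
 * Editorial sketch: the sequence number and stream instance id extracted
 * above are optional since older tracers do not emit them, hence the
 * is_set flag. Consumers of the sub-buffer info should test the flag
 * before using the value; the fallback of 0 below is illustrative.
 */
static uint64_t subbuffer_sequence_number_or_zero(
		const struct stream_subbuffer *subbuf)
{
	return subbuf->info.data.sequence_number.is_set ?
			subbuf->info.data.sequence_number.value : 0;
}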
2525
2526 static int get_next_subbuffer_common(struct lttng_consumer_stream *stream,
2527 struct stream_subbuffer *subbuffer)
2528 {
2529 int ret;
2530 const char *addr;
2531
2532 ret = stream->read_subbuffer_ops.extract_subbuffer_info(
2533 stream, subbuffer);
2534 if (ret) {
2535 goto end;
2536 }
2537
2538 ret = get_current_subbuf_addr(stream, &addr);
2539 if (ret) {
2540 goto end;
2541 }
2542
2543 subbuffer->buffer.buffer = lttng_buffer_view_init(
2544 addr, 0, subbuffer->info.data.padded_subbuf_size);
2545 assert(subbuffer->buffer.buffer.data != NULL);
2546 end:
2547 return ret;
2548 }
2549
2550 static int get_next_subbuffer(struct lttng_consumer_stream *stream,
2551 struct stream_subbuffer *subbuffer)
2552 {
2553 int ret;
2554
2555 ret = ustctl_get_next_subbuf(stream->ustream);
2556 if (ret) {
2557 goto end;
2558 }
2559
2560 ret = get_next_subbuffer_common(stream, subbuffer);
2561 if (ret) {
2562 goto end;
2563 }
2564 end:
2565 return ret;
2566 }
2567
2568 static int get_next_subbuffer_metadata(struct lttng_consumer_stream *stream,
2569 struct stream_subbuffer *subbuffer)
2570 {
2571 int ret;
2572 bool cache_empty;
2573 bool got_subbuffer;
2574 bool coherent;
2575 bool buffer_empty;
2576 unsigned long consumed_pos, produced_pos;
2577
2578 do {
2579 ret = ustctl_get_next_subbuf(stream->ustream);
2580 if (ret == 0) {
2581 got_subbuffer = true;
2582 } else {
2583 got_subbuffer = false;
2584 if (ret != -EAGAIN) {
2585 /* Fatal error. */
2586 goto end;
2587 }
2588 }
2589
2590 /*
2591 * Determine if the cache is empty and ensure that a sub-buffer
2592 * is made available if the cache is not empty.
2593 */
2594 if (!got_subbuffer) {
2595 ret = commit_one_metadata_packet(stream);
2596 if (ret < 0 && ret != -ENOBUFS) {
2597 goto end;
2598 } else if (ret == 0) {
2599 /* Not an error, the cache is empty. */
2600 cache_empty = true;
2601 ret = -ENODATA;
2602 goto end;
2603 } else {
2604 cache_empty = false;
2605 }
2606 } else {
2607 pthread_mutex_lock(&stream->chan->metadata_cache->lock);
2608 cache_empty = stream->chan->metadata_cache->max_offset ==
2609 stream->ust_metadata_pushed;
2610 pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
2611 }
2612 } while (!got_subbuffer);
2613
2614 /* Populate sub-buffer infos and view. */
2615 ret = get_next_subbuffer_common(stream, subbuffer);
2616 if (ret) {
2617 goto end;
2618 }
2619
2620 ret = lttng_ustconsumer_take_snapshot(stream);
2621 if (ret < 0) {
2622 /*
2623 * -EAGAIN is not expected since we got a sub-buffer and haven't
2624 * pushed the consumption position yet (on put_next).
2625 */
2626 PERROR("Failed to take a snapshot of metadata buffer positions");
2627 goto end;
2628 }
2629
2630 ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
2631 if (ret) {
2632 PERROR("Failed to get metadata consumed position");
2633 goto end;
2634 }
2635
2636 ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
2637 if (ret) {
2638 PERROR("Failed to get metadata produced position");
2639 goto end;
2640 }
2641
2642 	/* Is this the last sub-buffer of the ring buffer? */
2643 buffer_empty = (consumed_pos + stream->max_sb_size) == produced_pos;
2644
2645 /*
2646 * The sessiond registry lock ensures that coherent units of metadata
2647 * are pushed to the consumer daemon at once. Hence, if a sub-buffer is
2648 	 * are pushed to the consumer daemon at once. Hence, if a sub-buffer is
2649 	 * acquired, the cache is empty, and it is the only sub-buffer
2650 	 * available, it is safe to assume that it is "coherent".
2650 */
2651 coherent = got_subbuffer && cache_empty && buffer_empty;
2652
2653 LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);
2654 end:
2655 return ret;
2656 }
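/*
 * Editorial sketch: the coherency rule used above, isolated as a
 * predicate. A metadata sub-buffer may only be assumed "coherent" when
 * it was acquired, nothing remains in the cache, and it is the last
 * sub-buffer available in the ring buffer.
 */
static bool metadata_subbuffer_is_coherent(bool got_subbuffer,
		bool cache_empty, bool buffer_empty)
{
	return got_subbuffer && cache_empty && buffer_empty;
}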
2657
2658 static int put_next_subbuffer(struct lttng_consumer_stream *stream,
2659 struct stream_subbuffer *subbuffer)
2660 {
2661 const int ret = ustctl_put_next_subbuf(stream->ustream);
2662
2663 assert(ret == 0);
2664 return ret;
2665 }
2666
2667 static int signal_metadata(struct lttng_consumer_stream *stream,
2668 struct lttng_consumer_local_data *ctx)
2669 {
2670 return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
2671 }
2672
2673 static int lttng_ustconsumer_set_stream_ops(
2674 struct lttng_consumer_stream *stream)
2675 {
2676 int ret = 0;
2677
2678 stream->read_subbuffer_ops.on_wake_up = consumer_stream_ust_on_wake_up;
2679 if (stream->metadata_flag) {
2680 stream->read_subbuffer_ops.get_next_subbuffer =
2681 get_next_subbuffer_metadata;
2682 stream->read_subbuffer_ops.extract_subbuffer_info =
2683 extract_metadata_subbuffer_info;
2684 stream->read_subbuffer_ops.reset_metadata =
2685 metadata_stream_reset_cache_consumed_position;
2686 if (stream->chan->is_live) {
2687 stream->read_subbuffer_ops.on_sleep = signal_metadata;
2688 ret = consumer_stream_enable_metadata_bucketization(
2689 stream);
2690 if (ret) {
2691 goto end;
2692 }
2693 }
2694 } else {
2695 stream->read_subbuffer_ops.get_next_subbuffer =
2696 get_next_subbuffer;
2697 stream->read_subbuffer_ops.extract_subbuffer_info =
2698 extract_data_subbuffer_info;
2699 stream->read_subbuffer_ops.on_sleep = notify_if_more_data;
2700 if (stream->chan->is_live) {
2701 stream->read_subbuffer_ops.send_live_beacon =
2702 consumer_flush_ust_index;
2703 }
2704 }
2705
2706 stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
2707 end:
2708 return ret;
2709 }
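/*
 * Editorial sketch: lttng_ustconsumer_set_stream_ops() wires a
 * per-stream vtable so a generic pass can drive a stream without knowing
 * whether it carries data or metadata. The callback signatures are
 * inferred from their uses above; error handling and the actual output
 * step are reduced to the minimum.
 */
static int read_one_subbuffer_sketch(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct stream_subbuffer subbuf;

	memset(&subbuf, 0, sizeof(subbuf));
	ret = stream->read_subbuffer_ops.on_wake_up(stream);
	if (ret) {
		return ret;
	}
	ret = stream->read_subbuffer_ops.get_next_subbuffer(stream, &subbuf);
	if (ret) {
		return ret;
	}
	/* ... write subbuf.buffer.buffer to disk or to the relayd here ... */
	ret = stream->read_subbuffer_ops.put_next_subbuffer(stream, &subbuf);
	if (ret) {
		return ret;
	}
	if (stream->read_subbuffer_ops.on_sleep) {
		ret = stream->read_subbuffer_ops.on_sleep(stream, ctx);
	}
	return ret;
}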
2710
2711 /*
2712 * Called when a stream is created.
2713 *
2714 * Return 0 on success or else a negative value.
2715 */
2716 int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
2717 {
2718 int ret;
2719
2720 assert(stream);
2721
2722 /* Don't create anything if this is set for streaming. */
2723 if (stream->relayd_id == (uint64_t) -1ULL && stream->chan->monitor) {
2724 ret = utils_create_stream_file(stream->chan->pathname, stream->name,
2725 stream->chan->tracefile_size, stream->tracefile_count_current,
2726 stream->uid, stream->gid, NULL);
2727 if (ret < 0) {
2728 goto error;
2729 }
2730 stream->out_fd = ret;
2731 stream->tracefile_size_current = 0;
2732
2733 if (!stream->metadata_flag) {
2734 struct lttng_index_file *index_file;
2735
2736 index_file = lttng_index_file_create(stream->chan->pathname,
2737 stream->name, stream->uid, stream->gid,
2738 stream->chan->tracefile_size,
2739 stream->tracefile_count_current,
2740 CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
2741 if (!index_file) {
2742 goto error;
2743 }
2744 stream->index_file = index_file;
2745 }
2746 }
2747
2748 lttng_ustconsumer_set_stream_ops(stream);
2749 ret = 0;
2750
2751 error:
2752 return ret;
2753 }
2754
2755 /*
2756 * Check if data is still being extracted from the buffers for a specific
2757  * stream. The consumer data lock and the stream lock MUST be acquired
2758  * before calling this function.
2759  *
2760  * Return 1 if the traced data is still being read, else 0, meaning the
2761  * data is ready to be read by a trace viewer.
2762 */
2763 int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
2764 {
2765 int ret;
2766
2767 assert(stream);
2768 assert(stream->ustream);
2769
2770 DBG("UST consumer checking data pending");
2771
2772 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
2773 ret = 0;
2774 goto end;
2775 }
2776
2777 if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
2778 uint64_t contiguous, pushed;
2779
2780 /* Ease our life a bit. */
2781 contiguous = stream->chan->metadata_cache->max_offset;
2782 pushed = stream->ust_metadata_pushed;
2783
2784 /*
2785 * We can simply check whether all contiguously available data
2786 * has been pushed to the ring buffer, since the push operation
2787 * is performed within get_next_subbuf(), and because both
2788 * get_next_subbuf() and put_next_subbuf() are issued atomically
2789 * thanks to the stream lock within
2790 * lttng_ustconsumer_read_subbuffer(). This basically means that
2791 		 * whenever ust_metadata_pushed is incremented, the associated
2792 * metadata has been consumed from the metadata stream.
2793 */
2794 DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
2795 contiguous, pushed);
2796 assert(((int64_t) (contiguous - pushed)) >= 0);
2797 if ((contiguous != pushed) ||
2798 (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
2799 ret = 1; /* Data is pending */
2800 goto end;
2801 }
2802 } else {
2803 ret = ustctl_get_next_subbuf(stream->ustream);
2804 if (ret == 0) {
2805 /*
2806 * There is still data so let's put back this
2807 * subbuffer.
2808 */
2809 ret = ustctl_put_subbuf(stream->ustream);
2810 assert(ret == 0);
2811 ret = 1; /* Data is pending */
2812 goto end;
2813 }
2814 }
2815
2816 /* Data is NOT pending so ready to be read. */
2817 ret = 0;
2818
2819 end:
2820 return ret;
2821 }
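/*
 * Editorial sketch: how a caller could honour the locking contract
 * stated above for a single stream. The real DATA_PENDING command also
 * iterates every stream of the session and checks the relayd; that part
 * is elided and the helper name is hypothetical.
 */
static int stream_data_pending_locked(struct lttng_consumer_stream *stream)
{
	int ret;

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->lock);
	ret = lttng_ustconsumer_data_pending(stream);
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}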
2822
2823 /*
2824 * Stop a given metadata channel timer if enabled and close the wait fd which
2825 * is the poll pipe of the metadata stream.
2826 *
2827 * This MUST be called with the metadata channel acquired.
2828 */
2829 void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
2830 {
2831 int ret;
2832
2833 assert(metadata);
2834 assert(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA);
2835
2836 DBG("Closing metadata channel key %" PRIu64, metadata->key);
2837
2838 if (metadata->switch_timer_enabled == 1) {
2839 consumer_timer_switch_stop(metadata);
2840 }
2841
2842 if (!metadata->metadata_stream) {
2843 goto end;
2844 }
2845
2846 /*
2847 	 * Close the write side so that the thread monitoring the stream, if any,
2848 	 * wakes up and cleans up the metadata stream.
2849 */
2850 if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) {
2851 ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]);
2852 if (ret < 0) {
2853 PERROR("closing metadata pipe write side");
2854 }
2855 metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1;
2856 }
2857
2858 end:
2859 return;
2860 }
2861
2862 /*
2863 * Close every metadata stream wait fd of the metadata hash table. This
2864  * function MUST be used very carefully so as not to run into a race between the
2865 * metadata thread handling streams and this function closing their wait fd.
2866 *
2867  * For UST, this is used when the session daemon hangs up. It's the metadata
2868  * producer, so calling this is safe: we are assured that no state change
2869 * can occur in the metadata thread for the streams in the hash table.
2870 */
2871 void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
2872 {
2873 struct lttng_ht_iter iter;
2874 struct lttng_consumer_stream *stream;
2875
2876 assert(metadata_ht);
2877 assert(metadata_ht->ht);
2878
2879 DBG("UST consumer closing all metadata streams");
2880
2881 rcu_read_lock();
2882 cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
2883 node.node) {
2884
2885 health_code_update();
2886
2887 pthread_mutex_lock(&stream->chan->lock);
2888 lttng_ustconsumer_close_metadata(stream->chan);
2889 pthread_mutex_unlock(&stream->chan->lock);
2890
2891 }
2892 rcu_read_unlock();
2893 }
2894
2895 void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
2896 {
2897 int ret;
2898
2899 ret = ustctl_stream_close_wakeup_fd(stream->ustream);
2900 if (ret < 0) {
2901 ERR("Unable to close wakeup fd");
2902 }
2903 }
2904
2905 /*
2906 * Please refer to consumer-timer.c before adding any lock within this
2907 * function or any of its callees. Timers have a very strict locking
2908 * semantic with respect to teardown. Failure to respect this semantic
2909 * introduces deadlocks.
2910 *
2911 * DON'T hold the metadata lock when calling this function, else this
2912  * can cause a deadlock, with the consumer waiting for metadata to be
2913 * pushed out due to concurrent interaction with the session daemon.
2914 */
2915 int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
2916 struct lttng_consumer_channel *channel, int timer, int wait)
2917 {
2918 struct lttcomm_metadata_request_msg request;
2919 struct lttcomm_consumer_msg msg;
2920 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
2921 uint64_t len, key, offset, version;
2922 int ret;
2923
2924 assert(channel);
2925 assert(channel->metadata_cache);
2926
2927 memset(&request, 0, sizeof(request));
2928
2929 	/* Send the metadata request to the sessiond. */
2930 switch (consumer_data.type) {
2931 case LTTNG_CONSUMER64_UST:
2932 request.bits_per_long = 64;
2933 break;
2934 case LTTNG_CONSUMER32_UST:
2935 request.bits_per_long = 32;
2936 break;
2937 default:
2938 request.bits_per_long = 0;
2939 break;
2940 }
2941
2942 request.session_id = channel->session_id;
2943 request.session_id_per_pid = channel->session_id_per_pid;
2944 /*
2945 * Request the application UID here so the metadata of that application can
2946 * be sent back. The channel UID corresponds to the user UID of the session
2947 * used for the rights on the stream file(s).
2948 */
2949 request.uid = channel->ust_app_uid;
2950 request.key = channel->key;
2951
2952 DBG("Sending metadata request to sessiond, session id %" PRIu64
2953 ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
2954 request.session_id, request.session_id_per_pid, request.uid,
2955 request.key);
2956
2957 pthread_mutex_lock(&ctx->metadata_socket_lock);
2958
2959 health_code_update();
2960
2961 ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
2962 sizeof(request));
2963 if (ret < 0) {
2964 ERR("Asking metadata to sessiond");
2965 goto end;
2966 }
2967
2968 health_code_update();
2969
2970 /* Receive the metadata from sessiond */
2971 ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
2972 sizeof(msg));
2973 if (ret != sizeof(msg)) {
2974 DBG("Consumer received unexpected message size %d (expects %zu)",
2975 ret, sizeof(msg));
2976 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
2977 /*
2978 		 * The ret value might be 0, meaning an orderly shutdown, but this is ok
2979 * since the caller handles this.
2980 */
2981 goto end;
2982 }
2983
2984 health_code_update();
2985
2986 if (msg.cmd_type == LTTNG_ERR_UND) {
2987 /* No registry found */
2988 (void) consumer_send_status_msg(ctx->consumer_metadata_socket,
2989 ret_code);
2990 ret = 0;
2991 goto end;
2992 } else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
2993 ERR("Unexpected cmd_type received %d", msg.cmd_type);
2994 ret = -1;
2995 goto end;
2996 }
2997
2998 len = msg.u.push_metadata.len;
2999 key = msg.u.push_metadata.key;
3000 offset = msg.u.push_metadata.target_offset;
3001 version = msg.u.push_metadata.version;
3002
3003 assert(key == channel->key);
3004 if (len == 0) {
3005 DBG("No new metadata to receive for key %" PRIu64, key);
3006 }
3007
3008 health_code_update();
3009
3010 /* Tell session daemon we are ready to receive the metadata. */
3011 ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
3012 LTTCOMM_CONSUMERD_SUCCESS);
3013 if (ret < 0 || len == 0) {
3014 /*
3015 * Somehow, the session daemon is not responding anymore or there is
3016 * nothing to receive.
3017 */
3018 goto end;
3019 }
3020
3021 health_code_update();
3022
3023 ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
3024 key, offset, len, version, channel, timer, wait);
3025 if (ret >= 0) {
3026 /*
3027 * Only send the status msg if the sessiond is alive meaning a positive
3028 * ret code.
3029 */
3030 (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret);
3031 }
3032 ret = 0;
3033
3034 end:
3035 health_code_update();
3036
3037 pthread_mutex_unlock(&ctx->metadata_socket_lock);
3038 return ret;
3039 }
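/*
 * Editorial sketch: possible invocations of
 * lttng_ustconsumer_request_metadata() under the rules stated in its
 * header comment. Both wrappers are hypothetical, and the flag values
 * shown are assumptions: a timer callback would pass timer = 1, and
 * wait = 0 is assumed so the callee avoids blocking in that context.
 */
static int request_metadata_from_thread(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel)
{
	/* Regular thread context: waiting for the metadata push is fine. */
	return lttng_ustconsumer_request_metadata(ctx, channel, 0, 1);
}

static int request_metadata_from_timer(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel)
{
	/* Timer context: never block waiting for the push. */
	return lttng_ustconsumer_request_metadata(ctx, channel, 1, 0);
}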
3040
3041 /*
3042  * Return the result of the ustctl_get_stream_id() call.
3043 */
3044 int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream,
3045 uint64_t *stream_id)
3046 {
3047 assert(stream);
3048 assert(stream_id);
3049
3050 return ustctl_get_stream_id(stream->ustream, stream_id);
3051 }