consumerd: cleanup: use buffer view interface for mmap read subbuf
[lttng-tools.git] src/common/ust-consumer/ust-consumer.c
/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <stdint.h>
#define _LGPL_SOURCE
#include <assert.h>
#include <lttng/ust-ctl.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <inttypes.h>
#include <unistd.h>
#include <urcu/list.h>
#include <signal.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/relayd/relayd.h>
#include <common/compat/fcntl.h>
#include <common/compat/endian.h>
#include <common/consumer/consumer-metadata-cache.h>
#include <common/consumer/consumer-stream.h>
#include <common/consumer/consumer-timer.h>
#include <common/utils.h>
#include <common/index/index.h>

#include "ust-consumer.h"

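/* "-2147483648", the longest base-10 int, is 11 characters plus '\0'. */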
#define INT_MAX_STR_LEN 12 /* includes \0 */

extern struct lttng_consumer_global_data consumer_data;
extern int consumer_poll_timeout;
extern volatile int consumer_quit;

/*
 * Free channel object and all streams associated with it. This MUST be
 * used if and only if the channel has _NEVER_ been added to the global
 * channel hash table.
 */
static void destroy_channel(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	assert(channel);

	DBG("UST consumer cleaning stream list");

	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {

		health_code_update();

		cds_list_del(&stream->send_node);
		ustctl_destroy_stream(stream->ustream);
		free(stream);
	}

	/*
	 * If a channel is available, meaning that it was created before the
	 * streams were, delete it.
	 */
	if (channel->uchan) {
		lttng_ustconsumer_del_channel(channel);
		lttng_ustconsumer_free_channel(channel);
	}
	free(channel);
}

/*
 * Add channel to internal consumer state.
 *
 * Returns 0 on success or else a negative value.
 */
static int add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;

	assert(channel);
	assert(ctx);

	if (ctx->on_recv_channel != NULL) {
		ret = ctx->on_recv_channel(channel);
		if (ret == 0) {
			ret = consumer_add_channel(channel, ctx);
		} else if (ret < 0) {
			/* Most likely an ENOMEM. */
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			goto error;
		}
	} else {
		ret = consumer_add_channel(channel, ctx);
	}

	DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);

error:
	return ret;
}

/*
 * Allocate and return a consumer channel object.
 */
static struct lttng_consumer_channel *allocate_channel(uint64_t session_id,
		const char *pathname, const char *name, uid_t uid, gid_t gid,
		uint64_t relayd_id, uint64_t key, enum lttng_event_output output,
		uint64_t tracefile_size, uint64_t tracefile_count,
		uint64_t session_id_per_pid, unsigned int monitor,
		unsigned int live_timer_interval,
		const char *root_shm_path, const char *shm_path)
{
	assert(pathname);
	assert(name);

	return consumer_allocate_channel(key, session_id, pathname, name, uid,
			gid, relayd_id, output, tracefile_size,
			tracefile_count, session_id_per_pid, monitor,
			live_timer_interval, root_shm_path, shm_path);
}

/*
 * Allocate and return a consumer stream object. If _alloc_ret is not NULL,
 * the error value, if applicable, is set in it, else it is left untouched.
 *
 * Return NULL on error else the newly allocated stream object.
 */
static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *_alloc_ret)
{
	int alloc_ret;
	struct lttng_consumer_stream *stream = NULL;

	assert(channel);
	assert(ctx);

	stream = consumer_allocate_stream(channel->key,
			key,
			LTTNG_CONSUMER_ACTIVE_STREAM,
			channel->name,
			channel->uid,
			channel->gid,
			channel->relayd_id,
			channel->session_id,
			cpu,
			&alloc_ret,
			channel->type,
			channel->monitor);
	if (stream == NULL) {
		switch (alloc_ret) {
		case -ENOENT:
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			DBG3("Could not find channel");
			break;
		case -ENOMEM:
		case -EINVAL:
		default:
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			break;
		}
		goto error;
	}

	stream->chan = channel;

error:
	if (_alloc_ret) {
		*_alloc_ret = alloc_ret;
	}
	return stream;
}

/*
 * Send the given stream pointer to the corresponding thread.
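 *
 * Metadata streams are queued on the consumer metadata pipe and data
 * streams on the data pipe. Once the stream pointer is written to the
 * pipe, ownership is transferred to the receiving thread and the stream
 * becomes globally visible.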
 *
 * Returns 0 on success else a negative value.
 */
static int send_stream_to_thread(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_pipe *stream_pipe;

	/* Get the right pipe where the stream will be sent. */
	if (stream->metadata_flag) {
		ret = consumer_add_metadata_stream(stream);
		if (ret) {
			ERR("Consumer add metadata stream %" PRIu64 " failed.",
					stream->key);
			goto error;
		}
		stream_pipe = ctx->consumer_metadata_pipe;
	} else {
		ret = consumer_add_data_stream(stream);
		if (ret) {
			ERR("Consumer add stream %" PRIu64 " failed.",
					stream->key);
			goto error;
		}
		stream_pipe = ctx->consumer_data_pipe;
	}

	/*
	 * From this point on, the stream's ownership has been moved away from
	 * the channel and it becomes globally visible.
	 */
	stream->globally_visible = 1;

	ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
	if (ret < 0) {
		ERR("Consumer write %s stream to pipe %d",
				stream->metadata_flag ? "metadata" : "data",
				lttng_pipe_get_writefd(stream_pipe));
		if (stream->metadata_flag) {
			consumer_del_stream_for_metadata(stream);
		} else {
			consumer_del_stream_for_data(stream);
		}
	}
error:
	return ret;
}

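/*
 * Build the shm path of a stream by appending its cpu number to the
 * channel shm path. For example (hypothetical values), a shm_path of
 * "/tmp/lttng-shm/chan" with cpu 3 yields "/tmp/lttng-shm/chan3".
 *
 * Return 0 on success else a negative value.
 */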
static
int get_stream_shm_path(char *stream_shm_path, const char *shm_path, int cpu)
{
	char cpu_nr[INT_MAX_STR_LEN]; /* int max len */
	int ret;

	strncpy(stream_shm_path, shm_path, PATH_MAX);
	stream_shm_path[PATH_MAX - 1] = '\0';
	ret = snprintf(cpu_nr, INT_MAX_STR_LEN, "%i", cpu);
	if (ret < 0) {
		PERROR("snprintf");
		goto end;
	}
	strncat(stream_shm_path, cpu_nr,
			PATH_MAX - strlen(stream_shm_path) - 1);
	ret = 0;
end:
	return ret;
}

/*
 * Create streams for the given channel using liblttng-ust-ctl.
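 *
 * For a data channel, ustctl hands out one stream per cpu until it
 * returns NULL; a metadata channel yields a single stream, for which a
 * dedicated poll pipe is created when the channel is monitored.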
 *
 * Return 0 on success else a negative value.
 */
static int create_ust_streams(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret, cpu = 0;
	struct ustctl_consumer_stream *ustream;
	struct lttng_consumer_stream *stream;

	assert(channel);
	assert(ctx);

	/*
	 * Loop while a stream is available from ustctl. When NULL is returned,
	 * we've reached the end of the possible streams for the channel.
	 */
	while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
		int wait_fd;
		int ust_metadata_pipe[2];

		health_code_update();

		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
			ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
			if (ret < 0) {
				ERR("Create ust metadata poll pipe");
				goto error;
			}
			wait_fd = ust_metadata_pipe[0];
		} else {
			wait_fd = ustctl_stream_get_wait_fd(ustream);
		}

		/* Allocate consumer stream object. */
		stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
		if (!stream) {
			goto error_alloc;
		}
		stream->ustream = ustream;
		/*
		 * Store it so we can avoid multiple function calls afterwards
		 * since this value is used heavily in the stream threads. This
		 * is UST specific, which is why it's done after allocation.
		 */
		stream->wait_fd = wait_fd;

		/*
		 * Increment channel refcount since the channel reference has now been
		 * assigned in the allocation process above.
		 */
		if (stream->chan->monitor) {
			uatomic_inc(&stream->chan->refcount);
		}

		/*
		 * Order is important, which is why a list is used. On error, the
		 * caller should clean this list.
		 */
		cds_list_add_tail(&stream->send_node, &channel->streams.head);

		ret = ustctl_get_max_subbuf_size(stream->ustream,
				&stream->max_sb_size);
		if (ret < 0) {
			ERR("ustctl_get_max_subbuf_size failed for stream %s",
					stream->name);
			goto error;
		}

		/* Do actions once stream has been received. */
		if (ctx->on_recv_stream) {
			ret = ctx->on_recv_stream(stream);
			if (ret < 0) {
				goto error;
			}
		}

		DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
				stream->name, stream->key, stream->relayd_stream_id);

		/* Set next CPU stream. */
		channel->streams.count = ++cpu;

		/* Keep stream reference when creating metadata. */
		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
			channel->metadata_stream = stream;
			if (channel->monitor) {
				/* Set metadata poll pipe if we created one */
				memcpy(stream->ust_metadata_poll_pipe,
						ust_metadata_pipe,
						sizeof(ust_metadata_pipe));
			}
		}
	}

	return 0;

error:
error_alloc:
	return ret;
}

/*
 * create_posix_shm is never called concurrently within a process.
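 *
 * The shm object is unlinked right after creation, so the returned file
 * descriptor becomes the only reference to it and the memory is
 * reclaimed automatically once all users have closed it.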
 */
static
int create_posix_shm(void)
{
	char tmp_name[NAME_MAX];
	int shmfd, ret;

	ret = snprintf(tmp_name, NAME_MAX, "/ust-shm-consumer-%d", getpid());
	if (ret < 0) {
		PERROR("snprintf");
		return -1;
	}
	/*
	 * Allocate shm, and immediately unlink its shm object, keeping only
	 * the file descriptor as a reference to the object. The name starts
	 * with a '/' so that its resolution is portable across POSIX
	 * implementations.
	 */
	shmfd = shm_open(tmp_name, O_CREAT | O_EXCL | O_RDWR, 0700);
	if (shmfd < 0) {
		PERROR("shm_open");
		goto error_shm_open;
	}
	ret = shm_unlink(tmp_name);
	if (ret < 0 && errno != ENOENT) {
		PERROR("shm_unlink");
		goto error_shm_release;
	}
	return shmfd;

error_shm_release:
	ret = close(shmfd);
	if (ret) {
		PERROR("close");
	}
error_shm_open:
	return -1;
}

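/*
 * Open the backing file descriptor of a stream: an anonymous POSIX shm
 * object when the channel has no shm_path, otherwise a per-cpu file
 * under shm_path created with the channel's credentials.
 *
 * Return a file descriptor on success else a negative value.
 */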
static int open_ust_stream_fd(struct lttng_consumer_channel *channel,
		struct ustctl_consumer_channel_attr *attr,
		int cpu)
{
	char shm_path[PATH_MAX];
	int ret;

	if (!channel->shm_path[0]) {
		return create_posix_shm();
	}
	ret = get_stream_shm_path(shm_path, channel->shm_path, cpu);
	if (ret) {
		goto error_shm_path;
	}
	return run_as_open(shm_path,
			O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR,
			channel->uid, channel->gid);

error_shm_path:
	return -1;
}

/*
 * Create a UST channel with the given attributes and send it to the session
 * daemon using the ust ctl API.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_channel(struct lttng_consumer_channel *channel,
		struct ustctl_consumer_channel_attr *attr,
		struct ustctl_consumer_channel **ust_chanp)
{
	int ret, nr_stream_fds, i, j;
	int *stream_fds;
	struct ustctl_consumer_channel *ust_channel;

	assert(channel);
	assert(attr);
	assert(ust_chanp);

	DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
			"subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
			"switch_timer_interval: %u, read_timer_interval: %u, "
			"output: %d, type: %d", attr->overwrite, attr->subbuf_size,
			attr->num_subbuf, attr->switch_timer_interval,
			attr->read_timer_interval, attr->output, attr->type);

	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA)
		nr_stream_fds = 1;
	else
		nr_stream_fds = ustctl_get_nr_stream_per_channel();
	stream_fds = zmalloc(nr_stream_fds * sizeof(*stream_fds));
	if (!stream_fds) {
		ret = -1;
		goto error_alloc;
	}
	for (i = 0; i < nr_stream_fds; i++) {
		stream_fds[i] = open_ust_stream_fd(channel, attr, i);
		if (stream_fds[i] < 0) {
			ret = -1;
			goto error_open;
		}
	}
	ust_channel = ustctl_create_channel(attr, stream_fds, nr_stream_fds);
	if (!ust_channel) {
		ret = -1;
		goto error_create;
	}
	channel->nr_stream_fds = nr_stream_fds;
	channel->stream_fds = stream_fds;
	*ust_chanp = ust_channel;

	return 0;

error_create:
error_open:
	for (j = i - 1; j >= 0; j--) {
		int closeret;

		closeret = close(stream_fds[j]);
		if (closeret) {
			PERROR("close");
		}
		if (channel->shm_path[0]) {
			char shm_path[PATH_MAX];

			closeret = get_stream_shm_path(shm_path,
					channel->shm_path, j);
			if (closeret) {
				ERR("Cannot get stream shm path");
			}
			closeret = run_as_unlink(shm_path,
					channel->uid, channel->gid);
			if (closeret) {
				PERROR("unlink %s", shm_path);
			}
		}
	}
	/* Try to rmdir all directories under shm_path root. */
	if (channel->root_shm_path[0]) {
		(void) run_as_recursive_rmdir(channel->root_shm_path,
				channel->uid, channel->gid);
	}
	free(stream_fds);
error_alloc:
	return ret;
}

/*
 * Send a single given stream to the session daemon using the sock.
 *
 * Return 0 on success else a negative value.
 */
static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);
	assert(sock >= 0);

	DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);

	/* Send stream to session daemon. */
	ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}

/*
 * Send channel to sessiond.
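 *
 * The exchange is, in order: a status message announcing the transfer,
 * the channel object itself, every stream of the channel, and finally a
 * NULL stream acting as an end-of-transmission marker. When a relayd is
 * attached to the channel, the streams are sent to it beforehand.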
 *
 * Return 0 on success or else a negative value.
 */
static int send_sessiond_channel(int sock,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *relayd_error)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttng_consumer_stream *stream;
	uint64_t relayd_id = -1ULL;

	assert(channel);
	assert(ctx);
	assert(sock >= 0);

	DBG("UST consumer sending channel %s to sessiond", channel->name);

	if (channel->relayd_id != (uint64_t) -1ULL) {
		cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

			health_code_update();

			/* Try to send the stream to the relayd if one is available. */
			ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
			if (ret < 0) {
				/*
				 * Flag that the relayd was the problem here, probably due
				 * to a communication error on the socket.
				 */
				if (relayd_error) {
					*relayd_error = 1;
				}
				ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			}
			if (relayd_id == -1ULL) {
				relayd_id = stream->relayd_id;
			}
		}
	}

	/* Inform sessiond that we are about to send channel and streams. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		/*
		 * Either the session daemon is not responding or the relayd died so
		 * we stop now.
		 */
		goto error;
	}

	/* Send channel to sessiond. */
	ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
	if (ret < 0) {
		goto error;
	}

	ret = ustctl_channel_close_wakeup_fd(channel->uchan);
	if (ret < 0) {
		goto error;
	}

	/* The channel was sent successfully to the sessiond at this point. */
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

		health_code_update();

		/* Send stream to session daemon. */
		ret = send_sessiond_stream(sock, stream);
		if (ret < 0) {
			goto error;
		}
	}

	/* Tell sessiond there is no more stream. */
	ret = ustctl_send_stream_to_sessiond(sock, NULL);
	if (ret < 0) {
		goto error;
	}

	DBG("UST consumer NULL stream sent to sessiond");

	return 0;

error:
	if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		ret = -1;
	}
	return ret;
}

/*
 * Create a channel and its streams and add the channel to the internal
 * consumer state. The created streams must ONLY be sent once the
 * GET_CHANNEL command is received.
 *
 * Return 0 on success. On error, a negative value is returned and the
 * channel MUST be destroyed by consumer_del_channel().
 */
static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
		struct lttng_consumer_channel *channel,
		struct ustctl_consumer_channel_attr *attr)
{
	int ret;

	assert(ctx);
	assert(channel);
	assert(attr);

	/*
	 * This value is still used by the kernel consumer since for the kernel,
	 * the stream ownership is not IN the consumer, so we need to know the
	 * number of remaining streams that need to be initialized in order to
	 * know when to delete the channel (see consumer.c).
	 *
	 * As for the user space tracer, the consumer creates the streams and
	 * sends them to the session daemon, which only forwards them to the
	 * application once every stream of a channel has been received. This
	 * makes the value useless here because the streams will be added to
	 * the poll thread before the application receives them. This ensures
	 * that a stream cannot hang up during initialization of a channel.
	 */
	channel->nb_init_stream_left = 0;

	/* The reply msg status is handled in the following call. */
	ret = create_ust_channel(channel, attr, &channel->uchan);
	if (ret < 0) {
		goto end;
	}

	channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);

	/*
	 * For the snapshots (no monitor), we create the metadata streams
	 * on demand, not during the channel creation.
	 */
	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
		ret = 0;
		goto end;
	}

	/* Open all streams for this channel. */
	ret = create_ust_streams(channel, ctx);
	if (ret < 0) {
		goto end;
	}

end:
	return ret;
}

/*
 * Send all streams of a channel to the right thread handling them.
 *
 * On error, return a negative value else 0 on success.
 */
static int send_streams_to_thread(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_stream *stream, *stmp;

	assert(channel);
	assert(ctx);

	/* Send streams to the corresponding thread. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {

		health_code_update();

		/* Sending the stream to the thread. */
		ret = send_stream_to_thread(stream, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			/* Remove node from the channel stream list. */
			cds_list_del(&stream->send_node);
			goto error;
		}

		/* Remove node from the channel stream list. */
		cds_list_del(&stream->send_node);
	}

error:
	return ret;
}

/*
 * Flush channel's streams using the given key to retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int flush_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer flush channel key %" PRIu64, chan_key);

	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, flush it. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {

		health_code_update();

		pthread_mutex_lock(&stream->lock);

		/*
		 * Protect against concurrent teardown of a stream.
		 */
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}

		if (!stream->quiescent) {
			ustctl_flush_buffer(stream->ustream, 0);
			stream->quiescent = true;
		}
next:
		pthread_mutex_unlock(&stream->lock);
	}
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Clear quiescent state from channel's streams using the given key to
 * retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int clear_quiescent_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);

	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, clear quiescent state. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {

		health_code_update();

		pthread_mutex_lock(&stream->lock);
		stream->quiescent = false;
		pthread_mutex_unlock(&stream->lock);
	}
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
 * RCU read side lock MUST be acquired before calling this function.
 *
 * Return 0 on success else an LTTng error code.
 */
static int close_metadata(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	unsigned int channel_monitor;

	DBG("UST consumer close metadata key %" PRIu64, chan_key);

	channel = consumer_find_channel(chan_key);
	if (!channel) {
		/*
		 * This is possible if the metadata thread has issued a delete
		 * because the endpoint of the stream hung up. There is no way the
		 * session daemon can know about it, thus use a DBG instead of an
		 * actual error.
		 */
		DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	channel_monitor = channel->monitor;
	if (cds_lfht_is_node_deleted(&channel->node.node)) {
		goto error_unlock;
	}

	lttng_ustconsumer_close_metadata(channel);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	/*
	 * The ownership of a metadata channel depends on the type of
	 * session to which it belongs. In effect, the monitor flag is checked
	 * to determine if this metadata channel is in "snapshot" mode or not.
	 *
	 * In the non-snapshot case, the metadata channel is created along with
	 * a single stream which will remain present until the metadata channel
	 * is destroyed (on the destruction of its session). In this case, the
	 * metadata stream is "monitored" by the metadata poll thread and holds
	 * the ownership of its channel.
	 *
	 * Closing the metadata will cause the metadata stream's "metadata poll
	 * pipe" to be closed. Closing this pipe will wake-up the metadata poll
	 * thread which will teardown the metadata stream which, in turn,
	 * deletes the metadata channel.
	 *
	 * In the snapshot case, the metadata stream is created and destroyed
	 * on every snapshot record. Since the channel doesn't have an owner
	 * other than the session daemon, it is safe to destroy it immediately
	 * on reception of the CLOSE_METADATA command.
	 */
	if (!channel_monitor) {
		/*
		 * The channel and consumer_data locks must be
		 * released before this call since consumer_del_channel
		 * re-acquires the channel and consumer_data locks to teardown
		 * the channel and queue its reclamation by the "call_rcu"
		 * worker thread.
		 */
		consumer_del_channel(channel);
	}

	return ret;
error_unlock:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
error:
	return ret;
}

/*
 * RCU read side lock MUST be acquired before calling this function.
 *
 * Return 0 on success else an LTTng error code.
 */
static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
{
	int ret;
	struct lttng_consumer_channel *metadata;

	DBG("UST consumer setup metadata key %" PRIu64, key);

	metadata = consumer_find_channel(key);
	if (!metadata) {
		ERR("UST consumer push metadata %" PRIu64 " not found", key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto end;
	}

	/*
	 * In no monitor mode, the metadata channel has no stream(s) so skip the
	 * ownership transfer to the metadata thread.
	 */
	if (!metadata->monitor) {
		DBG("Metadata channel in no monitor");
		ret = 0;
		goto end;
	}

	/*
	 * Send metadata stream to relayd if one is available. Availability is
	 * known if the stream is still in the list of the channel.
	 */
	if (cds_list_empty(&metadata->streams.head)) {
		ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
		ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		goto error_no_stream;
	}

	/* Send metadata stream to relayd if needed. */
	if (metadata->metadata_stream->relayd_id != (uint64_t) -1ULL) {
		ret = consumer_send_relayd_stream(metadata->metadata_stream,
				metadata->pathname);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto error;
		}
		ret = consumer_send_relayd_streams_sent(
				metadata->metadata_stream->relayd_id);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			goto error;
		}
	}

	ret = send_streams_to_thread(metadata, ctx);
	if (ret < 0) {
		/*
		 * If we are unable to send the stream to the thread, there is
		 * a big problem so just stop everything.
		 */
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}
	/* List MUST be empty after or else it could be reused. */
	assert(cds_list_empty(&metadata->streams.head));

	ret = 0;
	goto end;

error:
	/*
	 * Delete metadata channel on error. At this point, the metadata stream can
	 * NOT be monitored by the metadata thread thus having the guarantee that
	 * the stream is still in the local stream list of the channel. This call
	 * will make sure to clean that list.
	 */
	consumer_stream_destroy(metadata->metadata_stream, NULL);
	cds_list_del(&metadata->metadata_stream->send_node);
	metadata->metadata_stream = NULL;
error_no_stream:
end:
	return ret;
}

/*
 * Snapshot the whole metadata.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_channel *metadata_channel;
	struct lttng_consumer_stream *metadata_stream;

	assert(path);
	assert(ctx);

	DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	rcu_read_lock();

	metadata_channel = consumer_find_channel(key);
	if (!metadata_channel) {
		ERR("UST snapshot metadata channel not found for key %" PRIu64,
				key);
		ret = -1;
		goto error;
	}
	assert(!metadata_channel->monitor);

	health_code_update();

	/*
	 * Ask the sessiond if we have new metadata waiting and update the
	 * consumer metadata cache.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * The metadata stream is NOT created in no monitor mode when the channel
	 * is created on a sessiond ask channel command.
	 */
	ret = create_ust_streams(metadata_channel, ctx);
	if (ret < 0) {
		goto error;
	}

	metadata_stream = metadata_channel->metadata_stream;
	assert(metadata_stream);

	if (relayd_id != (uint64_t) -1ULL) {
		metadata_stream->relayd_id = relayd_id;
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error_stream;
		}
	} else {
		ret = utils_create_stream_file(path, metadata_stream->name,
				metadata_stream->chan->tracefile_size,
				metadata_stream->tracefile_count_current,
				metadata_stream->uid, metadata_stream->gid, NULL);
		if (ret < 0) {
			goto error_stream;
		}
		metadata_stream->out_fd = ret;
		metadata_stream->tracefile_size_current = 0;
	}

	do {
		health_code_update();

		ret = lttng_consumer_read_subbuffer(metadata_stream, ctx);
		if (ret < 0) {
			goto error_stream;
		}
	} while (ret > 0);

error_stream:
	/*
	 * Clean up the stream completely because the next snapshot will use a
	 * new metadata stream.
	 */
	consumer_stream_destroy(metadata_stream, NULL);
	cds_list_del(&metadata_stream->send_node);
	metadata_channel->metadata_stream = NULL;

error:
	rcu_read_unlock();
	return ret;
}

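/*
 * Compute the address of the current sub-buffer: the stream's mmap base
 * plus the read offset reported by ustctl. The result is typically
 * wrapped in a buffer view, as done in snapshot_channel() below:
 *
 *   ret = get_current_subbuf_addr(stream, &subbuf_addr);
 *   subbuf_view = lttng_buffer_view_init(subbuf_addr, 0, padded_len);
 *
 * Return 0 on success else a negative value.
 */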
static
int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
		const char **addr)
{
	int ret;
	unsigned long mmap_offset;
	const char *mmap_base;

	mmap_base = ustctl_get_mmap_base(stream->ustream);
	if (!mmap_base) {
		ERR("Failed to get mmap base for stream `%s`",
				stream->name);
		ret = -EPERM;
		goto error;
	}

	ret = ustctl_get_mmap_read_offset(stream->ustream, &mmap_offset);
	if (ret != 0) {
		ERR("Failed to get mmap offset for stream `%s`", stream->name);
		ret = -EINVAL;
		goto error;
	}

	*addr = mmap_base + mmap_offset;
error:
	return ret;
}

/*
 * Take a snapshot of all the streams of a channel.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
		uint64_t nb_packets_per_stream, struct lttng_consumer_local_data *ctx)
{
	int ret;
	unsigned use_relayd = 0;
	unsigned long consumed_pos, produced_pos;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;

	assert(path);
	assert(ctx);

	rcu_read_lock();

	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	channel = consumer_find_channel(key);
	if (!channel) {
		ERR("UST snapshot channel not found for key %" PRIu64, key);
		ret = -1;
		goto error;
	}
	assert(!channel->monitor);
	DBG("UST consumer snapshot channel %" PRIu64, key);

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		health_code_update();

		/* Lock stream because we are about to change its state. */
		pthread_mutex_lock(&stream->lock);
		stream->relayd_id = relayd_id;

		if (use_relayd) {
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				goto error_unlock;
			}
		} else {
			ret = utils_create_stream_file(path, stream->name,
					stream->chan->tracefile_size,
					stream->tracefile_count_current,
					stream->uid, stream->gid, NULL);
			if (ret < 0) {
				goto error_unlock;
			}
			stream->out_fd = ret;
			stream->tracefile_size_current = 0;

			DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
					stream->name, stream->key);
		}
		if (relayd_id != -1ULL) {
			ret = consumer_send_relayd_streams_sent(relayd_id);
			if (ret < 0) {
				goto error_unlock;
			}
		}

		/*
		 * If tracing is active, we want to perform a "full" buffer flush.
		 * Else, if quiescent, it has already been done by the prior stop.
		 */
		if (!stream->quiescent) {
			ustctl_flush_buffer(stream->ustream, 0);
		}

		ret = lttng_ustconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking UST snapshot");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced UST snapshot position");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumed UST snapshot position");
			goto error_unlock;
		}

		/*
		 * The original consumed position is returned if the requested
		 * maximum stream size is larger than the possible size of the
		 * snapshot. Also, we assume that the session daemon never sends
		 * a maximum stream size that is lower than the subbuffer size.
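		 *
		 * For instance (hypothetical numbers), with produced_pos = 64 kiB,
		 * max_sb_size = 4 kiB and nb_packets_per_stream = 4, the start
		 * position is expected to be clamped to 48 kiB, so that at most
		 * the last 4 packets are read.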
		 */
		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
				produced_pos, nb_packets_per_stream,
				stream->max_sb_size);

		while (consumed_pos < produced_pos) {
			ssize_t read_len;
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();

			DBG("UST consumer taking snapshot at pos %lu", consumed_pos);

			ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("ustctl_get_subbuf snapshot");
					goto error_close_stream;
				}
				DBG("UST consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
				continue;
			}

			ret = ustctl_get_subbuf_size(stream->ustream, &len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto error_put_subbuf;
			}

			subbuf_view = lttng_buffer_view_init(
					subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(ctx,
					stream, &subbuf_view, padded_len - len,
					NULL);
			if (use_relayd) {
				if (read_len != len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			} else {
				if (read_len != padded_len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			}

			ret = ustctl_put_subbuf(stream->ustream);
			if (ret < 0) {
				ERR("Snapshot ustctl_put_subbuf");
				goto error_close_stream;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Simply close the stream so we can use it on the next snapshot. */
		consumer_stream_close(stream);
		pthread_mutex_unlock(&stream->lock);
	}

	rcu_read_unlock();
	return 0;

error_put_subbuf:
	if (ustctl_put_subbuf(stream->ustream) < 0) {
		ERR("Snapshot ustctl_put_subbuf");
	}
error_close_stream:
	consumer_stream_close(stream);
error_unlock:
	pthread_mutex_unlock(&stream->lock);
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Receive the metadata updates from the sessiond. Supports receiving
 * overlapping metadata, but it needs to always belong to a contiguous
 * range starting from 0.
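 *
 * For example (hypothetical offsets), receiving the range [0, 64) and
 * then [32, 128) is fine, while receiving [64, 128) before the range
 * [0, 64) is complete is not.
 *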
 * Be careful about the locks held when calling this function: it needs
 * the metadata cache flush to concurrently progress in order to
 * complete.
 */
int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
		uint64_t len, uint64_t version,
		struct lttng_consumer_channel *channel, int timer, int wait)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	char *metadata_str;

	DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);

	metadata_str = zmalloc(len * sizeof(char));
	if (!metadata_str) {
		PERROR("zmalloc metadata string");
		ret_code = LTTCOMM_CONSUMERD_ENOMEM;
		goto end;
	}

	health_code_update();

	/* Receive metadata string. */
	ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
	if (ret < 0) {
		/* Session daemon is dead so return gracefully. */
		ret_code = ret;
		goto end_free;
	}

	health_code_update();

	pthread_mutex_lock(&channel->metadata_cache->lock);
	ret = consumer_metadata_cache_write(channel, offset, len, version,
			metadata_str);
	if (ret < 0) {
		/* Unable to handle metadata. Notify session daemon. */
		ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
		/*
		 * Skip metadata flush on write error since the offset and len might
		 * not have been updated which could create an infinite loop below when
		 * waiting for the metadata cache to be flushed.
		 */
		pthread_mutex_unlock(&channel->metadata_cache->lock);
		goto end_free;
	}
	pthread_mutex_unlock(&channel->metadata_cache->lock);

	if (!wait) {
		goto end_free;
	}
	while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
		DBG("Waiting for metadata to be flushed");

		health_code_update();

		usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
	}

end_free:
	free(metadata_str);
end:
	return ret_code;
}

/*
 * Receive command from session daemon and process it.
 *
 * Return 1 on success, else 0 or a negative value.
 */
int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	ssize_t ret;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttcomm_consumer_msg msg;
	struct lttng_consumer_channel *channel = NULL;

	health_code_update();

	ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %zd (expects %zu)",
				ret, sizeof(msg));
		/*
		 * The ret value might be 0, meaning an orderly shutdown, but this
		 * is ok since the caller handles this.
		 */
		if (ret > 0) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
			ret = -1;
		}
		return ret;
	}

	health_code_update();

	/* deprecated */
	assert(msg.cmd_type != LTTNG_CONSUMER_STOP);

	health_code_update();

	/* relayd needs RCU read-side lock */
	rcu_read_lock();

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		/* Session daemon status messages are handled in the following call. */
		consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
				msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
				&msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
				msg.u.relayd_sock.relayd_session_id);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("UST consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == NULL) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		}

		/*
		 * Each relayd socket pair has a refcount of streams attached to it
		 * which tells if the relayd is still active or not depending on the
		 * refcount value.
		 *
		 * This will set the destroy flag of the relayd object and destroy it
		 * if the refcount reaches zero when called.
		 *
		 * The destroy can happen either here or when a stream fd hangs up.
		 */
		if (relayd) {
			consumer_flag_relayd_for_destroy(relayd);
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		rcu_read_unlock();
		return -ENOSYS;
	}
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		int ret, is_data_pending;
		uint64_t id = msg.u.data_pending.session_id;

		DBG("UST consumer data pending command for id %" PRIu64, id);

		is_data_pending = consumer_data_pending(id);

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &is_data_pending,
				sizeof(is_data_pending));
		if (ret < 0) {
			DBG("Error when sending the data pending ret code: %d", ret);
			goto error_fatal;
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
	{
		int ret;
		struct ustctl_consumer_channel_attr attr;

		/* Create a plain object and reserve a channel key. */
		channel = allocate_channel(msg.u.ask_channel.session_id,
				msg.u.ask_channel.pathname, msg.u.ask_channel.name,
				msg.u.ask_channel.uid, msg.u.ask_channel.gid,
				msg.u.ask_channel.relayd_id, msg.u.ask_channel.key,
				(enum lttng_event_output) msg.u.ask_channel.output,
				msg.u.ask_channel.tracefile_size,
				msg.u.ask_channel.tracefile_count,
				msg.u.ask_channel.session_id_per_pid,
				msg.u.ask_channel.monitor,
				msg.u.ask_channel.live_timer_interval,
				msg.u.ask_channel.root_shm_path,
				msg.u.ask_channel.shm_path);
		if (!channel) {
			goto end_channel_error;
		}

		/*
		 * Assign UST application UID to the channel. This value is ignored
		 * for per PID buffers. This is specific to UST, thus setting this
		 * after the allocation.
		 */
		channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;

		/* Build channel attributes from received message. */
		attr.subbuf_size = msg.u.ask_channel.subbuf_size;
		attr.num_subbuf = msg.u.ask_channel.num_subbuf;
		attr.overwrite = msg.u.ask_channel.overwrite;
		attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
		attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
		attr.chan_id = msg.u.ask_channel.chan_id;
		memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));

		/* Match channel buffer type to the UST ABI. */
		switch (msg.u.ask_channel.output) {
		case LTTNG_EVENT_MMAP:
		default:
			attr.output = LTTNG_UST_MMAP;
			break;
		}

		/* Translate and save channel type. */
		switch (msg.u.ask_channel.type) {
		case LTTNG_UST_CHAN_PER_CPU:
			channel->type = CONSUMER_CHANNEL_TYPE_DATA;
			attr.type = LTTNG_UST_CHAN_PER_CPU;
			/*
			 * Set refcount to 1 for owner. Below, we will
			 * pass ownership to the
			 * consumer_thread_channel_poll() thread.
			 */
			channel->refcount = 1;
			break;
		case LTTNG_UST_CHAN_METADATA:
			channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
			attr.type = LTTNG_UST_CHAN_METADATA;
			break;
		default:
			assert(0);
			goto error_fatal;
		}

		health_code_update();

		ret = ask_channel(ctx, sock, channel, &attr);
		if (ret < 0) {
			goto end_channel_error;
		}

		if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
			ret = consumer_metadata_cache_allocate(channel);
			if (ret < 0) {
				ERR("Allocating metadata cache");
				goto end_channel_error;
			}
			consumer_timer_switch_start(channel, attr.switch_timer_interval);
			attr.switch_timer_interval = 0;
		} else {
			consumer_timer_live_start(channel,
					msg.u.ask_channel.live_timer_interval);
		}

		health_code_update();

		/*
		 * Add the channel to the internal state AFTER all streams were created
		 * and successfully sent to session daemon. This way, all streams must
		 * be ready before this channel is visible to the threads.
		 * If add_channel succeeds, ownership of the channel is
		 * passed to consumer_thread_channel_poll().
		 */
		ret = add_channel(channel, ctx);
		if (ret < 0) {
			if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
				if (channel->switch_timer_enabled == 1) {
					consumer_timer_switch_stop(channel);
				}
				consumer_metadata_cache_destroy(channel);
			}
			if (channel->live_timer_enabled == 1) {
				consumer_timer_live_stop(channel);
			}
			goto end_channel_error;
		}

		health_code_update();

		/*
		 * Channel and streams are now created. Inform the session daemon that
		 * everything went well and should wait to receive the channel and
		 * streams with ustctl API.
		 */
		ret = consumer_send_status_channel(sock, channel);
		if (ret < 0) {
			/*
			 * There is probably a problem on the socket.
			 */
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_GET_CHANNEL:
	{
		int ret, relayd_err = 0;
		uint64_t key = msg.u.get_channel.key;
		struct lttng_consumer_channel *channel;

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("UST consumer get channel key %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
			goto end_msg_sessiond;
		}

		health_code_update();

		/* Send everything to sessiond. */
		ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
		if (ret < 0) {
			if (relayd_err) {
				/*
				 * We were unable to send the stream to the relayd, so avoid
				 * sending back a fatal error to the thread since this is OK
				 * and the consumer can continue its work. The above call
				 * has sent the error status message to the sessiond.
				 */
				goto end_nosignal;
			}
			/*
			 * The communication was broken, hence there is a bad state
			 * between the consumer and sessiond so stop everything.
			 */
			goto error_fatal;
		}

		health_code_update();

		/*
		 * In no monitor mode, the streams ownership is kept inside the channel
		 * so don't send them to the data thread.
		 */
		if (!channel->monitor) {
			goto end_msg_sessiond;
		}

		ret = send_streams_to_thread(channel, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error_fatal;
		}
		/* List MUST be empty after or else it could be reused. */
		assert(cds_list_empty(&channel->streams.head));
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
	{
		uint64_t key = msg.u.destroy_channel.key;

		/*
		 * Only called if streams have not been sent to stream
		 * manager thread. However, channel has been sent to
		 * channel manager thread.
		 */
		notify_thread_del_channel(ctx, key);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_METADATA:
	{
		int ret;

		ret = close_metadata(msg.u.close_metadata.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_FLUSH_CHANNEL:
	{
		int ret;

		ret = flush_channel(msg.u.flush_channel.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL:
	{
		int ret;

		ret = clear_quiescent_channel(
				msg.u.clear_quiescent_channel.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_PUSH_METADATA:
	{
		int ret;
		uint64_t len = msg.u.push_metadata.len;
		uint64_t key = msg.u.push_metadata.key;
		uint64_t offset = msg.u.push_metadata.target_offset;
		uint64_t version = msg.u.push_metadata.version;
		struct lttng_consumer_channel *channel;

		DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
				len);

		channel = consumer_find_channel(key);
		if (!channel) {
			/*
			 * This is possible if the metadata creation on the consumer side
			 * is in flight vis-a-vis a concurrent push metadata from the
			 * session daemon. Simply return that the channel failed and the
			 * session daemon will handle that message correctly; this race
			 * is acceptable, thus the DBG() statement here.
			 */
			DBG("UST consumer push metadata %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			goto end_msg_sessiond;
		}

		health_code_update();

		if (!len) {
			/*
			 * There is nothing to receive. We have simply
			 * checked whether the channel can be found.
			 */
			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			goto end_msg_sessiond;
		}

		/* Tell session daemon we are ready to receive the metadata. */
		ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_fatal;
		}

		health_code_update();

		/* Wait for more data. */
		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			goto error_fatal;
		}

		health_code_update();

		ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
				len, version, channel, 0, 1);
		if (ret < 0) {
			/* error receiving from sessiond */
			goto error_fatal;
		} else {
			ret_code = ret;
			goto end_msg_sessiond;
		}
	}
	case LTTNG_CONSUMER_SETUP_METADATA:
	{
		int ret;

		ret = setup_metadata(ctx, msg.u.setup_metadata.key);
		if (ret) {
			ret_code = ret;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		if (msg.u.snapshot_channel.metadata) {
			ret = snapshot_metadata(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					ctx);
			if (ret < 0) {
				ERR("Snapshot metadata failed");
				ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
			}
		} else {
			ret = snapshot_channel(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					msg.u.snapshot_channel.nb_packets_per_stream,
					ctx);
			if (ret < 0) {
				ERR("Snapshot channel failed");
				ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			}
		}

		health_code_update();
		ret = consumer_send_status_msg(sock, ret_code);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		health_code_update();
		break;
	}
	case LTTNG_CONSUMER_DISCARDED_EVENTS:
	{
		int ret = 0;
		uint64_t discarded_events;
		struct lttng_ht_iter iter;
		struct lttng_ht *ht;
		struct lttng_consumer_stream *stream;
		uint64_t id = msg.u.discarded_events.session_id;
		uint64_t key = msg.u.discarded_events.channel_key;

		DBG("UST consumer discarded events command for session id %"
				PRIu64, id);
		rcu_read_lock();
		pthread_mutex_lock(&consumer_data.lock);

		ht = consumer_data.stream_list_ht;

		/*
		 * We only need a reference to the channel, but they are not
		 * directly indexed, so we just use the first matching stream
		 * to extract the information we need, we default to 0 if not
		 * found (no events are dropped if the channel is not yet in
		 * use).
		 */
		discarded_events = 0;
		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				ht->match_fct, &id,
				&iter.iter, stream, node_session_id.node) {
			if (stream->chan->key == key) {
				discarded_events = stream->chan->discarded_events;
				break;
			}
		}
		pthread_mutex_unlock(&consumer_data.lock);
		rcu_read_unlock();

		DBG("UST consumer discarded events command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events));
		if (ret < 0) {
			PERROR("send discarded events");
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_LOST_PACKETS:
	{
		int ret;
		uint64_t lost_packets;
		struct lttng_ht_iter iter;
		struct lttng_ht *ht;
		struct lttng_consumer_stream *stream;
		uint64_t id = msg.u.lost_packets.session_id;
		uint64_t key = msg.u.lost_packets.channel_key;

		DBG("UST consumer lost packets command for session id %"
				PRIu64, id);
		rcu_read_lock();
		pthread_mutex_lock(&consumer_data.lock);

		ht = consumer_data.stream_list_ht;

		/*
		 * We only need a reference to the channel, but they are not
		 * directly indexed, so we just use the first matching stream
		 * to extract the information we need, we default to 0 if not
		 * found (no packets lost if the channel is not yet in use).
		 */
		lost_packets = 0;
		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				ht->match_fct, &id,
				&iter.iter, stream, node_session_id.node) {
			if (stream->chan->key == key) {
				lost_packets = stream->chan->lost_packets;
				break;
			}
		}
		pthread_mutex_unlock(&consumer_data.lock);
		rcu_read_unlock();

		DBG("UST consumer lost packets command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &lost_packets,
				sizeof(lost_packets));
		if (ret < 0) {
			PERROR("send lost packets");
			goto error_fatal;
		}

		break;
	}
	default:
		break;
	}

end_nosignal:
	rcu_read_unlock();

	health_code_update();

	/*
	 * Return 1 to indicate success since the 0 value can be a socket
	 * shutdown during the recv() or send() call.
	 */
	return 1;

end_msg_sessiond:
	/*
	 * The returned value here is not useful since either way we'll return 1 to
	 * the caller because the session daemon socket management is done
	 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
	 */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		goto error_fatal;
	}
	rcu_read_unlock();

	health_code_update();

	return 1;
end_channel_error:
	if (channel) {
		/*
		 * Free channel here since no one has a reference to it. We don't
		 * free after that because a stream can store this pointer.
		 */
		destroy_channel(channel);
	}
	/* We have to send a status channel message indicating an error. */
	ret = consumer_send_status_channel(sock, NULL);
	if (ret < 0) {
		/* Stop everything if session daemon can not be notified. */
		goto error_fatal;
	}
	rcu_read_unlock();

	health_code_update();

	return 1;
error_fatal:
	rcu_read_unlock();
	/* This will issue a consumer stop. */
	return -1;
}

1978 void lttng_ustctl_flush_buffer(struct lttng_consumer_stream *stream,
1979 int producer_active)
1980 {
1981 assert(stream);
1982 assert(stream->ustream);
1983
1984 ustctl_flush_buffer(stream->ustream, producer_active);
1985 }
1986
1987 /*
1988 * Take a snapshot of a specific stream's ring buffer.
1989 *
1990 * Returns 0 on success, < 0 on error
1991 */
1992 int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
1993 {
1994 assert(stream);
1995 assert(stream->ustream);
1996
1997 return ustctl_snapshot(stream->ustream);
1998 }
1999
2000 /*
2001 * Get the produced position
2002 *
2003 * Returns 0 on success, < 0 on error
2004 */
2005 int lttng_ustconsumer_get_produced_snapshot(
2006 struct lttng_consumer_stream *stream, unsigned long *pos)
2007 {
2008 assert(stream);
2009 assert(stream->ustream);
2010 assert(pos);
2011
2012 return ustctl_snapshot_get_produced(stream->ustream, pos);
2013 }
2014
2015 /*
2016 * Get the consumed position
2017 *
2018 * Returns 0 on success, < 0 on error
2019 */
2020 int lttng_ustconsumer_get_consumed_snapshot(
2021 struct lttng_consumer_stream *stream, unsigned long *pos)
2022 {
2023 assert(stream);
2024 assert(stream->ustream);
2025 assert(pos);
2026
2027 return ustctl_snapshot_get_consumed(stream->ustream, pos);
2028 }
2029
2030 void lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream,
2031 int producer)
2032 {
2033 assert(stream);
2034 assert(stream->ustream);
2035
2036 ustctl_flush_buffer(stream->ustream, producer);
2037 }
2038
2039 int lttng_ustconsumer_get_current_timestamp(
2040 struct lttng_consumer_stream *stream, uint64_t *ts)
2041 {
2042 assert(stream);
2043 assert(stream->ustream);
2044 assert(ts);
2045
2046 return ustctl_get_current_timestamp(stream->ustream, ts);
2047 }
2048
2049 int lttng_ustconsumer_get_sequence_number(
2050 struct lttng_consumer_stream *stream, uint64_t *seq)
2051 {
2052 assert(stream);
2053 assert(stream->ustream);
2054 assert(seq);
2055
2056 return ustctl_get_sequence_number(stream->ustream, seq);
2057 }
2058
2059 /*
2060 * Called when the stream signals the consumer that it has hung up.
2061 */
2062 void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
2063 {
2064 assert(stream);
2065 assert(stream->ustream);
2066
2067 pthread_mutex_lock(&stream->lock);
2068 if (!stream->quiescent) {
2069 ustctl_flush_buffer(stream->ustream, 0);
2070 stream->quiescent = true;
2071 }
2072 pthread_mutex_unlock(&stream->lock);
2073 stream->hangup_flush_done = 1;
2074 }
2075
2076 void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
2077 {
2078 int i;
2079
2080 assert(chan);
2081 assert(chan->uchan);
2082
2083 if (chan->switch_timer_enabled == 1) {
2084 consumer_timer_switch_stop(chan);
2085 }
2086 for (i = 0; i < chan->nr_stream_fds; i++) {
2087 int ret;
2088
2089 ret = close(chan->stream_fds[i]);
2090 if (ret) {
2091 PERROR("close");
2092 }
2093 if (chan->shm_path[0]) {
2094 char shm_path[PATH_MAX];
2095
2096 ret = get_stream_shm_path(shm_path, chan->shm_path, i);
2097 if (ret) {
2098 ERR("Cannot get stream shm path");
2099 }
2100 ret = run_as_unlink(shm_path, chan->uid, chan->gid);
2101 if (ret) {
2102 PERROR("unlink %s", shm_path);
2103 }
2104 }
2105 }
2106 }
2107
2108 void lttng_ustconsumer_free_channel(struct lttng_consumer_channel *chan)
2109 {
2110 assert(chan);
2111 assert(chan->uchan);
2112
2113 consumer_metadata_cache_destroy(chan);
2114 ustctl_destroy_channel(chan->uchan);
2115 /* Try to rmdir all directories under shm_path root. */
2116 if (chan->root_shm_path[0]) {
2117 (void) run_as_recursive_rmdir(chan->root_shm_path,
2118 chan->uid, chan->gid);
2119 }
2120 free(chan->stream_fds);
2121 }
2122
2123 void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
2124 {
2125 assert(stream);
2126 assert(stream->ustream);
2127
2128 if (stream->chan->switch_timer_enabled == 1) {
2129 consumer_timer_switch_stop(stream->chan);
2130 }
2131 ustctl_destroy_stream(stream->ustream);
2132 }
2133
2134 int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream)
2135 {
2136 assert(stream);
2137 assert(stream->ustream);
2138
2139 return ustctl_stream_get_wakeup_fd(stream->ustream);
2140 }
2141
2142 int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream)
2143 {
2144 assert(stream);
2145 assert(stream->ustream);
2146
2147 return ustctl_stream_close_wakeup_fd(stream->ustream);
2148 }
2149
2150 /*
2151 * Populate index values of a UST stream. Values are set in big-endian byte order.
2152 *
2153 * Return 0 on success or else a negative value.
2154 */
2155 static int get_index_values(struct ctf_packet_index *index,
2156 struct ustctl_consumer_stream *ustream)
2157 {
2158 int ret;
2159
2160 ret = ustctl_get_timestamp_begin(ustream, &index->timestamp_begin);
2161 if (ret < 0) {
2162 PERROR("ustctl_get_timestamp_begin");
2163 goto error;
2164 }
2165 index->timestamp_begin = htobe64(index->timestamp_begin);
2166
2167 ret = ustctl_get_timestamp_end(ustream, &index->timestamp_end);
2168 if (ret < 0) {
2169 PERROR("ustctl_get_timestamp_end");
2170 goto error;
2171 }
2172 index->timestamp_end = htobe64(index->timestamp_end);
2173
2174 ret = ustctl_get_events_discarded(ustream, &index->events_discarded);
2175 if (ret < 0) {
2176 PERROR("ustctl_get_events_discarded");
2177 goto error;
2178 }
2179 index->events_discarded = htobe64(index->events_discarded);
2180
2181 ret = ustctl_get_content_size(ustream, &index->content_size);
2182 if (ret < 0) {
2183 PERROR("ustctl_get_content_size");
2184 goto error;
2185 }
2186 index->content_size = htobe64(index->content_size);
2187
2188 ret = ustctl_get_packet_size(ustream, &index->packet_size);
2189 if (ret < 0) {
2190 PERROR("ustctl_get_packet_size");
2191 goto error;
2192 }
2193 index->packet_size = htobe64(index->packet_size);
2194
2195 ret = ustctl_get_stream_id(ustream, &index->stream_id);
2196 if (ret < 0) {
2197 PERROR("ustctl_get_stream_id");
2198 goto error;
2199 }
2200 index->stream_id = htobe64(index->stream_id);
2201
2202 ret = ustctl_get_instance_id(ustream, &index->stream_instance_id);
2203 if (ret < 0) {
2204 PERROR("ustctl_get_instance_id");
2205 goto error;
2206 }
2207 index->stream_instance_id = htobe64(index->stream_instance_id);
2208
2209 ret = ustctl_get_sequence_number(ustream, &index->packet_seq_num);
2210 if (ret < 0) {
2211 PERROR("ustctl_get_sequence_number");
2212 goto error;
2213 }
2214 index->packet_seq_num = htobe64(index->packet_seq_num);
2215
2216 error:
2217 return ret;
2218 }
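/*
 * Editor's note: every field above follows the same read-then-byte-swap
 * pattern. A minimal sketch of how the repetition could be factored with a
 * local helper macro (hypothetical, not part of this file):
 *
 *	#define GET_INDEX_FIELD(ustream, index, getter, field)		\
 *		do {							\
 *			ret = getter(ustream, &(index)->field);		\
 *			if (ret < 0) {					\
 *				PERROR(#getter);			\
 *				goto error;				\
 *			}						\
 *			(index)->field = htobe64((index)->field);	\
 *		} while (0)
 *
 *	GET_INDEX_FIELD(ustream, index, ustctl_get_timestamp_begin,
 *			timestamp_begin);
 *
 * The explicit per-field calls are kept in the code proper, which makes
 * each getter easy to grep for.
 */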
2219
2220 static
2221 void metadata_stream_reset_cache(struct lttng_consumer_stream *stream,
2222 struct consumer_metadata_cache *cache)
2223 {
2224 DBG("Metadata stream update to version %" PRIu64,
2225 cache->version);
2226 stream->ust_metadata_pushed = 0;
2227 stream->metadata_version = cache->version;
2228 stream->reset_metadata_flag = 1;
2229 }
2230
2231 /*
2232 * Check if the version of the metadata stream and metadata cache match.
2233 * If the cache got updated, reset the metadata stream.
2234 * The stream lock and metadata cache lock MUST be held.
2235 * Return 0 on success, a negative value on error.
2236 */
2237 static
2238 int metadata_stream_check_version(struct lttng_consumer_stream *stream)
2239 {
2240 int ret = 0;
2241 struct consumer_metadata_cache *cache = stream->chan->metadata_cache;
2242
2243 if (cache->version == stream->metadata_version) {
2244 goto end;
2245 }
2246 metadata_stream_reset_cache(stream, cache);
2247
2248 end:
2249 return ret;
2250 }
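/*
 * Example of the version check above: if the session daemon regenerates
 * the metadata (cache->version bumped from 0 to 1) while
 * stream->metadata_version is still 0, the stream is reset:
 * ust_metadata_pushed returns to 0, reset_metadata_flag is raised, and the
 * whole cache content is re-pushed from offset 0 by the following
 * commit_one_metadata_packet() calls.
 */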
2251
2252 /*
2253 * Write up to one packet from the metadata cache to the channel.
2254 *
2255 * Returns the number of bytes pushed in the cache, or a negative value
2256 * on error.
2257 */
2258 static
2259 int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
2260 {
2261 ssize_t write_len;
2262 int ret;
2263
2264 pthread_mutex_lock(&stream->chan->metadata_cache->lock);
2265 ret = metadata_stream_check_version(stream);
2266 if (ret < 0) {
2267 goto end;
2268 }
2269 if (stream->chan->metadata_cache->max_offset
2270 == stream->ust_metadata_pushed) {
2271 ret = 0;
2272 goto end;
2273 }
2274
2275 write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
2276 &stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
2277 stream->chan->metadata_cache->max_offset
2278 - stream->ust_metadata_pushed);
2279 assert(write_len != 0);
2280 if (write_len < 0) {
2281 ERR("Writing one metadata packet");
2282 ret = -1;
2283 goto end;
2284 }
2285 stream->ust_metadata_pushed += write_len;
2286
2287 assert(stream->chan->metadata_cache->max_offset >=
2288 stream->ust_metadata_pushed);
2289 ret = write_len;
2290
2291 end:
2292 pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
2293 return ret;
2294 }
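/*
 * Example of the cache arithmetic above (values are illustrative): with
 * max_offset = 4096 and ust_metadata_pushed = 1024, up to 3072 bytes
 * starting at cache offset 1024 are offered to
 * ustctl_write_one_packet_to_channel(). If it writes, say, 2048 bytes (one
 * packet worth), ust_metadata_pushed becomes 3072 and the next call pushes
 * the remainder.
 */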
2295
2296
2297 /*
2298 * Sync metadata: request it from the session daemon and take a snapshot so
2299 * that the metadata thread can consume it.
2300 *
2301 * Metadata stream lock is held here, but we need to release it when
2302 * interacting with sessiond, else we cause a deadlock with live
2303 * awaiting on metadata to be pushed out.
2304 *
2305 * Return 0 if new metadata is available, EAGAIN if more metadata is left
2306 * to consume, ENODATA if the metadata stream is empty, or a negative value on error.
2307 */
2308 int lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
2309 struct lttng_consumer_stream *metadata)
2310 {
2311 int ret;
2312 int retry = 0;
2313
2314 assert(ctx);
2315 assert(metadata);
2316
2317 pthread_mutex_unlock(&metadata->lock);
2318 /*
2319 * Request metadata from the sessiond, but don't wait for the flush
2320 * because we locked the metadata thread.
2321 */
2322 ret = lttng_ustconsumer_request_metadata(ctx, metadata->chan, 0, 0);
2323 pthread_mutex_lock(&metadata->lock);
2324 if (ret < 0) {
2325 goto end;
2326 }
2327
2328 ret = commit_one_metadata_packet(metadata);
2329 if (ret <= 0) {
2330 goto end;
2331 } else {
2332 retry = 1;
2333 }
2334
2335 ustctl_flush_buffer(metadata->ustream, 1);
2336 ret = ustctl_snapshot(metadata->ustream);
2337 if (ret < 0) {
2338 if (errno != EAGAIN) {
2339 ERR("Sync metadata, taking UST snapshot");
2340 goto end;
2341 }
2342 DBG("No new metadata when syncing them.");
2343 /* No new metadata, exit. */
2344 ret = ENODATA;
2345 goto end;
2346 }
2347
2348 /*
2349 * After this flush, we still need to extract metadata.
2350 */
2351 if (retry) {
2352 ret = EAGAIN;
2353 }
2354
2355 end:
2356 return ret;
2357 }
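/*
 * A minimal sketch of how a caller can drive the sync above, based on the
 * return-code contract (the loop shape is an editorial assumption, not a
 * copy of the actual metadata synchronization path):
 *
 *	int status;
 *
 *	do {
 *		status = lttng_ustconsumer_sync_metadata(ctx, metadata);
 *	} while (status == EAGAIN);	... more metadata left, sync again
 *
 *	if (status == ENODATA) {
 *		... metadata stream empty, nothing to read for now
 *	} else if (status < 0) {
 *		... hard error, tear down the metadata stream
 *	}
 */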
2358
2359 /*
2360 * Return 0 on success else a negative value.
2361 */
2362 static int notify_if_more_data(struct lttng_consumer_stream *stream,
2363 struct lttng_consumer_local_data *ctx)
2364 {
2365 int ret;
2366 struct ustctl_consumer_stream *ustream;
2367
2368 assert(stream);
2369 assert(ctx);
2370
2371 ustream = stream->ustream;
2372
2373 /*
2374 * First, we are going to check if there is a new subbuffer available
2375 * before reading the stream wait_fd.
2376 */
2377 /* Get the next subbuffer */
2378 ret = ustctl_get_next_subbuf(ustream);
2379 if (ret) {
2380 /* No more data found, flag the stream. */
2381 stream->has_data = 0;
2382 ret = 0;
2383 goto end;
2384 }
2385
2386 ret = ustctl_put_subbuf(ustream);
2387 assert(!ret);
2388
2389 /* This stream still has data. Flag it and wake up the data thread. */
2390 stream->has_data = 1;
2391
2392 if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) {
2393 ssize_t writelen;
2394
2395 writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1);
2396 if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
2397 ret = writelen;
2398 goto end;
2399 }
2400
2401 /* The wake up pipe has been notified. */
2402 ctx->has_wakeup = 1;
2403 }
2404 ret = 0;
2405
2406 end:
2407 return ret;
2408 }
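/*
 * The "!" byte written above only needs to wake up the data thread; its
 * value is irrelevant. A rough sketch of the matching read side (editorial;
 * the actual draining is done by the data polling thread):
 *
 *	char dummy;
 *	ssize_t readlen;
 *
 *	readlen = lttng_pipe_read(ctx->consumer_wakeup_pipe, &dummy, 1);
 *	if (readlen == 1) {
 *		ctx->has_wakeup = 0;	... drained, flag can be re-armed
 *	}
 */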
2409
2410 static
2411 int update_stream_stats(struct lttng_consumer_stream *stream)
2412 {
2413 int ret;
2414 uint64_t seq, discarded;
2415
2416 ret = ustctl_get_sequence_number(stream->ustream, &seq);
2417 if (ret < 0) {
2418 PERROR("ustctl_get_sequence_number");
2419 goto end;
2420 }
2421 /*
2422 * Initialize the sequence from the first extracted packet, since it may
2423 * not start at 0 (for example, if the consumer was not connected to the
2424 * session right from the beginning).
2425 */
2426 if (stream->last_sequence_number == -1ULL) {
2427 stream->last_sequence_number = seq;
2428 } else if (seq > stream->last_sequence_number) {
2429 stream->chan->lost_packets += seq -
2430 stream->last_sequence_number - 1;
2431 } else {
2432 /* seq <= last_sequence_number */
2433 ERR("Sequence number inconsistent : prev = %" PRIu64
2434 ", current = %" PRIu64,
2435 stream->last_sequence_number, seq);
2436 ret = -1;
2437 goto end;
2438 }
2439 stream->last_sequence_number = seq;
2440
2441 ret = ustctl_get_events_discarded(stream->ustream, &discarded);
2442 if (ret < 0) {
2443 PERROR("kernctl_get_events_discarded");
2444 goto end;
2445 }
2446 if (discarded < stream->last_discarded_events) {
2447 /*
2448 * Overflow has occurred. We assume only one wrap-around
2449 * has occurred.
2450 */
2451 stream->chan->discarded_events +=
2452 (1ULL << (CAA_BITS_PER_LONG - 1)) -
2453 stream->last_discarded_events + discarded;
2454 } else {
2455 stream->chan->discarded_events += discarded -
2456 stream->last_discarded_events;
2457 }
2458 stream->last_discarded_events = discarded;
2459 ret = 0;
2460
2461 end:
2462 return ret;
2463 }
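/*
 * Worked example for the accounting above: if the previous sequence number
 * was 5 and the current packet carries 9, packets 6, 7 and 8 never reached
 * the consumer, so 9 - 5 - 1 = 3 packets are added to lost_packets. For the
 * discarded-events counter, reading 10 after a previous value of
 * 2^(CAA_BITS_PER_LONG - 1) - 5 is treated as a single wrap-around, adding
 * 5 + 10 = 15 events rather than a huge bogus delta.
 */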
2464
2465 /*
2466 * Read subbuffer from the given stream.
2467 *
2468 * Stream lock MUST be acquired.
2469 *
2470 * Return 0 on success else a negative value.
2471 */
2472 int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
2473 struct lttng_consumer_local_data *ctx)
2474 {
2475 unsigned long len, subbuf_size, padding;
2476 int err, write_index = 1;
2477 long ret = 0;
2478 struct ustctl_consumer_stream *ustream;
2479 struct ctf_packet_index index;
2480 const char *subbuf_addr;
2481 struct lttng_buffer_view subbuf_view;
2482
2483 assert(stream);
2484 assert(stream->ustream);
2485 assert(ctx);
2486
2487 DBG("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
2488 stream->name);
2489
2490 /* Ease our life for what's next. */
2491 ustream = stream->ustream;
2492
2493 /*
2494 * We can consume the 1 byte written into the wait_fd by UST. Don't trigger
2495 * error if we cannot read this one byte (read returns 0), or if the error
2496 * is EAGAIN or EWOULDBLOCK.
2497 *
2498 * This is only done when the stream is monitored by a thread, before the
2499 * post-hangup flush is done, and if the stream is not already flagged as
2500 * having data: there might be nothing to consume in the wait fd while data
2501 * is still flagged as available through the consumer wake up pipe.
2502 */
2503 if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) {
2504 char dummy;
2505 ssize_t readlen;
2506
2507 readlen = lttng_read(stream->wait_fd, &dummy, 1);
2508 if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
2509 ret = readlen;
2510 goto end;
2511 }
2512 }
2513
2514 retry:
2515 /* Get the next subbuffer */
2516 err = ustctl_get_next_subbuf(ustream);
2517 if (err != 0) {
2518 /*
2519 * Populate metadata info if the existing info has
2520 * already been read.
2521 */
2522 if (stream->metadata_flag) {
2523 ret = commit_one_metadata_packet(stream);
2524 if (ret <= 0) {
2525 goto end;
2526 }
2527 ustctl_flush_buffer(stream->ustream, 1);
2528 goto retry;
2529 }
2530
2531 ret = err; /* ustctl_get_next_subbuf returns negative, the caller expects a positive value. */
2532 /*
2533 * This is a debug message even for a single-threaded consumer,
2534 * because poll() has more relaxed criteria than get_next_subbuf(),
2535 * so get_next_subbuf() may fail for short race windows where poll()
2536 * would issue wakeups.
2537 */
2538 DBG("Reserving sub buffer failed (everything is normal, "
2539 "it is due to concurrency) [ret: %d]", err);
2540 goto end;
2541 }
2542 assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);
2543
2544 if (!stream->metadata_flag) {
2545 index.offset = htobe64(stream->out_fd_offset);
2546 ret = get_index_values(&index, ustream);
2547 if (ret < 0) {
2548 err = ustctl_put_subbuf(ustream);
2549 assert(err == 0);
2550 goto end;
2551 }
2552
2553 /* Update the stream's sequence and discarded events count. */
2554 ret = update_stream_stats(stream);
2555 if (ret < 0) {
2556 PERROR("kernctl_get_events_discarded");
2557 err = ustctl_put_subbuf(ustream);
2558 assert(err == 0);
2559 goto end;
2560 }
2561 } else {
2562 write_index = 0;
2563 }
2564
2565 /* Get the full padded subbuffer size */
2566 err = ustctl_get_padded_subbuf_size(ustream, &len);
2567 assert(err == 0);
2568
2569 /* Get subbuffer data size (without padding) */
2570 err = ustctl_get_subbuf_size(ustream, &subbuf_size);
2571 assert(err == 0);
2572
2573 /* Make sure we don't get a subbuffer size bigger than the padded */
2574 assert(len >= subbuf_size);
2575
2576 padding = len - subbuf_size;
2577
2578 ret = get_current_subbuf_addr(stream, &subbuf_addr);
2579 if (ret) {
2580 write_index = 0;
2581 goto error_put_subbuf;
2582 }
2583
2584 subbuf_view = lttng_buffer_view_init(subbuf_addr, 0, len);
2585
2586 /* write the subbuffer to the tracefile */
2587 ret = lttng_consumer_on_read_subbuffer_mmap(
2588 ctx, stream, &subbuf_view, padding, &index);
2589 /*
2590 * The mmap operation should write subbuf_size bytes of data when network
2591 * streaming, or the full padded size (len) when we are _not_
2592 * streaming.
2593 */
2594 if ((ret != subbuf_size && stream->relayd_id != (uint64_t) -1ULL) ||
2595 (ret != len && stream->relayd_id == (uint64_t) -1ULL)) {
2596 /*
2597 * Display the error but continue processing to try to release the
2598 * subbuffer. This is a DBG statement since any unexpected kill or
2599 * signal, the application gets unregistered, relayd gets closed or
2600 * anything that affects the buffer lifetime will trigger this error.
2601 * So, for the sake of the user, don't print this error since it can
2602 * happen and it is OK with the code flow.
2603 */
2604 DBG("Error writing to tracefile "
2605 "(ret: %ld != len: %lu != subbuf_size: %lu)",
2606 ret, len, subbuf_size);
2607 write_index = 0;
2608 }
2609 error_put_subbuf:
2610 err = ustctl_put_next_subbuf(ustream);
2611 assert(err == 0);
2612
2613 /*
2614 * This will consume the byte on the wait_fd if and only if there is no
2615 * next subbuffer to be acquired.
2616 */
2617 if (!stream->metadata_flag) {
2618 ret = notify_if_more_data(stream, ctx);
2619 if (ret < 0) {
2620 goto end;
2621 }
2622 }
2623
2624 /* Write index if needed. */
2625 if (!write_index) {
2626 goto end;
2627 }
2628
2629 if (stream->chan->live_timer_interval && !stream->metadata_flag) {
2630 /*
2631 * In live, block until all the metadata is sent.
2632 */
2633 pthread_mutex_lock(&stream->metadata_timer_lock);
2634 assert(!stream->missed_metadata_flush);
2635 stream->waiting_on_metadata = true;
2636 pthread_mutex_unlock(&stream->metadata_timer_lock);
2637
2638 err = consumer_stream_sync_metadata(ctx, stream->session_id);
2639
2640 pthread_mutex_lock(&stream->metadata_timer_lock);
2641 stream->waiting_on_metadata = false;
2642 if (stream->missed_metadata_flush) {
2643 stream->missed_metadata_flush = false;
2644 pthread_mutex_unlock(&stream->metadata_timer_lock);
2645 (void) consumer_flush_ust_index(stream);
2646 } else {
2647 pthread_mutex_unlock(&stream->metadata_timer_lock);
2648 }
2649
2650 if (err < 0) {
2651 goto end;
2652 }
2653 }
2654
2655 assert(!stream->metadata_flag);
2656 err = consumer_stream_write_index(stream, &index);
2657 if (err < 0) {
2658 goto end;
2659 }
2660
2661 end:
2662 return ret;
2663 }
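/*
 * Note on the buffer view used above: instead of handing a raw
 * (addr, len) pair to the mmap write path, the sub-buffer is wrapped in an
 * lttng_buffer_view spanning [0, len) of the mmap'd area. The pattern,
 * condensed from the function body:
 *
 *	const char *subbuf_addr;
 *	struct lttng_buffer_view subbuf_view;
 *
 *	ret = get_current_subbuf_addr(stream, &subbuf_addr);
 *	...
 *	subbuf_view = lttng_buffer_view_init(subbuf_addr, 0, len);
 *	ret = lttng_consumer_on_read_subbuffer_mmap(ctx, stream,
 *			&subbuf_view, padding, &index);
 *
 * This keeps the bounds attached to the pointer for the whole write path.
 */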
2664
2665 /*
2666 * Called when a stream is created.
2667 *
2668 * Return 0 on success or else a negative value.
2669 */
2670 int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
2671 {
2672 int ret;
2673
2674 assert(stream);
2675
2676 /* Don't create anything if this is set for streaming. */
2677 if (stream->relayd_id == (uint64_t) -1ULL && stream->chan->monitor) {
2678 ret = utils_create_stream_file(stream->chan->pathname, stream->name,
2679 stream->chan->tracefile_size, stream->tracefile_count_current,
2680 stream->uid, stream->gid, NULL);
2681 if (ret < 0) {
2682 goto error;
2683 }
2684 stream->out_fd = ret;
2685 stream->tracefile_size_current = 0;
2686
2687 if (!stream->metadata_flag) {
2688 struct lttng_index_file *index_file;
2689
2690 index_file = lttng_index_file_create(stream->chan->pathname,
2691 stream->name, stream->uid, stream->gid,
2692 stream->chan->tracefile_size,
2693 stream->tracefile_count_current,
2694 CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
2695 if (!index_file) {
2696 goto error;
2697 }
2698 stream->index_file = index_file;
2699 }
2700 }
2701 ret = 0;
2702
2703 error:
2704 return ret;
2705 }
2706
2707 /*
2708 * Check if data is still being extracted from the buffers for a specific
2709 * stream. Consumer data lock MUST be acquired before calling this function
2710 * and the stream lock.
2711 *
2712 * Return 1 if the traced data is still being extracted, else 0, meaning
2713 * that the data is available for trace viewer reading.
2714 */
2715 int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
2716 {
2717 int ret;
2718
2719 assert(stream);
2720 assert(stream->ustream);
2721
2722 DBG("UST consumer checking data pending");
2723
2724 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
2725 ret = 0;
2726 goto end;
2727 }
2728
2729 if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
2730 uint64_t contiguous, pushed;
2731
2732 /* Ease our life a bit. */
2733 contiguous = stream->chan->metadata_cache->max_offset;
2734 pushed = stream->ust_metadata_pushed;
2735
2736 /*
2737 * We can simply check whether all contiguously available data
2738 * has been pushed to the ring buffer, since the push operation
2739 * is performed within get_next_subbuf(), and because both
2740 * get_next_subbuf() and put_next_subbuf() are issued atomically
2741 * thanks to the stream lock within
2742 * lttng_ustconsumer_read_subbuffer(). This basically means that
2743 * whenever ust_metadata_pushed is incremented, the associated
2744 * metadata has been consumed from the metadata stream.
2745 */
2746 DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
2747 contiguous, pushed);
2748 assert(((int64_t) (contiguous - pushed)) >= 0);
2749 if ((contiguous != pushed) ||
2750 (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
2751 ret = 1; /* Data is pending */
2752 goto end;
2753 }
2754 } else {
2755 ret = ustctl_get_next_subbuf(stream->ustream);
2756 if (ret == 0) {
2757 /*
2758 * There is still data so let's put back this
2759 * subbuffer.
2760 */
2761 ret = ustctl_put_subbuf(stream->ustream);
2762 assert(ret == 0);
2763 ret = 1; /* Data is pending */
2764 goto end;
2765 }
2766 }
2767
2768 /* Data is NOT pending so ready to be read. */
2769 ret = 0;
2770
2771 end:
2772 return ret;
2773 }
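/*
 * Example of the metadata pending check above: with contiguous = 8192 and
 * pushed = 4096, 4096 bytes of cached metadata have not been pushed to the
 * ring buffer yet, so 1 (pending) is returned. contiguous == 0 also reports
 * pending: an empty cache means the metadata has not been generated yet,
 * which must not be confused with "fully consumed".
 */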
2774
2775 /*
2776 * Stop a given metadata channel timer if enabled and close the wait fd which
2777 * is the poll pipe of the metadata stream.
2778 *
2779 * This MUST be called with the metadata channel acquired.
2780 */
2781 void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
2782 {
2783 int ret;
2784
2785 assert(metadata);
2786 assert(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA);
2787
2788 DBG("Closing metadata channel key %" PRIu64, metadata->key);
2789
2790 if (metadata->switch_timer_enabled == 1) {
2791 consumer_timer_switch_stop(metadata);
2792 }
2793
2794 if (!metadata->metadata_stream) {
2795 goto end;
2796 }
2797
2798 /*
2799 * Close the write side so that the thread monitoring the stream, if any,
2800 * wakes up and cleans up the metadata stream.
2801 */
2802 if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) {
2803 ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]);
2804 if (ret < 0) {
2805 PERROR("closing metadata pipe write side");
2806 }
2807 metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1;
2808 }
2809
2810 end:
2811 return;
2812 }
2813
2814 /*
2815 * Close every metadata stream wait fd of the metadata hash table. This
2816 * function MUST be used very carefully so not to run into a race between the
2817 * metadata thread handling streams and this function closing their wait fd.
2818 *
2819 * For UST, this is used when the session daemon hangs up. It is the metadata
2820 * producer, so calling this is safe because we are assured that no state change
2821 * can occur in the metadata thread for the streams in the hash table.
2822 */
2823 void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
2824 {
2825 struct lttng_ht_iter iter;
2826 struct lttng_consumer_stream *stream;
2827
2828 assert(metadata_ht);
2829 assert(metadata_ht->ht);
2830
2831 DBG("UST consumer closing all metadata streams");
2832
2833 rcu_read_lock();
2834 cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
2835 node.node) {
2836
2837 health_code_update();
2838
2839 pthread_mutex_lock(&stream->chan->lock);
2840 lttng_ustconsumer_close_metadata(stream->chan);
2841 pthread_mutex_unlock(&stream->chan->lock);
2842
2843 }
2844 rcu_read_unlock();
2845 }
2846
2847 void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
2848 {
2849 int ret;
2850
2851 ret = ustctl_stream_close_wakeup_fd(stream->ustream);
2852 if (ret < 0) {
2853 ERR("Unable to close wakeup fd");
2854 }
2855 }
2856
2857 /*
2858 * Please refer to consumer-timer.c before adding any lock within this
2859 * function or any of its callees. Timers have a very strict locking
2860 * semantic with respect to teardown. Failure to respect this semantic
2861 * introduces deadlocks.
2862 *
2863 * DON'T hold the metadata lock when calling this function, else this
2864 * can cause a deadlock involving the consumer awaiting metadata to be
2865 * pushed out due to concurrent interaction with the session daemon.
2866 */
2867 int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
2868 struct lttng_consumer_channel *channel, int timer, int wait)
2869 {
2870 struct lttcomm_metadata_request_msg request;
2871 struct lttcomm_consumer_msg msg;
2872 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
2873 uint64_t len, key, offset, version;
2874 int ret;
2875
2876 assert(channel);
2877 assert(channel->metadata_cache);
2878
2879 memset(&request, 0, sizeof(request));
2880
2881 /* Send the metadata request to the session daemon. */
2882 switch (consumer_data.type) {
2883 case LTTNG_CONSUMER64_UST:
2884 request.bits_per_long = 64;
2885 break;
2886 case LTTNG_CONSUMER32_UST:
2887 request.bits_per_long = 32;
2888 break;
2889 default:
2890 request.bits_per_long = 0;
2891 break;
2892 }
2893
2894 request.session_id = channel->session_id;
2895 request.session_id_per_pid = channel->session_id_per_pid;
2896 /*
2897 * Request the application UID here so the metadata of that application can
2898 * be sent back. The channel UID corresponds to the user UID of the session
2899 * used for the rights on the stream file(s).
2900 */
2901 request.uid = channel->ust_app_uid;
2902 request.key = channel->key;
2903
2904 DBG("Sending metadata request to sessiond, session id %" PRIu64
2905 ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
2906 request.session_id, request.session_id_per_pid, request.uid,
2907 request.key);
2908
2909 pthread_mutex_lock(&ctx->metadata_socket_lock);
2910
2911 health_code_update();
2912
2913 ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
2914 sizeof(request));
2915 if (ret < 0) {
2916 ERR("Asking metadata to sessiond");
2917 goto end;
2918 }
2919
2920 health_code_update();
2921
2922 /* Receive the metadata from sessiond */
2923 ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
2924 sizeof(msg));
2925 if (ret != sizeof(msg)) {
2926 DBG("Consumer received unexpected message size %d (expects %zu)",
2927 ret, sizeof(msg));
2928 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
2929 /*
2930 * The ret value might be 0, meaning an orderly shutdown, but this is ok
2931 * since the caller handles this.
2932 */
2933 goto end;
2934 }
2935
2936 health_code_update();
2937
2938 if (msg.cmd_type == LTTNG_ERR_UND) {
2939 /* No registry found */
2940 (void) consumer_send_status_msg(ctx->consumer_metadata_socket,
2941 ret_code);
2942 ret = 0;
2943 goto end;
2944 } else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
2945 ERR("Unexpected cmd_type received %d", msg.cmd_type);
2946 ret = -1;
2947 goto end;
2948 }
2949
2950 len = msg.u.push_metadata.len;
2951 key = msg.u.push_metadata.key;
2952 offset = msg.u.push_metadata.target_offset;
2953 version = msg.u.push_metadata.version;
2954
2955 assert(key == channel->key);
2956 if (len == 0) {
2957 DBG("No new metadata to receive for key %" PRIu64, key);
2958 }
2959
2960 health_code_update();
2961
2962 /* Tell session daemon we are ready to receive the metadata. */
2963 ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
2964 LTTCOMM_CONSUMERD_SUCCESS);
2965 if (ret < 0 || len == 0) {
2966 /*
2967 * Somehow, the session daemon is not responding anymore or there is
2968 * nothing to receive.
2969 */
2970 goto end;
2971 }
2972
2973 health_code_update();
2974
2975 ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
2976 key, offset, len, version, channel, timer, wait);
2977 if (ret >= 0) {
2978 /*
2979 * Only send the status msg if the sessiond is alive, meaning a positive
2980 * ret code.
2981 */
2982 (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret);
2983 }
2984 ret = 0;
2985
2986 end:
2987 health_code_update();
2988
2989 pthread_mutex_unlock(&ctx->metadata_socket_lock);
2990 return ret;
2991 }
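/*
 * The request/reply sequence implemented above, summarized (message and
 * status types are the real ones; the timeline itself is editorial):
 *
 *	consumerd                                  sessiond
 *	    | -- lttcomm_metadata_request_msg ------> |
 *	    | <------ lttcomm_consumer_msg ---------- |  LTTNG_ERR_UND or
 *	    |                                         |  LTTNG_CONSUMER_PUSH_METADATA
 *	    | -- status LTTCOMM_CONSUMERD_SUCCESS --> |  (ready to receive)
 *	    | <--------- metadata payload ----------- |  len bytes
 *	    | -- status (recv_metadata result) -----> |
 *
 * The whole exchange happens under ctx->metadata_socket_lock.
 */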
2992
2993 /*
2994 * Wrapper around the ustctl call to get the stream id.
2995 */
2996 int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream,
2997 uint64_t *stream_id)
2998 {
2999 assert(stream);
3000 assert(stream_id);
3001
3002 return ustctl_get_stream_id(stream->ustream, stream_id);
3003 }