/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <stdint.h>
#define _LGPL_SOURCE
#include <assert.h>
#include <lttng/ust-ctl.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <inttypes.h>
#include <unistd.h>
#include <urcu/list.h>
#include <signal.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/relayd/relayd.h>
#include <common/compat/fcntl.h>
#include <common/compat/endian.h>
#include <common/consumer/consumer-metadata-cache.h>
#include <common/consumer/consumer-stream.h>
#include <common/consumer/consumer-timer.h>
#include <common/utils.h>
#include <common/index/index.h>

#include "ust-consumer.h"

#define INT_MAX_STR_LEN 12	/* "-2147483648" plus terminating '\0' */

extern struct lttng_consumer_global_data consumer_data;
extern int consumer_poll_timeout;
extern volatile int consumer_quit;

/*
 * Free channel object and all streams associated with it. This MUST be used
 * if and only if the channel has _NEVER_ been added to the global channel
 * hash table.
 */
static void destroy_channel(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	assert(channel);

	DBG("UST consumer cleaning stream list");

	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {

		health_code_update();

		cds_list_del(&stream->send_node);
		ustctl_destroy_stream(stream->ustream);
		free(stream);
	}

	/*
	 * If a channel is available, meaning it was created before its
	 * streams, delete it.
	 */
	if (channel->uchan) {
		lttng_ustconsumer_del_channel(channel);
		lttng_ustconsumer_free_channel(channel);
	}
	free(channel);
}

/*
 * Add channel to internal consumer state.
 *
 * Returns 0 on success or else a negative value.
 */
static int add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;

	assert(channel);
	assert(ctx);

	if (ctx->on_recv_channel != NULL) {
		ret = ctx->on_recv_channel(channel);
		if (ret == 0) {
			ret = consumer_add_channel(channel, ctx);
		} else if (ret < 0) {
			/* Most likely an ENOMEM. */
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			goto error;
		}
	} else {
		ret = consumer_add_channel(channel, ctx);
	}

	DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);

error:
	return ret;
}

/*
 * Allocate and return a consumer channel object.
 */
static struct lttng_consumer_channel *allocate_channel(uint64_t session_id,
		const char *pathname, const char *name, uid_t uid, gid_t gid,
		uint64_t relayd_id, uint64_t key, enum lttng_event_output output,
		uint64_t tracefile_size, uint64_t tracefile_count,
		uint64_t session_id_per_pid, unsigned int monitor,
		unsigned int live_timer_interval,
		const char *root_shm_path, const char *shm_path)
{
	assert(pathname);
	assert(name);

	return consumer_allocate_channel(key, session_id, pathname, name, uid,
			gid, relayd_id, output, tracefile_size,
			tracefile_count, session_id_per_pid, monitor,
			live_timer_interval, root_shm_path, shm_path);
}

/*
 * Allocate and return a consumer stream object. If _alloc_ret is not NULL,
 * the error value, if applicable, is set in it; otherwise it is left
 * untouched.
 *
 * Return the newly allocated stream object on success, or NULL on error.
 */
static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *_alloc_ret)
{
	int alloc_ret;
	struct lttng_consumer_stream *stream = NULL;

	assert(channel);
	assert(ctx);

	stream = consumer_allocate_stream(channel->key,
			key,
			LTTNG_CONSUMER_ACTIVE_STREAM,
			channel->name,
			channel->uid,
			channel->gid,
			channel->relayd_id,
			channel->session_id,
			cpu,
			&alloc_ret,
			channel->type,
			channel->monitor);
	if (stream == NULL) {
		switch (alloc_ret) {
		case -ENOENT:
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			DBG3("Could not find channel");
			break;
		case -ENOMEM:
		case -EINVAL:
		default:
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			break;
		}
		goto error;
	}

	stream->chan = channel;

error:
	if (_alloc_ret) {
		*_alloc_ret = alloc_ret;
	}
	return stream;
}

/*
 * Send the given stream pointer to the corresponding thread.
 *
 * Returns 0 on success else a negative value.
 */
static int send_stream_to_thread(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_pipe *stream_pipe;

	/* Get the right pipe where the stream will be sent. */
	if (stream->metadata_flag) {
		ret = consumer_add_metadata_stream(stream);
		if (ret) {
			ERR("Consumer add metadata stream %" PRIu64 " failed.",
					stream->key);
			goto error;
		}
		stream_pipe = ctx->consumer_metadata_pipe;
	} else {
		ret = consumer_add_data_stream(stream);
		if (ret) {
			ERR("Consumer add stream %" PRIu64 " failed.",
					stream->key);
			goto error;
		}
		stream_pipe = ctx->consumer_data_pipe;
	}

	/*
	 * From this point on, the stream's ownership is transferred away from
	 * the channel and the stream becomes globally visible.
	 */
	stream->globally_visible = 1;

	ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
	if (ret < 0) {
		ERR("Consumer write %s stream to pipe %d",
				stream->metadata_flag ? "metadata" : "data",
				lttng_pipe_get_writefd(stream_pipe));
		if (stream->metadata_flag) {
			consumer_del_stream_for_metadata(stream);
		} else {
			consumer_del_stream_for_data(stream);
		}
	}
error:
	return ret;
}
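
/*
 * For context, a minimal sketch of the consuming side of this hand-off
 * (an assumption for illustration, not code from this file): the data or
 * metadata poll thread reads the stream pointer back from the same pipe,
 * so ownership is transferred by passing the pointer itself:
 *
 *	struct lttng_consumer_stream *new_stream;
 *	ssize_t len;
 *
 *	len = lttng_pipe_read(ctx->consumer_data_pipe, &new_stream,
 *			sizeof(new_stream));
 *	if (len == sizeof(new_stream)) {
 *		... the poll thread now owns new_stream ...
 *	}
 */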

static
int get_stream_shm_path(char *stream_shm_path, const char *shm_path, int cpu)
{
	char cpu_nr[INT_MAX_STR_LEN];	/* int max len */
	int ret;

	strncpy(stream_shm_path, shm_path, PATH_MAX);
	stream_shm_path[PATH_MAX - 1] = '\0';
	ret = snprintf(cpu_nr, INT_MAX_STR_LEN, "%i", cpu);
	if (ret < 0) {
		PERROR("snprintf");
		goto end;
	}
	strncat(stream_shm_path, cpu_nr,
			PATH_MAX - strlen(stream_shm_path) - 1);
	ret = 0;
end:
	return ret;
}
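
/*
 * Example (illustrative values, assuming a PATH_MAX-sized buffer): the
 * per-stream name is simply the channel shm path with the CPU number
 * appended, so shm_path = "/tmp/my-shm" and cpu = 3 yield "/tmp/my-shm3":
 *
 *	char path[PATH_MAX];
 *
 *	if (!get_stream_shm_path(path, "/tmp/my-shm", 3)) {
 *		assert(strcmp(path, "/tmp/my-shm3") == 0);
 *	}
 */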

/*
 * Create streams for the given channel using liblttng-ust-ctl.
 *
 * Return 0 on success else a negative value.
 */
static int create_ust_streams(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret, cpu = 0;
	struct ustctl_consumer_stream *ustream;
	struct lttng_consumer_stream *stream;

	assert(channel);
	assert(ctx);

	/*
	 * Loop while a stream is available from ustctl. When NULL is
	 * returned, we've reached the end of the possible streams for the
	 * channel.
	 */
	while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
		int wait_fd;
		int ust_metadata_pipe[2];

		health_code_update();

		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
			ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
			if (ret < 0) {
				ERR("Create ust metadata poll pipe");
				goto error;
			}
			wait_fd = ust_metadata_pipe[0];
		} else {
			wait_fd = ustctl_stream_get_wait_fd(ustream);
		}

		/* Allocate consumer stream object. */
		stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
		if (!stream) {
			goto error_alloc;
		}
		stream->ustream = ustream;
		/*
		 * Store it so we can avoid multiple function calls afterwards,
		 * since this value is used heavily in the stream threads. This
		 * is UST specific, which is why it is done after allocation.
		 */
		stream->wait_fd = wait_fd;

		/*
		 * Increment channel refcount since the channel reference has
		 * now been assigned in the allocation process above.
		 */
		if (stream->chan->monitor) {
			uatomic_inc(&stream->chan->refcount);
		}

		/*
		 * Order is important, which is why a list is used. On error,
		 * the caller should clean up this list.
		 */
		cds_list_add_tail(&stream->send_node, &channel->streams.head);

		ret = ustctl_get_max_subbuf_size(stream->ustream,
				&stream->max_sb_size);
		if (ret < 0) {
			ERR("ustctl_get_max_subbuf_size failed for stream %s",
					stream->name);
			goto error;
		}

		/* Do actions once stream has been received. */
		if (ctx->on_recv_stream) {
			ret = ctx->on_recv_stream(stream);
			if (ret < 0) {
				goto error;
			}
		}

		DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
				stream->name, stream->key, stream->relayd_stream_id);

		/* Set next CPU stream. */
		channel->streams.count = ++cpu;

		/* Keep stream reference when creating metadata. */
		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
			channel->metadata_stream = stream;
			if (channel->monitor) {
				/* Set metadata poll pipe if we created one. */
				memcpy(stream->ust_metadata_poll_pipe,
						ust_metadata_pipe,
						sizeof(ust_metadata_pipe));
			}
		}
	}

	return 0;

error:
error_alloc:
	return ret;
}

/*
 * create_posix_shm is never called concurrently within a process.
 */
static
int create_posix_shm(void)
{
	char tmp_name[NAME_MAX];
	int shmfd, ret;

	ret = snprintf(tmp_name, NAME_MAX, "/ust-shm-consumer-%d", getpid());
	if (ret < 0) {
		PERROR("snprintf");
		return -1;
	}
	/*
	 * Allocate the shm, and immediately unlink its shm object, keeping
	 * only the file descriptor as a reference to the object. The name
	 * begins with a '/' so that the object name is portable across
	 * shm_open() implementations; POSIX leaves the behaviour of names
	 * without a leading '/' implementation-defined.
	 */
	shmfd = shm_open(tmp_name, O_CREAT | O_EXCL | O_RDWR, 0700);
	if (shmfd < 0) {
		PERROR("shm_open");
		goto error_shm_open;
	}
	ret = shm_unlink(tmp_name);
	if (ret < 0 && errno != ENOENT) {
		PERROR("shm_unlink");
		goto error_shm_release;
	}
	return shmfd;

error_shm_release:
	ret = close(shmfd);
	if (ret) {
		PERROR("close");
	}
error_shm_open:
	return -1;
}
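
/*
 * Illustrative sketch (an assumption about the downstream user, not code
 * from this file): the anonymous fd returned above is typically sized and
 * mapped by the ring-buffer code, roughly:
 *
 *	if (ftruncate(shmfd, memory_map_size) < 0) {
 *		PERROR("ftruncate");
 *	}
 *	ptr = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, shmfd, 0);
 *
 * Because the name was unlinked, the object disappears automatically once
 * the last fd and mapping are gone.
 */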

static int open_ust_stream_fd(struct lttng_consumer_channel *channel,
		struct ustctl_consumer_channel_attr *attr,
		int cpu)
{
	char shm_path[PATH_MAX];
	int ret;

	if (!channel->shm_path[0]) {
		return create_posix_shm();
	}
	ret = get_stream_shm_path(shm_path, channel->shm_path, cpu);
	if (ret) {
		goto error_shm_path;
	}
	return run_as_open(shm_path,
			O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR,
			channel->uid, channel->gid);

error_shm_path:
	return -1;
}

/*
 * Create a UST channel with the given attributes and send it to the session
 * daemon using the ust ctl API.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_channel(struct lttng_consumer_channel *channel,
		struct ustctl_consumer_channel_attr *attr,
		struct ustctl_consumer_channel **ust_chanp)
{
	int ret, nr_stream_fds, i, j;
	int *stream_fds;
	struct ustctl_consumer_channel *ust_channel;

	assert(channel);
	assert(attr);
	assert(ust_chanp);

	DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
			"subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
			"switch_timer_interval: %u, read_timer_interval: %u, "
			"output: %d, type: %d", attr->overwrite, attr->subbuf_size,
			attr->num_subbuf, attr->switch_timer_interval,
			attr->read_timer_interval, attr->output, attr->type);

	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA)
		nr_stream_fds = 1;
	else
		nr_stream_fds = ustctl_get_nr_stream_per_channel();
	stream_fds = zmalloc(nr_stream_fds * sizeof(*stream_fds));
	if (!stream_fds) {
		ret = -1;
		goto error_alloc;
	}
	for (i = 0; i < nr_stream_fds; i++) {
		stream_fds[i] = open_ust_stream_fd(channel, attr, i);
		if (stream_fds[i] < 0) {
			ret = -1;
			goto error_open;
		}
	}
	ust_channel = ustctl_create_channel(attr, stream_fds, nr_stream_fds);
	if (!ust_channel) {
		ret = -1;
		goto error_create;
	}
	channel->nr_stream_fds = nr_stream_fds;
	channel->stream_fds = stream_fds;
	*ust_chanp = ust_channel;

	return 0;

error_create:
error_open:
	for (j = i - 1; j >= 0; j--) {
		int closeret;

		closeret = close(stream_fds[j]);
		if (closeret) {
			PERROR("close");
		}
		if (channel->shm_path[0]) {
			char shm_path[PATH_MAX];

			closeret = get_stream_shm_path(shm_path,
					channel->shm_path, j);
			if (closeret) {
				ERR("Cannot get stream shm path");
			}
			closeret = run_as_unlink(shm_path,
					channel->uid, channel->gid);
			if (closeret) {
				PERROR("unlink %s", shm_path);
			}
		}
	}
	/* Try to rmdir all directories under shm_path root. */
	if (channel->root_shm_path[0]) {
		(void) run_as_recursive_rmdir(channel->root_shm_path,
				channel->uid, channel->gid);
	}
	free(stream_fds);
error_alloc:
	return ret;
}

/*
 * Send a single given stream to the session daemon using the sock.
 *
 * Return 0 on success else a negative value.
 */
static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);
	assert(sock >= 0);

	DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);

	/* Send stream to session daemon. */
	ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}

/*
 * Send channel to sessiond.
 *
 * Return 0 on success or else a negative value.
 */
static int send_sessiond_channel(int sock,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *relayd_error)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttng_consumer_stream *stream;
	uint64_t relayd_id = -1ULL;

	assert(channel);
	assert(ctx);
	assert(sock >= 0);

	DBG("UST consumer sending channel %s to sessiond", channel->name);

	if (channel->relayd_id != (uint64_t) -1ULL) {
		cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

			health_code_update();

			/* Try to send the stream to the relayd if one is available. */
			ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
			if (ret < 0) {
				/*
				 * Flag that the relayd was the problem here,
				 * probably due to a communication error on the
				 * socket.
				 */
				if (relayd_error) {
					*relayd_error = 1;
				}
				ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			}
			if (relayd_id == -1ULL) {
				relayd_id = stream->relayd_id;
			}
		}
	}

	/* Inform sessiond that we are about to send channel and streams. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		/*
		 * Either the session daemon is not responding or the relayd
		 * died, so we stop now.
		 */
		goto error;
	}

	/* Send channel to sessiond. */
	ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
	if (ret < 0) {
		goto error;
	}

	ret = ustctl_channel_close_wakeup_fd(channel->uchan);
	if (ret < 0) {
		goto error;
	}

	/* The channel was sent successfully to the sessiond at this point. */
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

		health_code_update();

		/* Send stream to session daemon. */
		ret = send_sessiond_stream(sock, stream);
		if (ret < 0) {
			goto error;
		}
	}

	/* Tell sessiond there is no more stream. */
	ret = ustctl_send_stream_to_sessiond(sock, NULL);
	if (ret < 0) {
		goto error;
	}

	DBG("UST consumer NULL stream sent to sessiond");

	return 0;

error:
	if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		ret = -1;
	}
	return ret;
}
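
/*
 * Summary of the wire sequence implied above (the sessiond-side calls are
 * an assumption for illustration):
 *
 *	1. a status message (LTTCOMM_CONSUMERD_SUCCESS or an error code);
 *	2. the channel object itself;
 *	3. one message per stream of the channel;
 *	4. a NULL stream, acting as an end-of-streams marker.
 *
 * The session daemon is expected to mirror this with matching
 * ustctl_recv_channel_from_consumer()/ustctl_recv_stream_from_consumer()
 * calls and to stop on the NULL sentinel.
 */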

/*
 * Creates a channel and its streams, and adds the channel to the internal
 * channel state. The created streams must ONLY be sent once the GET_CHANNEL
 * command is received.
 *
 * Return 0 on success; otherwise a negative value is returned and the
 * channel MUST be destroyed by consumer_del_channel().
 */
static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
		struct lttng_consumer_channel *channel,
		struct ustctl_consumer_channel_attr *attr)
{
	int ret;

	assert(ctx);
	assert(channel);
	assert(attr);

	/*
	 * This value is still used by the kernel consumer since, for the
	 * kernel, the stream ownership is not IN the consumer, so we need the
	 * number of streams left to initialize in order to know when to
	 * delete the channel (see consumer.c).
	 *
	 * As for the user space tracer, the consumer creates and sends the
	 * streams to the session daemon, which only sends them to the
	 * application once every stream of a channel has been received. This
	 * makes the value useless here because the streams will be added to
	 * the poll thread before the application receives them. This ensures
	 * that a stream cannot hang up during initialization of a channel.
	 */
	channel->nb_init_stream_left = 0;

	/* The reply msg status is handled in the following call. */
	ret = create_ust_channel(channel, attr, &channel->uchan);
	if (ret < 0) {
		goto end;
	}

	channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);

	/*
	 * For the snapshots (no monitor), we create the metadata streams
	 * on demand, not during the channel creation.
	 */
	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
		ret = 0;
		goto end;
	}

	/* Open all streams for this channel. */
	ret = create_ust_streams(channel, ctx);
	if (ret < 0) {
		goto end;
	}

end:
	return ret;
}

/*
 * Send all streams of a channel to the right thread handling them.
 *
 * Return 0 on success or a negative value on error.
 */
static int send_streams_to_thread(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_stream *stream, *stmp;

	assert(channel);
	assert(ctx);

	/* Send streams to the corresponding thread. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {

		health_code_update();

		/* Sending the stream to the thread. */
		ret = send_stream_to_thread(stream, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread,
			 * there is a big problem, so just stop everything.
			 */
			/* Remove node from the channel stream list. */
			cds_list_del(&stream->send_node);
			goto error;
		}

		/* Remove node from the channel stream list. */
		cds_list_del(&stream->send_node);
	}

error:
	return ret;
}

/*
 * Flush channel's streams using the given key to retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int flush_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer flush channel key %" PRIu64, chan_key);

	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, flush it. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {

		health_code_update();

		pthread_mutex_lock(&stream->lock);

		/*
		 * Protect against concurrent teardown of a stream.
		 */
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}

		if (!stream->quiescent) {
			ustctl_flush_buffer(stream->ustream, 0);
			stream->quiescent = true;
		}
next:
		pthread_mutex_unlock(&stream->lock);
	}
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Clear quiescent state from channel's streams using the given key to
 * retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int clear_quiescent_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);

	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, clear quiescent state. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {

		health_code_update();

		pthread_mutex_lock(&stream->lock);
		stream->quiescent = false;
		pthread_mutex_unlock(&stream->lock);
	}
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
 * RCU read side lock MUST be acquired before calling this function.
 *
 * Return 0 on success else an LTTng error code.
 */
static int close_metadata(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	unsigned int channel_monitor;

	DBG("UST consumer close metadata key %" PRIu64, chan_key);

	channel = consumer_find_channel(chan_key);
	if (!channel) {
		/*
		 * This is possible if the metadata thread has issued a delete
		 * because the endpoint of the stream hung up. There is no way
		 * the session daemon can know about it, thus use a DBG instead
		 * of an actual error.
		 */
		DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	channel_monitor = channel->monitor;
	if (cds_lfht_is_node_deleted(&channel->node.node)) {
		goto error_unlock;
	}

	lttng_ustconsumer_close_metadata(channel);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	/*
	 * The ownership of a metadata channel depends on the type of
	 * session to which it belongs. In effect, the monitor flag is checked
	 * to determine if this metadata channel is in "snapshot" mode or not.
	 *
	 * In the non-snapshot case, the metadata channel is created along with
	 * a single stream which will remain present until the metadata channel
	 * is destroyed (on the destruction of its session). In this case, the
	 * metadata stream is monitored by the metadata poll thread, which
	 * holds the ownership of its channel.
	 *
	 * Closing the metadata will cause the metadata stream's "metadata poll
	 * pipe" to be closed. Closing this pipe will wake up the metadata poll
	 * thread, which will tear down the metadata stream which, in turn,
	 * deletes the metadata channel.
	 *
	 * In the snapshot case, the metadata stream is created and destroyed
	 * on every snapshot record. Since the channel doesn't have an owner
	 * other than the session daemon, it is safe to destroy it immediately
	 * on reception of the CLOSE_METADATA command.
	 */
	if (!channel_monitor) {
		/*
		 * The channel and consumer_data locks must be
		 * released before this call since consumer_del_channel
		 * re-acquires the channel and consumer_data locks to teardown
		 * the channel and queue its reclamation by the "call_rcu"
		 * worker thread.
		 */
		consumer_del_channel(channel);
	}

	return ret;
error_unlock:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
error:
	return ret;
}

/*
 * RCU read side lock MUST be acquired before calling this function.
 *
 * Return 0 on success else an LTTng error code.
 */
static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
{
	int ret;
	struct lttng_consumer_channel *metadata;

	DBG("UST consumer setup metadata key %" PRIu64, key);

	metadata = consumer_find_channel(key);
	if (!metadata) {
		ERR("UST consumer setup metadata %" PRIu64 " not found", key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto end;
	}

	/*
	 * In no monitor mode, the metadata channel has no stream(s), so skip
	 * the ownership transfer to the metadata thread.
	 */
	if (!metadata->monitor) {
		DBG("Metadata channel in no monitor");
		ret = 0;
		goto end;
	}

	/*
	 * Send metadata stream to relayd if one is available. Availability is
	 * known if the stream is still in the list of the channel.
	 */
	if (cds_list_empty(&metadata->streams.head)) {
		ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
		ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		goto error_no_stream;
	}

	/* Send metadata stream to relayd if needed. */
	if (metadata->metadata_stream->relayd_id != (uint64_t) -1ULL) {
		ret = consumer_send_relayd_stream(metadata->metadata_stream,
				metadata->pathname);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto error;
		}
		ret = consumer_send_relayd_streams_sent(
				metadata->metadata_stream->relayd_id);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			goto error;
		}
	}

	ret = send_streams_to_thread(metadata, ctx);
	if (ret < 0) {
		/*
		 * If we are unable to send the stream to the thread, there is
		 * a big problem, so just stop everything.
		 */
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}
	/* List MUST be empty after this, or else it could be reused. */
	assert(cds_list_empty(&metadata->streams.head));

	ret = 0;
	goto end;

error:
	/*
	 * Delete the metadata channel on error. At this point, the metadata
	 * stream can NOT be monitored by the metadata thread yet, which
	 * guarantees that the stream is still in the local stream list of the
	 * channel. This call will make sure to clean that list.
	 *
	 * Remove the stream from the list before destroying it so the stream
	 * object is not accessed after its destruction is initiated.
	 */
	cds_list_del(&metadata->metadata_stream->send_node);
	consumer_stream_destroy(metadata->metadata_stream, NULL);
	metadata->metadata_stream = NULL;
error_no_stream:
end:
	return ret;
}

/*
 * Snapshot the whole metadata.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_channel *metadata_channel;
	struct lttng_consumer_stream *metadata_stream;

	assert(path);
	assert(ctx);

	DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	rcu_read_lock();

	metadata_channel = consumer_find_channel(key);
	if (!metadata_channel) {
		ERR("UST snapshot metadata channel not found for key %" PRIu64,
				key);
		ret = -1;
		goto error;
	}
	assert(!metadata_channel->monitor);

	health_code_update();

	/*
	 * Ask the sessiond if we have new metadata waiting and update the
	 * consumer metadata cache.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * In no monitor mode, the metadata stream is NOT created when the
	 * channel is created by a sessiond ask-channel command, so create it
	 * now.
	 */
	ret = create_ust_streams(metadata_channel, ctx);
	if (ret < 0) {
		goto error;
	}

	metadata_stream = metadata_channel->metadata_stream;
	assert(metadata_stream);

	if (relayd_id != (uint64_t) -1ULL) {
		metadata_stream->relayd_id = relayd_id;
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error_stream;
		}
	} else {
		ret = utils_create_stream_file(path, metadata_stream->name,
				metadata_stream->chan->tracefile_size,
				metadata_stream->tracefile_count_current,
				metadata_stream->uid, metadata_stream->gid, NULL);
		if (ret < 0) {
			goto error_stream;
		}
		metadata_stream->out_fd = ret;
		metadata_stream->tracefile_size_current = 0;
	}

	do {
		health_code_update();

		ret = lttng_consumer_read_subbuffer(metadata_stream, ctx);
		if (ret < 0) {
			goto error_stream;
		}
	} while (ret > 0);

error_stream:
	/*
	 * Clean up the stream completely because the next snapshot will use a
	 * new metadata stream. Remove it from the channel list before
	 * destroying it so the stream object is not accessed after its
	 * destruction is initiated.
	 */
	cds_list_del(&metadata_stream->send_node);
	consumer_stream_destroy(metadata_stream, NULL);
	metadata_channel->metadata_stream = NULL;

error:
	rcu_read_unlock();
	return ret;
}

static
int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
		const char **addr)
{
	int ret;
	unsigned long mmap_offset;
	const char *mmap_base;

	mmap_base = ustctl_get_mmap_base(stream->ustream);
	if (!mmap_base) {
		ERR("Failed to get mmap base for stream `%s`",
				stream->name);
		ret = -EPERM;
		goto error;
	}

	ret = ustctl_get_mmap_read_offset(stream->ustream, &mmap_offset);
	if (ret != 0) {
		ERR("Failed to get mmap offset for stream `%s`", stream->name);
		ret = -EINVAL;
		goto error;
	}

	*addr = mmap_base + mmap_offset;
error:
	return ret;
}
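
/*
 * Usage sketch (illustrative only, assuming the ustctl calls above
 * succeed): the returned address points at the current sub-buffer inside
 * the ring-buffer mapping, so a caller can pair it with the sub-buffer
 * size to read the data:
 *
 *	const char *subbuf_addr;
 *	unsigned long subbuf_size;
 *
 *	if (!get_current_subbuf_addr(stream, &subbuf_addr)
 *			&& !ustctl_get_subbuf_size(stream->ustream, &subbuf_size)) {
 *		bytes subbuf_addr[0] .. subbuf_addr[subbuf_size - 1] are valid
 *	}
 */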

/*
 * Take a snapshot of all the streams of a channel.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
		uint64_t nb_packets_per_stream, struct lttng_consumer_local_data *ctx)
{
	int ret;
	unsigned use_relayd = 0;
	unsigned long consumed_pos, produced_pos;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;

	assert(path);
	assert(ctx);

	rcu_read_lock();

	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	channel = consumer_find_channel(key);
	if (!channel) {
		ERR("UST snapshot channel not found for key %" PRIu64, key);
		ret = -1;
		goto error;
	}
	assert(!channel->monitor);
	DBG("UST consumer snapshot channel %" PRIu64, key);

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		health_code_update();

		/* Lock stream because we are about to change its state. */
		pthread_mutex_lock(&stream->lock);
		stream->relayd_id = relayd_id;

		if (use_relayd) {
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				goto error_unlock;
			}
		} else {
			ret = utils_create_stream_file(path, stream->name,
					stream->chan->tracefile_size,
					stream->tracefile_count_current,
					stream->uid, stream->gid, NULL);
			if (ret < 0) {
				goto error_unlock;
			}
			stream->out_fd = ret;
			stream->tracefile_size_current = 0;

			DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
					stream->name, stream->key);
		}
		if (relayd_id != -1ULL) {
			ret = consumer_send_relayd_streams_sent(relayd_id);
			if (ret < 0) {
				goto error_unlock;
			}
		}

		/*
		 * If tracing is active, we want to perform a "full" buffer flush.
		 * Else, if quiescent, it has already been done by the prior stop.
		 */
		if (!stream->quiescent) {
			ustctl_flush_buffer(stream->ustream, 0);
		}

		ret = lttng_ustconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking UST snapshot");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced UST snapshot position");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumed UST snapshot position");
			goto error_unlock;
		}

		/*
		 * The original value is sent back if the maximum stream size is
		 * larger than the possible size of the snapshot. Also, we assume
		 * that the session daemon should never send a maximum stream
		 * size that is lower than the subbuffer size.
		 */
		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
				produced_pos, nb_packets_per_stream,
				stream->max_sb_size);
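
		/*
		 * Numeric example (illustrative, assuming the helper caps the
		 * snapshot at nb_packets_per_stream packets): with
		 * max_sb_size = 4096, produced_pos = 65536, consumed_pos = 0
		 * and nb_packets_per_stream = 4, the start position becomes
		 * 65536 - 4 * 4096 = 49152, so only the 4 most recent packets
		 * are copied by the loop below.
		 */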

		while (consumed_pos < produced_pos) {
			ssize_t read_len;
			unsigned long len, padded_len;
			const char *subbuf_addr;

			health_code_update();

			DBG("UST consumer taking snapshot at pos %lu", consumed_pos);

			ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("ustctl_get_subbuf snapshot");
					goto error_close_stream;
				}
				DBG("UST consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
				continue;
			}

			ret = ustctl_get_subbuf_size(stream->ustream, &len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto error_put_subbuf;
			}

			read_len = lttng_consumer_on_read_subbuffer_mmap(ctx,
					stream, subbuf_addr, len,
					padded_len - len, NULL);
			if (use_relayd) {
				if (read_len != len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			} else {
				if (read_len != padded_len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			}

			ret = ustctl_put_subbuf(stream->ustream);
			if (ret < 0) {
				ERR("Snapshot ustctl_put_subbuf");
				goto error_close_stream;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Simply close the stream so we can use it on the next snapshot. */
		consumer_stream_close(stream);
		pthread_mutex_unlock(&stream->lock);
	}

	rcu_read_unlock();
	return 0;

error_put_subbuf:
	if (ustctl_put_subbuf(stream->ustream) < 0) {
		ERR("Snapshot ustctl_put_subbuf");
	}
error_close_stream:
	consumer_stream_close(stream);
error_unlock:
	pthread_mutex_unlock(&stream->lock);
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Receive the metadata updates from the sessiond. Supports receiving
 * overlapping metadata, but it needs to always belong to a contiguous
 * range starting from 0.
 * Be careful about the locks held when calling this function: it needs
 * the metadata cache flush to concurrently progress in order to
 * complete.
 */
int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
		uint64_t len, uint64_t version,
		struct lttng_consumer_channel *channel, int timer, int wait)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	char *metadata_str;

	DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);

	metadata_str = zmalloc(len * sizeof(char));
	if (!metadata_str) {
		PERROR("zmalloc metadata string");
		ret_code = LTTCOMM_CONSUMERD_ENOMEM;
		goto end;
	}

	health_code_update();

	/* Receive metadata string. */
	ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
	if (ret < 0) {
		/* Session daemon is dead, so return gracefully. */
		ret_code = ret;
		goto end_free;
	}

	health_code_update();

	pthread_mutex_lock(&channel->metadata_cache->lock);
	ret = consumer_metadata_cache_write(channel, offset, len, version,
			metadata_str);
	if (ret < 0) {
		/* Unable to handle metadata. Notify session daemon. */
		ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
		/*
		 * Skip the metadata flush on write error since the offset and
		 * len might not have been updated, which could create an
		 * infinite loop below when waiting for the metadata cache to
		 * be flushed.
		 */
		pthread_mutex_unlock(&channel->metadata_cache->lock);
		goto end_free;
	}
	pthread_mutex_unlock(&channel->metadata_cache->lock);

	if (!wait) {
		goto end_free;
	}
	while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
		DBG("Waiting for metadata to be flushed");

		health_code_update();

		usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
	}

end_free:
	free(metadata_str);
end:
	return ret_code;
}
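
/*
 * Example of the contiguity rule above (illustrative): if the cache
 * currently covers the range [0, 100), a push of [50, 150) is accepted
 * since the overlap keeps the range contiguous from 0, whereas a push of
 * [120, 150) would leave a hole at [100, 120) and is invalid.
 */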

/*
 * Receive command from session daemon and process it.
 *
 * Return 1 on success, else 0 or a negative value.
 */
int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	ssize_t ret;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttcomm_consumer_msg msg;
	struct lttng_consumer_channel *channel = NULL;

	health_code_update();

	ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %zd (expects %zu)",
				ret, sizeof(msg));
		/*
		 * The ret value might be 0, meaning an orderly shutdown, but
		 * this is ok since the caller handles this.
		 */
		if (ret > 0) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
			ret = -1;
		}
		return ret;
	}

	health_code_update();

	/* Deprecated command. */
	assert(msg.cmd_type != LTTNG_CONSUMER_STOP);

	health_code_update();

	/* relayd needs RCU read-side lock */
	rcu_read_lock();

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		/* Session daemon status messages are handled in the following call. */
		consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
				msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
				&msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
				msg.u.relayd_sock.relayd_session_id);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("UST consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if it exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == NULL) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		}

		/*
		 * Each relayd socket pair has a refcount of streams attached
		 * to it, which tells whether the relayd is still active,
		 * depending on the refcount value.
		 *
		 * This will set the destroy flag of the relayd object and
		 * destroy it if the refcount reaches zero when called.
		 *
		 * The destroy can happen either here or when a stream fd hangs up.
		 */
		if (relayd) {
			consumer_flag_relayd_for_destroy(relayd);
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		rcu_read_unlock();
		return -ENOSYS;
	}
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		int ret, is_data_pending;
		uint64_t id = msg.u.data_pending.session_id;

		DBG("UST consumer data pending command for id %" PRIu64, id);

		is_data_pending = consumer_data_pending(id);

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &is_data_pending,
				sizeof(is_data_pending));
		if (ret < 0) {
			DBG("Error when sending the data pending ret code: %d", ret);
			goto error_fatal;
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
	{
		int ret;
		struct ustctl_consumer_channel_attr attr;

		/* Create a plain object and reserve a channel key. */
		channel = allocate_channel(msg.u.ask_channel.session_id,
				msg.u.ask_channel.pathname, msg.u.ask_channel.name,
				msg.u.ask_channel.uid, msg.u.ask_channel.gid,
				msg.u.ask_channel.relayd_id, msg.u.ask_channel.key,
				(enum lttng_event_output) msg.u.ask_channel.output,
				msg.u.ask_channel.tracefile_size,
				msg.u.ask_channel.tracefile_count,
				msg.u.ask_channel.session_id_per_pid,
				msg.u.ask_channel.monitor,
				msg.u.ask_channel.live_timer_interval,
				msg.u.ask_channel.root_shm_path,
				msg.u.ask_channel.shm_path);
		if (!channel) {
			goto end_channel_error;
		}

		/*
		 * Assign the UST application UID to the channel. This value is
		 * ignored for per-PID buffers. This is specific to UST, thus
		 * setting this after the allocation.
		 */
		channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;

		/* Build channel attributes from received message. */
		attr.subbuf_size = msg.u.ask_channel.subbuf_size;
		attr.num_subbuf = msg.u.ask_channel.num_subbuf;
		attr.overwrite = msg.u.ask_channel.overwrite;
		attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
		attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
		attr.chan_id = msg.u.ask_channel.chan_id;
		memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));

		/* Match channel buffer type to the UST abi. */
		switch (msg.u.ask_channel.output) {
		case LTTNG_EVENT_MMAP:
		default:
			attr.output = LTTNG_UST_MMAP;
			break;
		}

		/* Translate and save channel type. */
		switch (msg.u.ask_channel.type) {
		case LTTNG_UST_CHAN_PER_CPU:
			channel->type = CONSUMER_CHANNEL_TYPE_DATA;
			attr.type = LTTNG_UST_CHAN_PER_CPU;
			/*
			 * Set refcount to 1 for owner. Below, we will
			 * pass ownership to the
			 * consumer_thread_channel_poll() thread.
			 */
			channel->refcount = 1;
			break;
		case LTTNG_UST_CHAN_METADATA:
			channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
			attr.type = LTTNG_UST_CHAN_METADATA;
			break;
		default:
			assert(0);
			goto error_fatal;
		}

		health_code_update();

		ret = ask_channel(ctx, sock, channel, &attr);
		if (ret < 0) {
			goto end_channel_error;
		}

		if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
			ret = consumer_metadata_cache_allocate(channel);
			if (ret < 0) {
				ERR("Allocating metadata cache");
				goto end_channel_error;
			}
			consumer_timer_switch_start(channel, attr.switch_timer_interval);
			attr.switch_timer_interval = 0;
		} else {
			consumer_timer_live_start(channel,
					msg.u.ask_channel.live_timer_interval);
		}

		health_code_update();

		/*
		 * Add the channel to the internal state AFTER all streams were
		 * created and successfully sent to the session daemon. This
		 * way, all streams must be ready before this channel is
		 * visible to the threads.
		 * If add_channel succeeds, ownership of the channel is passed
		 * to consumer_thread_channel_poll().
		 */
		ret = add_channel(channel, ctx);
		if (ret < 0) {
			if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
				if (channel->switch_timer_enabled == 1) {
					consumer_timer_switch_stop(channel);
				}
				consumer_metadata_cache_destroy(channel);
			}
			if (channel->live_timer_enabled == 1) {
				consumer_timer_live_stop(channel);
			}
			goto end_channel_error;
		}

		health_code_update();

		/*
		 * Channel and streams are now created. Inform the session
		 * daemon that everything went well and that it should wait to
		 * receive the channel and streams with the ustctl API.
		 */
		ret = consumer_send_status_channel(sock, channel);
		if (ret < 0) {
			/*
			 * There is probably a problem on the socket.
			 */
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_GET_CHANNEL:
	{
		int ret, relayd_err = 0;
		uint64_t key = msg.u.get_channel.key;
		struct lttng_consumer_channel *channel;

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("UST consumer get channel key %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
			goto end_msg_sessiond;
		}

		health_code_update();

		/* Send everything to sessiond. */
		ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
		if (ret < 0) {
			if (relayd_err) {
				/*
				 * We were unable to send the stream to the
				 * relayd, so avoid sending back a fatal error
				 * to the thread since this is OK and the
				 * consumer can continue its work. The above
				 * call has sent the error status message to
				 * the sessiond.
				 */
				goto end_nosignal;
			}
			/*
			 * The communication was broken, hence there is a bad
			 * state between the consumer and sessiond, so stop
			 * everything.
			 */
			goto error_fatal;
		}

		health_code_update();

		/*
		 * In no monitor mode, the streams' ownership is kept inside
		 * the channel, so don't send them to the data thread.
		 */
		if (!channel->monitor) {
			goto end_msg_sessiond;
		}

		ret = send_streams_to_thread(channel, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread,
			 * there is a big problem, so just stop everything.
			 */
			goto error_fatal;
		}
		/* List MUST be empty after this, or else it could be reused. */
		assert(cds_list_empty(&channel->streams.head));
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
	{
		uint64_t key = msg.u.destroy_channel.key;

		/*
		 * Only called if streams have not been sent to the stream
		 * manager thread. However, the channel has been sent to the
		 * channel manager thread.
		 */
		notify_thread_del_channel(ctx, key);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_METADATA:
	{
		int ret;

		ret = close_metadata(msg.u.close_metadata.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_FLUSH_CHANNEL:
	{
		int ret;

		ret = flush_channel(msg.u.flush_channel.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL:
	{
		int ret;

		ret = clear_quiescent_channel(
				msg.u.clear_quiescent_channel.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_PUSH_METADATA:
	{
		int ret;
		uint64_t len = msg.u.push_metadata.len;
		uint64_t key = msg.u.push_metadata.key;
		uint64_t offset = msg.u.push_metadata.target_offset;
		uint64_t version = msg.u.push_metadata.version;
		struct lttng_consumer_channel *channel;

		DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
				len);

		channel = consumer_find_channel(key);
		if (!channel) {
			/*
			 * This is possible if the metadata creation on the
			 * consumer side is in flight vis-a-vis a concurrent
			 * push metadata from the session daemon. Simply return
			 * that the channel failed and the session daemon will
			 * handle that message correctly, considering that this
			 * race is acceptable, thus the DBG() statement here.
			 */
			DBG("UST consumer push metadata %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			goto end_msg_sessiond;
		}

		health_code_update();

		if (!len) {
			/*
			 * There is nothing to receive. We have simply
			 * checked whether the channel can be found.
			 */
			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			goto end_msg_sessiond;
		}

		/* Tell session daemon we are ready to receive the metadata. */
		ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_fatal;
		}

		health_code_update();

		/* Wait for more data. */
		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			goto error_fatal;
		}

		health_code_update();

		ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
				len, version, channel, 0, 1);
		if (ret < 0) {
			/* Error receiving from sessiond. */
			goto error_fatal;
		} else {
			ret_code = ret;
			goto end_msg_sessiond;
		}
	}
	case LTTNG_CONSUMER_SETUP_METADATA:
	{
		int ret;

		ret = setup_metadata(ctx, msg.u.setup_metadata.key);
		if (ret) {
			ret_code = ret;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		if (msg.u.snapshot_channel.metadata) {
			ret = snapshot_metadata(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					ctx);
			if (ret < 0) {
				ERR("Snapshot metadata failed");
				ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
			}
		} else {
			ret = snapshot_channel(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					msg.u.snapshot_channel.nb_packets_per_stream,
					ctx);
			if (ret < 0) {
				ERR("Snapshot channel failed");
				ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			}
		}

		health_code_update();
		ret = consumer_send_status_msg(sock, ret_code);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		health_code_update();
		break;
	}
	case LTTNG_CONSUMER_DISCARDED_EVENTS:
	{
		int ret = 0;
		uint64_t discarded_events;
		struct lttng_ht_iter iter;
		struct lttng_ht *ht;
		struct lttng_consumer_stream *stream;
		uint64_t id = msg.u.discarded_events.session_id;
		uint64_t key = msg.u.discarded_events.channel_key;

		DBG("UST consumer discarded events command for session id %"
				PRIu64, id);
		rcu_read_lock();
		pthread_mutex_lock(&consumer_data.lock);

		ht = consumer_data.stream_list_ht;

		/*
		 * We only need a reference to the channel, but channels are
		 * not directly indexed, so we just use the first matching
		 * stream to extract the information we need. We default to 0
		 * if no stream is found (no events are dropped if the channel
		 * is not yet in use).
		 */
		discarded_events = 0;
		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				ht->match_fct, &id,
				&iter.iter, stream, node_session_id.node) {
			if (stream->chan->key == key) {
				discarded_events = stream->chan->discarded_events;
				break;
			}
		}
		pthread_mutex_unlock(&consumer_data.lock);
		rcu_read_unlock();

		DBG("UST consumer discarded events command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events));
		if (ret < 0) {
			PERROR("send discarded events");
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_LOST_PACKETS:
	{
		int ret;
		uint64_t lost_packets;
		struct lttng_ht_iter iter;
		struct lttng_ht *ht;
		struct lttng_consumer_stream *stream;
		uint64_t id = msg.u.lost_packets.session_id;
		uint64_t key = msg.u.lost_packets.channel_key;

		DBG("UST consumer lost packets command for session id %"
				PRIu64, id);
		rcu_read_lock();
		pthread_mutex_lock(&consumer_data.lock);

		ht = consumer_data.stream_list_ht;

		/*
		 * We only need a reference to the channel, but channels are
		 * not directly indexed, so we just use the first matching
		 * stream to extract the information we need. We default to 0
		 * if no stream is found (no packets are lost if the channel
		 * is not yet in use).
		 */
		lost_packets = 0;
		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				ht->match_fct, &id,
				&iter.iter, stream, node_session_id.node) {
			if (stream->chan->key == key) {
				lost_packets = stream->chan->lost_packets;
				break;
			}
		}
		pthread_mutex_unlock(&consumer_data.lock);
		rcu_read_unlock();

		DBG("UST consumer lost packets command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &lost_packets,
				sizeof(lost_packets));
		if (ret < 0) {
			PERROR("send lost packets");
			goto error_fatal;
		}

		break;
	}
	default:
		break;
	}

end_nosignal:
	rcu_read_unlock();

	health_code_update();

	/*
	 * Return 1 to indicate success since a return value of 0 can mean a
	 * socket shutdown during the recv() or send() call.
	 */
	return 1;

end_msg_sessiond:
	/*
	 * The returned value here is not useful since either way we'll return
	 * 1 to the caller because the session daemon socket management is
	 * done elsewhere. Returning a negative code or 0 will shut down the
	 * consumer.
	 */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		goto error_fatal;
	}
	rcu_read_unlock();

	health_code_update();

	return 1;
end_channel_error:
	if (channel) {
		/*
		 * Free the channel here since no one has a reference to it.
		 * We don't free it later than this point because a stream can
		 * store this pointer.
		 */
		destroy_channel(channel);
	}
	/* We have to send a status channel message indicating an error. */
	ret = consumer_send_status_channel(sock, NULL);
	if (ret < 0) {
		/* Stop everything if the session daemon cannot be notified. */
		goto error_fatal;
	}
	rcu_read_unlock();

	health_code_update();

	return 1;
error_fatal:
	rcu_read_unlock();
	/* This will issue a consumer stop. */
	return -1;
}
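
/*
 * Caller-side sketch of the return contract above (an assumption for
 * illustration, not code from this file): the thread servicing the
 * sessiond socket typically loops along these lines:
 *
 *	ret = lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
 *	if (ret <= 0) {
 *		treat 0 as an orderly shutdown, < 0 as a fatal error
 *	}
 */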
1974
1975 void lttng_ustctl_flush_buffer(struct lttng_consumer_stream *stream,
1976 int producer_active)
1977 {
1978 assert(stream);
1979 assert(stream->ustream);
1980
1981 ustctl_flush_buffer(stream->ustream, producer_active);
1982 }
1983
1984 /*
1985 * Take a snapshot for a specific fd
1986 *
1987 * Returns 0 on success, < 0 on error
1988 */
1989 int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
1990 {
1991 assert(stream);
1992 assert(stream->ustream);
1993
1994 return ustctl_snapshot(stream->ustream);
1995 }
1996
1997 /*
1998 * Get the produced position
1999 *
2000 * Returns 0 on success, < 0 on error
2001 */
2002 int lttng_ustconsumer_get_produced_snapshot(
2003 struct lttng_consumer_stream *stream, unsigned long *pos)
2004 {
2005 assert(stream);
2006 assert(stream->ustream);
2007 assert(pos);
2008
2009 return ustctl_snapshot_get_produced(stream->ustream, pos);
2010 }
2011
2012 /*
2013 * Get the consumed position
2014 *
2015 * Returns 0 on success, < 0 on error
2016 */
2017 int lttng_ustconsumer_get_consumed_snapshot(
2018 struct lttng_consumer_stream *stream, unsigned long *pos)
2019 {
2020 assert(stream);
2021 assert(stream->ustream);
2022 assert(pos);
2023
2024 return ustctl_snapshot_get_consumed(stream->ustream, pos);
2025 }
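
/*
 * A minimal usage sketch for the three snapshot helpers above: take a
 * snapshot of the ring buffer positions, then read the produced and
 * consumed positions to estimate how many bytes are pending. This is an
 * illustration, not an upstream helper; error handling is reduced and the
 * caller is assumed to hold the stream lock.
 */
#if 0
static int stream_pending_bytes_sketch(struct lttng_consumer_stream *stream,
		unsigned long *pending)
{
	unsigned long produced, consumed;
	int ret;

	ret = lttng_ustconsumer_take_snapshot(stream);
	if (ret < 0) {
		return ret;
	}
	ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced);
	if (ret < 0) {
		return ret;
	}
	ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed);
	if (ret < 0) {
		return ret;
	}
	*pending = produced - consumed;
	return 0;
}
#endif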
2026
2027 void lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream,
2028 int producer)
2029 {
2030 assert(stream);
2031 assert(stream->ustream);
2032
2033 ustctl_flush_buffer(stream->ustream, producer);
2034 }
2035
2036 int lttng_ustconsumer_get_current_timestamp(
2037 struct lttng_consumer_stream *stream, uint64_t *ts)
2038 {
2039 assert(stream);
2040 assert(stream->ustream);
2041 assert(ts);
2042
2043 return ustctl_get_current_timestamp(stream->ustream, ts);
2044 }
2045
2046 int lttng_ustconsumer_get_sequence_number(
2047 struct lttng_consumer_stream *stream, uint64_t *seq)
2048 {
2049 assert(stream);
2050 assert(stream->ustream);
2051 assert(seq);
2052
2053 return ustctl_get_sequence_number(stream->ustream, seq);
2054 }
2055
2056 /*
2057 * Called when the stream signals the consumer that it has hung up.
2058 */
2059 void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
2060 {
2061 assert(stream);
2062 assert(stream->ustream);
2063
2064 pthread_mutex_lock(&stream->lock);
2065 if (!stream->quiescent) {
2066 ustctl_flush_buffer(stream->ustream, 0);
2067 stream->quiescent = true;
2068 }
2069 pthread_mutex_unlock(&stream->lock);
2070 stream->hangup_flush_done = 1;
2071 }
2072
2073 void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
2074 {
2075 int i;
2076
2077 assert(chan);
2078 assert(chan->uchan);
2079
2080 if (chan->switch_timer_enabled == 1) {
2081 consumer_timer_switch_stop(chan);
2082 }
2083 for (i = 0; i < chan->nr_stream_fds; i++) {
2084 int ret;
2085
2086 ret = close(chan->stream_fds[i]);
2087 if (ret) {
2088 PERROR("close");
2089 }
2090 if (chan->shm_path[0]) {
2091 char shm_path[PATH_MAX];
2092
2093 ret = get_stream_shm_path(shm_path, chan->shm_path, i);
2094 if (ret) {
2095 ERR("Cannot get stream shm path");
2096 }
2097 ret = run_as_unlink(shm_path, chan->uid, chan->gid);
2098 if (ret) {
2099 PERROR("unlink %s", shm_path);
2100 }
2101 }
2102 }
2103 }
2104
2105 void lttng_ustconsumer_free_channel(struct lttng_consumer_channel *chan)
2106 {
2107 assert(chan);
2108 assert(chan->uchan);
2109
2110 consumer_metadata_cache_destroy(chan);
2111 ustctl_destroy_channel(chan->uchan);
2112 /* Try to rmdir all directories under shm_path root. */
2113 if (chan->root_shm_path[0]) {
2114 (void) run_as_recursive_rmdir(chan->root_shm_path,
2115 chan->uid, chan->gid);
2116 }
2117 free(chan->stream_fds);
2118 }
2119
2120 void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
2121 {
2122 assert(stream);
2123 assert(stream->ustream);
2124
2125 if (stream->chan->switch_timer_enabled == 1) {
2126 consumer_timer_switch_stop(stream->chan);
2127 }
2128 ustctl_destroy_stream(stream->ustream);
2129 }
2130
2131 int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream)
2132 {
2133 assert(stream);
2134 assert(stream->ustream);
2135
2136 return ustctl_stream_get_wakeup_fd(stream->ustream);
2137 }
2138
2139 int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream)
2140 {
2141 assert(stream);
2142 assert(stream->ustream);
2143
2144 return ustctl_stream_close_wakeup_fd(stream->ustream);
2145 }
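
/*
 * Illustrative sketch: the wakeup fd returned above is meant to be watched
 * with poll()/epoll so a thread can sleep until the tracer signals that
 * data may be available. The single-fd poll() below is an assumption for
 * the example (the real consumer multiplexes many fds and also checks
 * revents); it is not part of the upstream flow.
 */
#if 0
static int wait_for_stream_data_sketch(struct lttng_consumer_stream *stream,
		int timeout_ms)
{
	struct pollfd pfd;

	pfd.fd = lttng_ustconsumer_get_wakeup_fd(stream);
	if (pfd.fd < 0) {
		return pfd.fd;
	}
	pfd.events = POLLIN;
	/* Returns > 0 if the fd is readable, 0 on timeout, < 0 on error. */
	return poll(&pfd, 1, timeout_ms);
}
#endif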
2146
2147 /*
2148 * Populate index values of a UST stream. Values are set in big endian order.
2149 *
2150 * Return 0 on success or else a negative value.
2151 */
2152 static int get_index_values(struct ctf_packet_index *index,
2153 struct ustctl_consumer_stream *ustream)
2154 {
2155 int ret;
2156
2157 ret = ustctl_get_timestamp_begin(ustream, &index->timestamp_begin);
2158 if (ret < 0) {
2159 PERROR("ustctl_get_timestamp_begin");
2160 goto error;
2161 }
2162 index->timestamp_begin = htobe64(index->timestamp_begin);
2163
2164 ret = ustctl_get_timestamp_end(ustream, &index->timestamp_end);
2165 if (ret < 0) {
2166 PERROR("ustctl_get_timestamp_end");
2167 goto error;
2168 }
2169 index->timestamp_end = htobe64(index->timestamp_end);
2170
2171 ret = ustctl_get_events_discarded(ustream, &index->events_discarded);
2172 if (ret < 0) {
2173 PERROR("ustctl_get_events_discarded");
2174 goto error;
2175 }
2176 index->events_discarded = htobe64(index->events_discarded);
2177
2178 ret = ustctl_get_content_size(ustream, &index->content_size);
2179 if (ret < 0) {
2180 PERROR("ustctl_get_content_size");
2181 goto error;
2182 }
2183 index->content_size = htobe64(index->content_size);
2184
2185 ret = ustctl_get_packet_size(ustream, &index->packet_size);
2186 if (ret < 0) {
2187 PERROR("ustctl_get_packet_size");
2188 goto error;
2189 }
2190 index->packet_size = htobe64(index->packet_size);
2191
2192 ret = ustctl_get_stream_id(ustream, &index->stream_id);
2193 if (ret < 0) {
2194 PERROR("ustctl_get_stream_id");
2195 goto error;
2196 }
2197 index->stream_id = htobe64(index->stream_id);
2198
2199 ret = ustctl_get_instance_id(ustream, &index->stream_instance_id);
2200 if (ret < 0) {
2201 PERROR("ustctl_get_instance_id");
2202 goto error;
2203 }
2204 index->stream_instance_id = htobe64(index->stream_instance_id);
2205
2206 ret = ustctl_get_sequence_number(ustream, &index->packet_seq_num);
2207 if (ret < 0) {
2208 PERROR("ustctl_get_sequence_number");
2209 goto error;
2210 }
2211 index->packet_seq_num = htobe64(index->packet_seq_num);
2212
2213 error:
2214 return ret;
2215 }
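
/*
 * The index fields above are stored big endian for the on-disk CTF index
 * format, so a reader converts them back with be64toh(). A small sketch,
 * assuming 'index' was read back from an index file (hypothetical reader
 * code, not part of this file's flow):
 */
#if 0
static void read_index_entry_sketch(struct ctf_packet_index *index)
{
	uint64_t content_size, packet_size;

	/* Inverse of the htobe64() conversions done at write time. */
	content_size = be64toh(index->content_size);
	packet_size = be64toh(index->packet_size);
	assert(packet_size >= content_size);
}
#endif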
2216
2217 static
2218 void metadata_stream_reset_cache(struct lttng_consumer_stream *stream,
2219 struct consumer_metadata_cache *cache)
2220 {
2221 DBG("Metadata stream update to version %" PRIu64,
2222 cache->version);
2223 stream->ust_metadata_pushed = 0;
2224 stream->metadata_version = cache->version;
2225 stream->reset_metadata_flag = 1;
2226 }
2227
2228 /*
2229 * Check if the version of the metadata stream and metadata cache match.
2230 * If the cache got updated, reset the metadata stream.
2231 * The stream lock and metadata cache lock MUST be held.
2232 * Return 0 on success, a negative value on error.
2233 */
2234 static
2235 int metadata_stream_check_version(struct lttng_consumer_stream *stream)
2236 {
2237 int ret = 0;
2238 struct consumer_metadata_cache *cache = stream->chan->metadata_cache;
2239
2240 if (cache->version == stream->metadata_version) {
2241 goto end;
2242 }
2243 metadata_stream_reset_cache(stream, cache);
2244
2245 end:
2246 return ret;
2247 }
2248
2249 /*
2250 * Write up to one packet from the metadata cache to the channel.
2251 *
2252 * Returns the number of bytes pushed in the cache, or a negative value
2253 * on error.
2254 */
2255 static
2256 int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
2257 {
2258 ssize_t write_len;
2259 int ret;
2260
2261 pthread_mutex_lock(&stream->chan->metadata_cache->lock);
2262 ret = metadata_stream_check_version(stream);
2263 if (ret < 0) {
2264 goto end;
2265 }
2266 if (stream->chan->metadata_cache->max_offset
2267 == stream->ust_metadata_pushed) {
2268 ret = 0;
2269 goto end;
2270 }
2271
2272 write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
2273 &stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
2274 stream->chan->metadata_cache->max_offset
2275 - stream->ust_metadata_pushed);
2276 assert(write_len != 0);
2277 if (write_len < 0) {
2278 ERR("Writing one metadata packet");
2279 ret = -1;
2280 goto end;
2281 }
2282 stream->ust_metadata_pushed += write_len;
2283
2284 assert(stream->chan->metadata_cache->max_offset >=
2285 stream->ust_metadata_pushed);
2286 ret = write_len;
2287
2288 end:
2289 pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
2290 return ret;
2291 }
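
/*
 * Since commit_one_metadata_packet() writes at most one packet and returns
 * the number of bytes pushed (0 once the cache is exhausted), a caller
 * holding the stream lock could drain the whole cache by looping, as in
 * this sketch. Upstream callers push one packet per iteration instead;
 * the loop is shown only to make the return-value contract concrete.
 */
#if 0
static void drain_metadata_cache_sketch(struct lttng_consumer_stream *stream)
{
	int ret;

	do {
		ret = commit_one_metadata_packet(stream);
	} while (ret > 0);
	if (ret < 0) {
		ERR("Draining metadata cache");
	}
}
#endif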
2292
2293
2294 /*
2295 * Sync metadata: request it from the session daemon and take a snapshot
2296 * so the metadata thread can consume it.
2297 *
2298 * The metadata stream lock is held on entry, but we must release it when
2299 * interacting with the sessiond, else we cause a deadlock with live
2300 * viewers awaiting metadata to be pushed out.
2301 *
2302 * Return 0 if new metadata is available, ENODATA if the stream is empty,
2303 * EAGAIN if more metadata remains, or a negative value on error.
2304 */
2305 int lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
2306 struct lttng_consumer_stream *metadata)
2307 {
2308 int ret;
2309 int retry = 0;
2310
2311 assert(ctx);
2312 assert(metadata);
2313
2314 pthread_mutex_unlock(&metadata->lock);
2315 /*
2316 * Request metadata from the sessiond, but don't wait for the flush to
2317 * complete: that would block the metadata thread itself.
2318 */
2319 ret = lttng_ustconsumer_request_metadata(ctx, metadata->chan, 0, 0);
2320 pthread_mutex_lock(&metadata->lock);
2321 if (ret < 0) {
2322 goto end;
2323 }
2324
2325 ret = commit_one_metadata_packet(metadata);
2326 if (ret <= 0) {
2327 goto end;
2328 }
2329 /* ret > 0: one packet was committed; more may remain after the flush. */
2330 retry = 1;
2331
2332 ustctl_flush_buffer(metadata->ustream, 1);
2333 ret = ustctl_snapshot(metadata->ustream);
2334 if (ret < 0) {
2335 if (errno != EAGAIN) {
2336 ERR("Sync metadata, taking UST snapshot");
2337 goto end;
2338 }
2339 DBG("No new metadata when syncing them.");
2340 /* No new metadata, exit. */
2341 ret = ENODATA;
2342 goto end;
2343 }
2344
2345 /*
2346 * After this flush, we still need to extract metadata.
2347 */
2348 if (retry) {
2349 ret = EAGAIN;
2350 }
2351
2352 end:
2353 return ret;
2354 }
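
/*
 * A hedged sketch of how a metadata thread might drive the function
 * above, looping while more metadata remains (EAGAIN) and treating an
 * empty stream (ENODATA) as a clean stop. The loop shape and the caller
 * are assumptions for illustration; the real dispatch lives in the
 * consumer's metadata thread.
 */
#if 0
static void metadata_sync_sketch(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *metadata)
{
	int ret;

	/* The function expects the metadata stream lock to be held. */
	pthread_mutex_lock(&metadata->lock);
	do {
		ret = lttng_ustconsumer_sync_metadata(ctx, metadata);
	} while (ret == EAGAIN);
	pthread_mutex_unlock(&metadata->lock);

	if (ret < 0) {
		/* Hard error; the stream should be torn down. */
	} else if (ret == ENODATA) {
		/* Metadata stream empty; nothing to read. */
	}
}
#endif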
2355
2356 /*
2357 * Return 0 on success else a negative value.
2358 */
2359 static int notify_if_more_data(struct lttng_consumer_stream *stream,
2360 struct lttng_consumer_local_data *ctx)
2361 {
2362 int ret;
2363 struct ustctl_consumer_stream *ustream;
2364
2365 assert(stream);
2366 assert(ctx);
2367
2368 ustream = stream->ustream;
2369
2370 /*
2371 * First, we are going to check if there is a new subbuffer available
2372 * before reading the stream wait_fd.
2373 */
2374 /* Get the next subbuffer */
2375 ret = ustctl_get_next_subbuf(ustream);
2376 if (ret) {
2377 /* No more data found, flag the stream. */
2378 stream->has_data = 0;
2379 ret = 0;
2380 goto end;
2381 }
2382
2383 ret = ustctl_put_subbuf(ustream);
2384 assert(!ret);
2385
2386 /* This stream still has data. Flag it and wake up the data thread. */
2387 stream->has_data = 1;
2388
2389 if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) {
2390 ssize_t writelen;
2391
2392 writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1);
2393 if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
2394 ret = writelen;
2395 goto end;
2396 }
2397
2398 /* The wake up pipe has been notified. */
2399 ctx->has_wakeup = 1;
2400 }
2401 ret = 0;
2402
2403 end:
2404 return ret;
2405 }
2406
2407 static
2408 int update_stream_stats(struct lttng_consumer_stream *stream)
2409 {
2410 int ret;
2411 uint64_t seq, discarded;
2412
2413 ret = ustctl_get_sequence_number(stream->ustream, &seq);
2414 if (ret < 0) {
2415 PERROR("ustctl_get_sequence_number");
2416 goto end;
2417 }
2418 /*
2419 * Start the sequence when we extract the first packet, in case the
2420 * stream does not start at 0 (for example, if a consumer connects to
2421 * the session some time after it has begun).
2422 */
2423 if (stream->last_sequence_number == -1ULL) {
2424 stream->last_sequence_number = seq;
2425 } else if (seq > stream->last_sequence_number) {
2426 stream->chan->lost_packets += seq -
2427 stream->last_sequence_number - 1;
2428 } else {
2429 /* seq <= last_sequence_number */
2430 ERR("Sequence number inconsistent : prev = %" PRIu64
2431 ", current = %" PRIu64,
2432 stream->last_sequence_number, seq);
2433 ret = -1;
2434 goto end;
2435 }
2436 stream->last_sequence_number = seq;
2437
2438 ret = ustctl_get_events_discarded(stream->ustream, &discarded);
2439 if (ret < 0) {
2440 PERROR("kernctl_get_events_discarded");
2441 goto end;
2442 }
2443 if (discarded < stream->last_discarded_events) {
2444 /*
2445 * Overflow has occurred. We assume only one wrap-around
2446 * has occurred.
2447 */
2448 stream->chan->discarded_events +=
2449 (1ULL << (CAA_BITS_PER_LONG - 1)) -
2450 stream->last_discarded_events + discarded;
2451 } else {
2452 stream->chan->discarded_events += discarded -
2453 stream->last_discarded_events;
2454 }
2455 stream->last_discarded_events = discarded;
2456 ret = 0;
2457
2458 end:
2459 return ret;
2460 }
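
/*
 * Worked example of the wrap-around branch above, assuming a 64-bit
 * userspace (CAA_BITS_PER_LONG == 64), where the discarded-events counter
 * wraps at 2^63. With last_discarded_events == 2^63 - 10 and a newly read
 * discarded == 5, the accumulated total grows by 10 events up to the wrap
 * plus 5 after it, which is exactly what the expression computes:
 */
#if 0
static void discarded_wraparound_example(void)
{
	const uint64_t last = (1ULL << 63) - 10;	/* last_discarded_events */
	const uint64_t discarded = 5;			/* newly read counter */
	const uint64_t delta =
			(1ULL << (CAA_BITS_PER_LONG - 1)) - last + discarded;

	assert(delta == 15);
}
#endif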
2461
2462 /*
2463 * Read subbuffer from the given stream.
2464 *
2465 * Stream lock MUST be acquired.
2466 *
2467 * Return 0 on success else a negative value.
2468 */
2469 int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
2470 struct lttng_consumer_local_data *ctx)
2471 {
2472 unsigned long len, subbuf_size, padding;
2473 int err, write_index = 1;
2474 long ret = 0;
2475 struct ustctl_consumer_stream *ustream;
2476 struct ctf_packet_index index;
2477 const char *subbuf_addr;
2478
2479 assert(stream);
2480 assert(stream->ustream);
2481 assert(ctx);
2482
2483 DBG("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
2484 stream->name);
2485
2486 /* Ease our life for what's next. */
2487 ustream = stream->ustream;
2488
2489 /*
2490 * We can consume the 1 byte written into the wait_fd by UST. Don't trigger
2491 * error if we cannot read this one byte (read returns 0), or if the error
2492 * is EAGAIN or EWOULDBLOCK.
2493 *
2494 * This is only done when the stream is monitored by a thread, before the
2495 * flush is done after a hangup and if the stream is not flagged with data
2496 * since there might be nothing to consume on the wait fd while data is
2497 * still available, as flagged through the consumer wakeup pipe.
2498 */
2499 if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) {
2500 char dummy;
2501 ssize_t readlen;
2502
2503 readlen = lttng_read(stream->wait_fd, &dummy, 1);
2504 if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
2505 ret = readlen;
2506 goto end;
2507 }
2508 }
2509
2510 retry:
2511 /* Get the next subbuffer */
2512 err = ustctl_get_next_subbuf(ustream);
2513 if (err != 0) {
2514 /*
2515 * Populate metadata info if the existing info has
2516 * already been read.
2517 */
2518 if (stream->metadata_flag) {
2519 ret = commit_one_metadata_packet(stream);
2520 if (ret <= 0) {
2521 goto end;
2522 }
2523 ustctl_flush_buffer(stream->ustream, 1);
2524 goto retry;
2525 }
2526
2527 ret = err; /* ustctl_get_next_subbuf returns a negative value on error; pass it on. */
2528 /*
2529 * This is a debug message even for a single-threaded consumer,
2530 * because poll() has more relaxed criteria than get_subbuf, so
2531 * get_subbuf may fail in short race windows where poll()
2532 * would issue wakeups.
2533 */
2534 DBG("Reserving sub buffer failed (everything is normal, "
2535 "it is due to concurrency) [ret: %d]", err);
2536 goto end;
2537 }
2538 assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);
2539
2540 if (!stream->metadata_flag) {
2541 index.offset = htobe64(stream->out_fd_offset);
2542 ret = get_index_values(&index, ustream);
2543 if (ret < 0) {
2544 err = ustctl_put_subbuf(ustream);
2545 assert(err == 0);
2546 goto end;
2547 }
2548
2549 /* Update the stream's sequence and discarded events count. */
2550 ret = update_stream_stats(stream);
2551 if (ret < 0) {
2552 PERROR("kernctl_get_events_discarded");
2553 err = ustctl_put_subbuf(ustream);
2554 assert(err == 0);
2555 goto end;
2556 }
2557 } else {
2558 write_index = 0;
2559 }
2560
2561 /* Get the full padded subbuffer size */
2562 err = ustctl_get_padded_subbuf_size(ustream, &len);
2563 assert(err == 0);
2564
2565 /* Get subbuffer data size (without padding) */
2566 err = ustctl_get_subbuf_size(ustream, &subbuf_size);
2567 assert(err == 0);
2568
2569 /* Make sure we don't get a subbuffer size bigger than the padded */
2570 assert(len >= subbuf_size);
2571
2572 padding = len - subbuf_size;
2573
2574 ret = get_current_subbuf_addr(stream, &subbuf_addr);
2575 if (ret) {
2576 write_index = 0;
2577 goto error_put_subbuf;
2578 }
2579
2580 /* write the subbuffer to the tracefile */
2581 ret = lttng_consumer_on_read_subbuffer_mmap(
2582 ctx, stream, subbuf_addr, subbuf_size, padding, &index);
2583 /*
2584 * The mmap operation should write subbuf_size bytes when network
2585 * streaming, or the full padded size (len) when we are _not_
2586 * streaming.
2587 */
2588 if ((ret != subbuf_size && stream->relayd_id != (uint64_t) -1ULL) ||
2589 (ret != len && stream->relayd_id == (uint64_t) -1ULL)) {
2590 /*
2591 * Display the error but continue processing to try to release the
2592 * subbuffer. This is a DBG statement since any unexpected kill or
2593 * signal, an application unregistering, a relayd closing, or anything
2594 * else that affects the buffer lifetime will trigger this condition.
2595 * So, for the sake of the user, don't report it as an error since it
2596 * can happen and is OK with the code flow.
2597 */
2598 DBG("Error writing to tracefile "
2599 "(ret: %ld != len: %lu != subbuf_size: %lu)",
2600 ret, len, subbuf_size);
2601 write_index = 0;
2602 }
2603 error_put_subbuf:
2604 err = ustctl_put_next_subbuf(ustream);
2605 assert(err == 0);
2606
2607 /*
2608 * This will consume the byte on the wait_fd if and only if there is no
2609 * next subbuffer to be acquired.
2610 */
2611 if (!stream->metadata_flag) {
2612 ret = notify_if_more_data(stream, ctx);
2613 if (ret < 0) {
2614 goto end;
2615 }
2616 }
2617
2618 /* Write index if needed. */
2619 if (!write_index) {
2620 goto end;
2621 }
2622
2623 if (stream->chan->live_timer_interval && !stream->metadata_flag) {
2624 /*
2625 * In live, block until all the metadata is sent.
2626 */
2627 pthread_mutex_lock(&stream->metadata_timer_lock);
2628 assert(!stream->missed_metadata_flush);
2629 stream->waiting_on_metadata = true;
2630 pthread_mutex_unlock(&stream->metadata_timer_lock);
2631
2632 err = consumer_stream_sync_metadata(ctx, stream->session_id);
2633
2634 pthread_mutex_lock(&stream->metadata_timer_lock);
2635 stream->waiting_on_metadata = false;
2636 if (stream->missed_metadata_flush) {
2637 stream->missed_metadata_flush = false;
2638 pthread_mutex_unlock(&stream->metadata_timer_lock);
2639 (void) consumer_flush_ust_index(stream);
2640 } else {
2641 pthread_mutex_unlock(&stream->metadata_timer_lock);
2642 }
2643
2644 if (err < 0) {
2645 goto end;
2646 }
2647 }
2648
2649 assert(!stream->metadata_flag);
2650 err = consumer_stream_write_index(stream, &index);
2651 if (err < 0) {
2652 goto end;
2653 }
2654
2655 end:
2656 return ret;
2657 }
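
/*
 * The core of the function above is the get/put sub-buffer cycle. A
 * stripped-down sketch of that cycle, without the indexing, metadata and
 * wakeup handling, looks like this (error handling reduced on purpose;
 * this helper is illustrative, not part of the upstream flow):
 */
#if 0
static long read_one_subbuffer_sketch(struct lttng_consumer_stream *stream)
{
	struct ustctl_consumer_stream *ustream = stream->ustream;
	unsigned long len;
	int err;

	err = ustctl_get_next_subbuf(ustream);
	if (err != 0) {
		return 0;	/* Nothing to consume right now. */
	}
	err = ustctl_get_padded_subbuf_size(ustream, &len);
	assert(err == 0);
	/* ... write 'len' bytes to the output here ... */
	err = ustctl_put_next_subbuf(ustream);
	assert(err == 0);
	return (long) len;
}
#endif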
2658
2659 /*
2660 * Called when a stream is created.
2661 *
2662 * Return 0 on success or else a negative value.
2663 */
2664 int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
2665 {
2666 int ret;
2667
2668 assert(stream);
2669
2670 /* Don't create anything if this is set for streaming. */
2671 if (stream->relayd_id == (uint64_t) -1ULL && stream->chan->monitor) {
2672 ret = utils_create_stream_file(stream->chan->pathname, stream->name,
2673 stream->chan->tracefile_size, stream->tracefile_count_current,
2674 stream->uid, stream->gid, NULL);
2675 if (ret < 0) {
2676 goto error;
2677 }
2678 stream->out_fd = ret;
2679 stream->tracefile_size_current = 0;
2680
2681 if (!stream->metadata_flag) {
2682 struct lttng_index_file *index_file;
2683
2684 index_file = lttng_index_file_create(stream->chan->pathname,
2685 stream->name, stream->uid, stream->gid,
2686 stream->chan->tracefile_size,
2687 stream->tracefile_count_current,
2688 CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
2689 if (!index_file) {
2690 goto error;
2691 }
2692 stream->index_file = index_file;
2693 }
2694 }
2695 ret = 0;
2696
2697 error:
2698 return ret;
2699 }
2700
2701 /*
2702 * Check if data is still being extracted from the buffers for a specific
2703 * stream. The consumer data lock and the stream lock MUST be acquired
2704 * before calling this function.
2705 *
2706 * Return 1 if traced data is still being extracted, else 0, meaning the
2707 * data is available for trace viewer reading.
2708 */
2709 int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
2710 {
2711 int ret;
2712
2713 assert(stream);
2714 assert(stream->ustream);
2715
2716 DBG("UST consumer checking data pending");
2717
2718 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
2719 ret = 0;
2720 goto end;
2721 }
2722
2723 if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
2724 uint64_t contiguous, pushed;
2725
2726 /* Ease our life a bit. */
2727 contiguous = stream->chan->metadata_cache->max_offset;
2728 pushed = stream->ust_metadata_pushed;
2729
2730 /*
2731 * We can simply check whether all contiguously available data
2732 * has been pushed to the ring buffer, since the push operation
2733 * is performed within get_next_subbuf(), and because both
2734 * get_next_subbuf() and put_next_subbuf() are issued atomically
2735 * thanks to the stream lock within
2736 * lttng_ustconsumer_read_subbuffer(). This basically means that
2737 * whenever ust_metadata_pushed is incremented, the associated
2738 * metadata has been consumed from the metadata stream.
2739 */
2740 DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
2741 contiguous, pushed);
2742 assert(((int64_t) (contiguous - pushed)) >= 0);
2743 if ((contiguous != pushed) ||
2744 (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
2745 ret = 1; /* Data is pending */
2746 goto end;
2747 }
2748 } else {
2749 ret = ustctl_get_next_subbuf(stream->ustream);
2750 if (ret == 0) {
2751 /*
2752 * There is still data so let's put back this
2753 * subbuffer.
2754 */
2755 ret = ustctl_put_subbuf(stream->ustream);
2756 assert(ret == 0);
2757 ret = 1; /* Data is pending */
2758 goto end;
2759 }
2760 }
2761
2762 /* Data is NOT pending so ready to be read. */
2763 ret = 0;
2764
2765 end:
2766 return ret;
2767 }
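
/*
 * Usage sketch for the pending check above, respecting the documented
 * locking order (consumer data lock, then stream lock). The surrounding
 * caller is an assumption for the illustration; the real call site is in
 * the consumer command handling.
 */
#if 0
static int viewer_data_pending_sketch(struct lttng_consumer_stream *stream)
{
	int pending;

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->lock);
	pending = lttng_ustconsumer_data_pending(stream);
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	/* 1: still being extracted; 0: ready for the trace viewer. */
	return pending;
}
#endif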
2768
2769 /*
2770 * Stop a given metadata channel timer if enabled and close the wait fd which
2771 * is the poll pipe of the metadata stream.
2772 *
2773 * This MUST be called with the metadata channel acquired.
2774 */
2775 void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
2776 {
2777 int ret;
2778
2779 assert(metadata);
2780 assert(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA);
2781
2782 DBG("Closing metadata channel key %" PRIu64, metadata->key);
2783
2784 if (metadata->switch_timer_enabled == 1) {
2785 consumer_timer_switch_stop(metadata);
2786 }
2787
2788 if (!metadata->metadata_stream) {
2789 goto end;
2790 }
2791
2792 /*
2793 * Close the write side so the thread monitoring the stream, if any,
2794 * wakes up and cleans up the metadata stream.
2795 */
2796 if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) {
2797 ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]);
2798 if (ret < 0) {
2799 PERROR("closing metadata pipe write side");
2800 }
2801 metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1;
2802 }
2803
2804 end:
2805 return;
2806 }
2807
2808 /*
2809 * Close every metadata stream wait fd of the metadata hash table. This
2810 * function MUST be used very carefully so as not to run into a race between the
2811 * metadata thread handling streams and this function closing their wait fd.
2812 *
2813 * For UST, this is used when the session daemon hangs up. It's the metadata
2814 * producer so calling this is safe because we are assured that no state change
2815 * can occur in the metadata thread for the streams in the hash table.
2816 */
2817 void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
2818 {
2819 struct lttng_ht_iter iter;
2820 struct lttng_consumer_stream *stream;
2821
2822 assert(metadata_ht);
2823 assert(metadata_ht->ht);
2824
2825 DBG("UST consumer closing all metadata streams");
2826
2827 rcu_read_lock();
2828 cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
2829 node.node) {
2830
2831 health_code_update();
2832
2833 pthread_mutex_lock(&stream->chan->lock);
2834 lttng_ustconsumer_close_metadata(stream->chan);
2835 pthread_mutex_unlock(&stream->chan->lock);
2836
2837 }
2838 rcu_read_unlock();
2839 }
2840
2841 void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
2842 {
2843 int ret;
2844
2845 ret = ustctl_stream_close_wakeup_fd(stream->ustream);
2846 if (ret < 0) {
2847 ERR("Unable to close wakeup fd");
2848 }
2849 }
2850
2851 /*
2852 * Please refer to consumer-timer.c before adding any lock within this
2853 * function or any of its callees. Timers have a very strict locking
2854 * semantic with respect to teardown. Failure to respect this semantic
2855 * introduces deadlocks.
2856 *
2857 * DON'T hold the metadata lock when calling this function, else this
2858 * can cause deadlock involving consumer awaiting for metadata to be
2859 * pushed out due to concurrent interaction with the session daemon.
2860 */
2861 int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
2862 struct lttng_consumer_channel *channel, int timer, int wait)
2863 {
2864 struct lttcomm_metadata_request_msg request;
2865 struct lttcomm_consumer_msg msg;
2866 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
2867 uint64_t len, key, offset, version;
2868 int ret;
2869
2870 assert(channel);
2871 assert(channel->metadata_cache);
2872
2873 memset(&request, 0, sizeof(request));
2874
2875 /* send the metadata request to sessiond */
2876 switch (consumer_data.type) {
2877 case LTTNG_CONSUMER64_UST:
2878 request.bits_per_long = 64;
2879 break;
2880 case LTTNG_CONSUMER32_UST:
2881 request.bits_per_long = 32;
2882 break;
2883 default:
2884 request.bits_per_long = 0;
2885 break;
2886 }
2887
2888 request.session_id = channel->session_id;
2889 request.session_id_per_pid = channel->session_id_per_pid;
2890 /*
2891 * Request the application UID here so the metadata of that application can
2892 * be sent back. The channel UID corresponds to the user UID of the session
2893 * used for the rights on the stream file(s).
2894 */
2895 request.uid = channel->ust_app_uid;
2896 request.key = channel->key;
2897
2898 DBG("Sending metadata request to sessiond, session id %" PRIu64
2899 ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
2900 request.session_id, request.session_id_per_pid, request.uid,
2901 request.key);
2902
2903 pthread_mutex_lock(&ctx->metadata_socket_lock);
2904
2905 health_code_update();
2906
2907 ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
2908 sizeof(request));
2909 if (ret < 0) {
2910 ERR("Asking metadata to sessiond");
2911 goto end;
2912 }
2913
2914 health_code_update();
2915
2916 /* Receive the metadata from sessiond */
2917 ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
2918 sizeof(msg));
2919 if (ret != sizeof(msg)) {
2920 DBG("Consumer received unexpected message size %d (expects %zu)",
2921 ret, sizeof(msg));
2922 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
2923 /*
2924 * The ret value might be 0, meaning an orderly shutdown, but this is
2925 * ok since the caller handles it.
2926 */
2927 goto end;
2928 }
2929
2930 health_code_update();
2931
2932 if (msg.cmd_type == LTTNG_ERR_UND) {
2933 /* No registry found */
2934 (void) consumer_send_status_msg(ctx->consumer_metadata_socket,
2935 ret_code);
2936 ret = 0;
2937 goto end;
2938 } else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
2939 ERR("Unexpected cmd_type received %d", msg.cmd_type);
2940 ret = -1;
2941 goto end;
2942 }
2943
2944 len = msg.u.push_metadata.len;
2945 key = msg.u.push_metadata.key;
2946 offset = msg.u.push_metadata.target_offset;
2947 version = msg.u.push_metadata.version;
2948
2949 assert(key == channel->key);
2950 if (len == 0) {
2951 DBG("No new metadata to receive for key %" PRIu64, key);
2952 }
2953
2954 health_code_update();
2955
2956 /* Tell session daemon we are ready to receive the metadata. */
2957 ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
2958 LTTCOMM_CONSUMERD_SUCCESS);
2959 if (ret < 0 || len == 0) {
2960 /*
2961 * Somehow, the session daemon is not responding anymore or there is
2962 * nothing to receive.
2963 */
2964 goto end;
2965 }
2966
2967 health_code_update();
2968
2969 ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
2970 key, offset, len, version, channel, timer, wait);
2971 if (ret >= 0) {
2972 /*
2973 * Only send the status msg if the sessiond is alive, meaning a
2974 * non-negative ret code.
2975 */
2976 (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret);
2977 }
2978 ret = 0;
2979
2980 end:
2981 health_code_update();
2982
2983 pthread_mutex_unlock(&ctx->metadata_socket_lock);
2984 return ret;
2985 }
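
/*
 * A hedged usage sketch for the function above: the 'timer' and 'wait'
 * flags are forwarded to the metadata reception path, and the sync path
 * earlier in this file passes (0, 0) precisely to avoid waiting for the
 * flush. A plain blocking request from a regular (non-timer) thread could
 * look like this; the semantics of wait = 1 are inferred from that usage.
 */
#if 0
static int request_metadata_blocking_sketch(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel)
{
	/* timer = 0 (not in timer context), wait = 1 (wait for the flush). */
	return lttng_ustconsumer_request_metadata(ctx, channel, 0, 1);
}
#endif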
2986
2987 /*
2988 * Return the result of the ustctl call to get the stream id.
2989 */
2990 int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream,
2991 uint64_t *stream_id)
2992 {
2993 assert(stream);
2994 assert(stream_id);
2995
2996 return ustctl_get_stream_id(stream->ustream, stream_id);
2997 }