5334d13585842b0d63ab7f61d2b8cfa33e2b3b5e
[lttng-tools.git] / src / common / kernel-consumer / kernel-consumer.c
1 /*
2 * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0-only
7 *
8 */
9
10 #define _LGPL_SOURCE
11 #include <assert.h>
12 #include <poll.h>
13 #include <pthread.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <sys/mman.h>
17 #include <sys/socket.h>
18 #include <sys/types.h>
19 #include <inttypes.h>
20 #include <unistd.h>
21 #include <sys/stat.h>
22 #include <stdint.h>
23
24 #include <bin/lttng-consumerd/health-consumerd.h>
25 #include <common/common.h>
26 #include <common/kernel-ctl/kernel-ctl.h>
27 #include <common/sessiond-comm/sessiond-comm.h>
28 #include <common/sessiond-comm/relayd.h>
29 #include <common/compat/fcntl.h>
30 #include <common/compat/endian.h>
31 #include <common/pipe.h>
32 #include <common/relayd/relayd.h>
33 #include <common/utils.h>
34 #include <common/consumer/consumer-stream.h>
35 #include <common/index/index.h>
36 #include <common/consumer/consumer-timer.h>
37 #include <common/optional.h>
38 #include <common/buffer-view.h>
39 #include <common/consumer/consumer.h>
40 #include <common/consumer/metadata-bucket.h>
41
42 #include "kernel-consumer.h"
43
44 extern struct lttng_consumer_global_data the_consumer_data;
45 extern int consumer_poll_timeout;
46
47 /*
48 * Take a snapshot for a specific fd
49 *
50 * Returns 0 on success, < 0 on error
51 */
52 int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream)
53 {
54 int ret = 0;
55 int infd = stream->wait_fd;
56
57 ret = kernctl_snapshot(infd);
58 /*
59 * -EAGAIN is not an error, it just means that there is no data to
60 * be read.
61 */
62 if (ret != 0 && ret != -EAGAIN) {
63 PERROR("Getting sub-buffer snapshot.");
64 }
65
66 return ret;
67 }
68
69 /*
70 * Sample consumed and produced positions for a specific fd.
71 *
72 * Returns 0 on success, < 0 on error.
73 */
74 int lttng_kconsumer_sample_snapshot_positions(
75 struct lttng_consumer_stream *stream)
76 {
77 assert(stream);
78
79 return kernctl_snapshot_sample_positions(stream->wait_fd);
80 }
81
82 /*
83 * Get the produced position
84 *
85 * Returns 0 on success, < 0 on error
86 */
87 int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
88 unsigned long *pos)
89 {
90 int ret;
91 int infd = stream->wait_fd;
92
93 ret = kernctl_snapshot_get_produced(infd, pos);
94 if (ret != 0) {
95 PERROR("kernctl_snapshot_get_produced");
96 }
97
98 return ret;
99 }
100
101 /*
102 * Get the consumerd position
103 *
104 * Returns 0 on success, < 0 on error
105 */
106 int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
107 unsigned long *pos)
108 {
109 int ret;
110 int infd = stream->wait_fd;
111
112 ret = kernctl_snapshot_get_consumed(infd, pos);
113 if (ret != 0) {
114 PERROR("kernctl_snapshot_get_consumed");
115 }
116
117 return ret;
118 }
119
120 static
121 int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
122 const char **addr)
123 {
124 int ret;
125 unsigned long mmap_offset;
126 const char *mmap_base = stream->mmap_base;
127
128 ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
129 if (ret < 0) {
130 PERROR("Failed to get mmap read offset");
131 goto error;
132 }
133
134 *addr = mmap_base + mmap_offset;
135 error:
136 return ret;
137 }
138
139 /*
140 * Take a snapshot of all the stream of a channel
141 * RCU read-side lock must be held across this function to ensure existence of
142 * channel. The channel lock must be held by the caller.
143 *
144 * Returns 0 on success, < 0 on error
145 */
146 static int lttng_kconsumer_snapshot_channel(
147 struct lttng_consumer_channel *channel,
148 uint64_t key, char *path, uint64_t relayd_id,
149 uint64_t nb_packets_per_stream,
150 struct lttng_consumer_local_data *ctx)
151 {
152 int ret;
153 struct lttng_consumer_stream *stream;
154
155 DBG("Kernel consumer snapshot channel %" PRIu64, key);
156
157 rcu_read_lock();
158
159 /* Splice is not supported yet for channel snapshot. */
160 if (channel->output != CONSUMER_CHANNEL_MMAP) {
161 ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot",
162 channel->name);
163 ret = -1;
164 goto end;
165 }
166
167 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
168 unsigned long consumed_pos, produced_pos;
169
170 health_code_update();
171
172 /*
173 * Lock stream because we are about to change its state.
174 */
175 pthread_mutex_lock(&stream->lock);
176
177 assert(channel->trace_chunk);
178 if (!lttng_trace_chunk_get(channel->trace_chunk)) {
179 /*
180 * Can't happen barring an internal error as the channel
181 * holds a reference to the trace chunk.
182 */
183 ERR("Failed to acquire reference to channel's trace chunk");
184 ret = -1;
185 goto end_unlock;
186 }
187 assert(!stream->trace_chunk);
188 stream->trace_chunk = channel->trace_chunk;
189
190 /*
191 * Assign the received relayd ID so we can use it for streaming. The streams
192 * are not visible to anyone so this is OK to change it.
193 */
194 stream->net_seq_idx = relayd_id;
195 channel->relayd_id = relayd_id;
196 if (relayd_id != (uint64_t) -1ULL) {
197 ret = consumer_send_relayd_stream(stream, path);
198 if (ret < 0) {
199 ERR("sending stream to relayd");
200 goto end_unlock;
201 }
202 } else {
203 ret = consumer_stream_create_output_files(stream,
204 false);
205 if (ret < 0) {
206 goto end_unlock;
207 }
208 DBG("Kernel consumer snapshot stream (%" PRIu64 ")",
209 stream->key);
210 }
211
212 ret = kernctl_buffer_flush_empty(stream->wait_fd);
213 if (ret < 0) {
214 /*
215 * Doing a buffer flush which does not take into
216 * account empty packets. This is not perfect
217 * for stream intersection, but required as a
218 * fall-back when "flush_empty" is not
219 * implemented by lttng-modules.
220 */
221 ret = kernctl_buffer_flush(stream->wait_fd);
222 if (ret < 0) {
223 ERR("Failed to flush kernel stream");
224 goto end_unlock;
225 }
226 goto end_unlock;
227 }
228
229 ret = lttng_kconsumer_take_snapshot(stream);
230 if (ret < 0) {
231 ERR("Taking kernel snapshot");
232 goto end_unlock;
233 }
234
235 ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos);
236 if (ret < 0) {
237 ERR("Produced kernel snapshot position");
238 goto end_unlock;
239 }
240
241 ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos);
242 if (ret < 0) {
243 ERR("Consumerd kernel snapshot position");
244 goto end_unlock;
245 }
246
247 consumed_pos = consumer_get_consume_start_pos(consumed_pos,
248 produced_pos, nb_packets_per_stream,
249 stream->max_sb_size);
250
251 while ((long) (consumed_pos - produced_pos) < 0) {
252 ssize_t read_len;
253 unsigned long len, padded_len;
254 const char *subbuf_addr;
255 struct lttng_buffer_view subbuf_view;
256
257 health_code_update();
258 DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos);
259
260 ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos);
261 if (ret < 0) {
262 if (ret != -EAGAIN) {
263 PERROR("kernctl_get_subbuf snapshot");
264 goto end_unlock;
265 }
266 DBG("Kernel consumer get subbuf failed. Skipping it.");
267 consumed_pos += stream->max_sb_size;
268 stream->chan->lost_packets++;
269 continue;
270 }
271
272 ret = kernctl_get_subbuf_size(stream->wait_fd, &len);
273 if (ret < 0) {
274 ERR("Snapshot kernctl_get_subbuf_size");
275 goto error_put_subbuf;
276 }
277
278 ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len);
279 if (ret < 0) {
280 ERR("Snapshot kernctl_get_padded_subbuf_size");
281 goto error_put_subbuf;
282 }
283
284 ret = get_current_subbuf_addr(stream, &subbuf_addr);
285 if (ret) {
286 goto error_put_subbuf;
287 }
288
289 subbuf_view = lttng_buffer_view_init(
290 subbuf_addr, 0, padded_len);
291 read_len = lttng_consumer_on_read_subbuffer_mmap(
292 stream, &subbuf_view,
293 padded_len - len);
294 /*
295 * We write the padded len in local tracefiles but the data len
296 * when using a relay. Display the error but continue processing
297 * to try to release the subbuffer.
298 */
299 if (relayd_id != (uint64_t) -1ULL) {
300 if (read_len != len) {
301 ERR("Error sending to the relay (ret: %zd != len: %lu)",
302 read_len, len);
303 }
304 } else {
305 if (read_len != padded_len) {
306 ERR("Error writing to tracefile (ret: %zd != len: %lu)",
307 read_len, padded_len);
308 }
309 }
310
311 ret = kernctl_put_subbuf(stream->wait_fd);
312 if (ret < 0) {
313 ERR("Snapshot kernctl_put_subbuf");
314 goto end_unlock;
315 }
316 consumed_pos += stream->max_sb_size;
317 }
318
319 if (relayd_id == (uint64_t) -1ULL) {
320 if (stream->out_fd >= 0) {
321 ret = close(stream->out_fd);
322 if (ret < 0) {
323 PERROR("Kernel consumer snapshot close out_fd");
324 goto end_unlock;
325 }
326 stream->out_fd = -1;
327 }
328 } else {
329 close_relayd_stream(stream);
330 stream->net_seq_idx = (uint64_t) -1ULL;
331 }
332 lttng_trace_chunk_put(stream->trace_chunk);
333 stream->trace_chunk = NULL;
334 pthread_mutex_unlock(&stream->lock);
335 }
336
337 /* All good! */
338 ret = 0;
339 goto end;
340
341 error_put_subbuf:
342 ret = kernctl_put_subbuf(stream->wait_fd);
343 if (ret < 0) {
344 ERR("Snapshot kernctl_put_subbuf error path");
345 }
346 end_unlock:
347 pthread_mutex_unlock(&stream->lock);
348 end:
349 rcu_read_unlock();
350 return ret;
351 }
352
/*
 * Read the whole metadata available for a snapshot.
 * RCU read-side lock must be held across this function to ensure existence of
 * metadata_channel. The channel lock must be held by the caller.
 *
 * The metadata stream is consumed until empty, then destroyed and
 * detached from the channel on every path (success or error).
 *
 * Returns 0 on success, < 0 on error
 */
static int lttng_kconsumer_snapshot_metadata(
		struct lttng_consumer_channel *metadata_channel,
		uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret, use_relayd = 0;
	ssize_t ret_read;
	struct lttng_consumer_stream *metadata_stream;

	assert(ctx);

	DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	rcu_read_lock();

	metadata_stream = metadata_channel->metadata_stream;
	assert(metadata_stream);

	/* Hold the stream lock for the whole read + teardown sequence. */
	pthread_mutex_lock(&metadata_stream->lock);
	assert(metadata_channel->trace_chunk);
	assert(metadata_stream->trace_chunk);

	/* Flag once that we have a valid relayd for the stream. */
	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	/* Set up the output: relayd connection or local files. */
	if (use_relayd) {
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error_snapshot;
		}
	} else {
		ret = consumer_stream_create_output_files(metadata_stream,
				false);
		if (ret < 0) {
			goto error_snapshot;
		}
	}

	/* Drain the metadata stream; 0 means no more data to read. */
	do {
		health_code_update();

		ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
		if (ret_read < 0) {
			ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)",
					ret_read);
			ret = ret_read;
			goto error_snapshot;
		}
	} while (ret_read > 0);

	if (use_relayd) {
		close_relayd_stream(metadata_stream);
		metadata_stream->net_seq_idx = (uint64_t) -1ULL;
	} else {
		if (metadata_stream->out_fd >= 0) {
			ret = close(metadata_stream->out_fd);
			if (ret < 0) {
				PERROR("Kernel consumer snapshot metadata close out_fd");
				/*
				 * Don't go on error here since the snapshot was successful at this
				 * point but somehow the close failed.
				 */
			}
			metadata_stream->out_fd = -1;
			/* Release the chunk reference taken for this snapshot. */
			lttng_trace_chunk_put(metadata_stream->trace_chunk);
			metadata_stream->trace_chunk = NULL;
		}
	}

	ret = 0;
error_snapshot:
	/*
	 * Unconditional teardown: the metadata stream is one-shot for a
	 * snapshot, so it is unlinked and destroyed on both success and
	 * error paths, and the channel's reference is cleared.
	 */
	pthread_mutex_unlock(&metadata_stream->lock);
	cds_list_del(&metadata_stream->send_node);
	consumer_stream_destroy(metadata_stream, NULL);
	metadata_channel->metadata_stream = NULL;
	rcu_read_unlock();
	return ret;
}
441
442 /*
443 * Receive command from session daemon and process it.
444 *
445 * Return 1 on success else a negative value or 0.
446 */
447 int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
448 int sock, struct pollfd *consumer_sockpoll)
449 {
450 int ret_func;
451 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
452 struct lttcomm_consumer_msg msg;
453
454 health_code_update();
455
456 {
457 ssize_t ret_recv;
458
459 ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
460 if (ret_recv != sizeof(msg)) {
461 if (ret_recv > 0) {
462 lttng_consumer_send_error(ctx,
463 LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
464 ret_recv = -1;
465 }
466 return ret_recv;
467 }
468 }
469
470 health_code_update();
471
472 /* Deprecated command */
473 assert(msg.cmd_type != LTTNG_CONSUMER_STOP);
474
475 health_code_update();
476
477 /* relayd needs RCU read-side protection */
478 rcu_read_lock();
479
480 switch (msg.cmd_type) {
481 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
482 {
483 /* Session daemon status message are handled in the following call. */
484 consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
485 msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
486 &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
487 msg.u.relayd_sock.relayd_session_id);
488 goto end_nosignal;
489 }
490 case LTTNG_CONSUMER_ADD_CHANNEL:
491 {
492 struct lttng_consumer_channel *new_channel;
493 int ret_send_status, ret_add_channel = 0;
494 const uint64_t chunk_id = msg.u.channel.chunk_id.value;
495
496 health_code_update();
497
498 /* First send a status message before receiving the fds. */
499 ret_send_status = consumer_send_status_msg(sock, ret_code);
500 if (ret_send_status < 0) {
501 /* Somehow, the session daemon is not responding anymore. */
502 goto error_fatal;
503 }
504
505 health_code_update();
506
507 DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key);
508 new_channel = consumer_allocate_channel(msg.u.channel.channel_key,
509 msg.u.channel.session_id,
510 msg.u.channel.chunk_id.is_set ?
511 &chunk_id : NULL,
512 msg.u.channel.pathname,
513 msg.u.channel.name,
514 msg.u.channel.relayd_id, msg.u.channel.output,
515 msg.u.channel.tracefile_size,
516 msg.u.channel.tracefile_count, 0,
517 msg.u.channel.monitor,
518 msg.u.channel.live_timer_interval,
519 msg.u.channel.is_live,
520 NULL, NULL);
521 if (new_channel == NULL) {
522 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
523 goto end_nosignal;
524 }
525 new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams;
526 switch (msg.u.channel.output) {
527 case LTTNG_EVENT_SPLICE:
528 new_channel->output = CONSUMER_CHANNEL_SPLICE;
529 break;
530 case LTTNG_EVENT_MMAP:
531 new_channel->output = CONSUMER_CHANNEL_MMAP;
532 break;
533 default:
534 ERR("Channel output unknown %d", msg.u.channel.output);
535 goto end_nosignal;
536 }
537
538 /* Translate and save channel type. */
539 switch (msg.u.channel.type) {
540 case CONSUMER_CHANNEL_TYPE_DATA:
541 case CONSUMER_CHANNEL_TYPE_METADATA:
542 new_channel->type = msg.u.channel.type;
543 break;
544 default:
545 assert(0);
546 goto end_nosignal;
547 };
548
549 health_code_update();
550
551 if (ctx->on_recv_channel != NULL) {
552 int ret_recv_channel =
553 ctx->on_recv_channel(new_channel);
554 if (ret_recv_channel == 0) {
555 ret_add_channel = consumer_add_channel(
556 new_channel, ctx);
557 } else if (ret_recv_channel < 0) {
558 goto end_nosignal;
559 }
560 } else {
561 ret_add_channel =
562 consumer_add_channel(new_channel, ctx);
563 }
564 if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA &&
565 !ret_add_channel) {
566 int monitor_start_ret;
567
568 DBG("Consumer starting monitor timer");
569 consumer_timer_live_start(new_channel,
570 msg.u.channel.live_timer_interval);
571 monitor_start_ret = consumer_timer_monitor_start(
572 new_channel,
573 msg.u.channel.monitor_timer_interval);
574 if (monitor_start_ret < 0) {
575 ERR("Starting channel monitoring timer failed");
576 goto end_nosignal;
577 }
578 }
579
580 health_code_update();
581
582 /* If we received an error in add_channel, we need to report it. */
583 if (ret_add_channel < 0) {
584 ret_send_status = consumer_send_status_msg(
585 sock, ret_add_channel);
586 if (ret_send_status < 0) {
587 goto error_fatal;
588 }
589 goto end_nosignal;
590 }
591
592 goto end_nosignal;
593 }
594 case LTTNG_CONSUMER_ADD_STREAM:
595 {
596 int fd;
597 struct lttng_pipe *stream_pipe;
598 struct lttng_consumer_stream *new_stream;
599 struct lttng_consumer_channel *channel;
600 int alloc_ret = 0;
601 int ret_send_status, ret_poll, ret_get_max_subbuf_size;
602 ssize_t ret_pipe_write, ret_recv;
603
604 /*
605 * Get stream's channel reference. Needed when adding the stream to the
606 * global hash table.
607 */
608 channel = consumer_find_channel(msg.u.stream.channel_key);
609 if (!channel) {
610 /*
611 * We could not find the channel. Can happen if cpu hotplug
612 * happens while tearing down.
613 */
614 ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key);
615 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
616 }
617
618 health_code_update();
619
620 /* First send a status message before receiving the fds. */
621 ret_send_status = consumer_send_status_msg(sock, ret_code);
622 if (ret_send_status < 0) {
623 /* Somehow, the session daemon is not responding anymore. */
624 goto error_add_stream_fatal;
625 }
626
627 health_code_update();
628
629 if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
630 /* Channel was not found. */
631 goto error_add_stream_nosignal;
632 }
633
634 /* Blocking call */
635 health_poll_entry();
636 ret_poll = lttng_consumer_poll_socket(consumer_sockpoll);
637 health_poll_exit();
638 if (ret_poll) {
639 goto error_add_stream_fatal;
640 }
641
642 health_code_update();
643
644 /* Get stream file descriptor from socket */
645 ret_recv = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
646 if (ret_recv != sizeof(fd)) {
647 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
648 ret_func = ret_recv;
649 goto end;
650 }
651
652 health_code_update();
653
654 /*
655 * Send status code to session daemon only if the recv works. If the
656 * above recv() failed, the session daemon is notified through the
657 * error socket and the teardown is eventually done.
658 */
659 ret_send_status = consumer_send_status_msg(sock, ret_code);
660 if (ret_send_status < 0) {
661 /* Somehow, the session daemon is not responding anymore. */
662 goto error_add_stream_nosignal;
663 }
664
665 health_code_update();
666
667 pthread_mutex_lock(&channel->lock);
668 new_stream = consumer_stream_create(
669 channel,
670 channel->key,
671 fd,
672 channel->name,
673 channel->relayd_id,
674 channel->session_id,
675 channel->trace_chunk,
676 msg.u.stream.cpu,
677 &alloc_ret,
678 channel->type,
679 channel->monitor);
680 if (new_stream == NULL) {
681 switch (alloc_ret) {
682 case -ENOMEM:
683 case -EINVAL:
684 default:
685 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
686 break;
687 }
688 pthread_mutex_unlock(&channel->lock);
689 goto error_add_stream_nosignal;
690 }
691
692 new_stream->wait_fd = fd;
693 ret_get_max_subbuf_size = kernctl_get_max_subbuf_size(
694 new_stream->wait_fd, &new_stream->max_sb_size);
695 if (ret_get_max_subbuf_size < 0) {
696 pthread_mutex_unlock(&channel->lock);
697 ERR("Failed to get kernel maximal subbuffer size");
698 goto error_add_stream_nosignal;
699 }
700
701 consumer_stream_update_channel_attributes(new_stream,
702 channel);
703
704 /*
705 * We've just assigned the channel to the stream so increment the
706 * refcount right now. We don't need to increment the refcount for
707 * streams in no monitor because we handle manually the cleanup of
708 * those. It is very important to make sure there is NO prior
709 * consumer_del_stream() calls or else the refcount will be unbalanced.
710 */
711 if (channel->monitor) {
712 uatomic_inc(&new_stream->chan->refcount);
713 }
714
715 /*
716 * The buffer flush is done on the session daemon side for the kernel
717 * so no need for the stream "hangup_flush_done" variable to be
718 * tracked. This is important for a kernel stream since we don't rely
719 * on the flush state of the stream to read data. It's not the case for
720 * user space tracing.
721 */
722 new_stream->hangup_flush_done = 0;
723
724 health_code_update();
725
726 pthread_mutex_lock(&new_stream->lock);
727 if (ctx->on_recv_stream) {
728 int ret_recv_stream = ctx->on_recv_stream(new_stream);
729 if (ret_recv_stream < 0) {
730 pthread_mutex_unlock(&new_stream->lock);
731 pthread_mutex_unlock(&channel->lock);
732 consumer_stream_free(new_stream);
733 goto error_add_stream_nosignal;
734 }
735 }
736 health_code_update();
737
738 if (new_stream->metadata_flag) {
739 channel->metadata_stream = new_stream;
740 }
741
742 /* Do not monitor this stream. */
743 if (!channel->monitor) {
744 DBG("Kernel consumer add stream %s in no monitor mode with "
745 "relayd id %" PRIu64, new_stream->name,
746 new_stream->net_seq_idx);
747 cds_list_add(&new_stream->send_node, &channel->streams.head);
748 pthread_mutex_unlock(&new_stream->lock);
749 pthread_mutex_unlock(&channel->lock);
750 goto end_add_stream;
751 }
752
753 /* Send stream to relayd if the stream has an ID. */
754 if (new_stream->net_seq_idx != (uint64_t) -1ULL) {
755 int ret_send_relayd_stream;
756
757 ret_send_relayd_stream = consumer_send_relayd_stream(
758 new_stream, new_stream->chan->pathname);
759 if (ret_send_relayd_stream < 0) {
760 pthread_mutex_unlock(&new_stream->lock);
761 pthread_mutex_unlock(&channel->lock);
762 consumer_stream_free(new_stream);
763 goto error_add_stream_nosignal;
764 }
765
766 /*
767 * If adding an extra stream to an already
768 * existing channel (e.g. cpu hotplug), we need
769 * to send the "streams_sent" command to relayd.
770 */
771 if (channel->streams_sent_to_relayd) {
772 int ret_send_relayd_streams_sent;
773
774 ret_send_relayd_streams_sent =
775 consumer_send_relayd_streams_sent(
776 new_stream->net_seq_idx);
777 if (ret_send_relayd_streams_sent < 0) {
778 pthread_mutex_unlock(&new_stream->lock);
779 pthread_mutex_unlock(&channel->lock);
780 goto error_add_stream_nosignal;
781 }
782 }
783 }
784 pthread_mutex_unlock(&new_stream->lock);
785 pthread_mutex_unlock(&channel->lock);
786
787 /* Get the right pipe where the stream will be sent. */
788 if (new_stream->metadata_flag) {
789 consumer_add_metadata_stream(new_stream);
790 stream_pipe = ctx->consumer_metadata_pipe;
791 } else {
792 consumer_add_data_stream(new_stream);
793 stream_pipe = ctx->consumer_data_pipe;
794 }
795
796 /* Visible to other threads */
797 new_stream->globally_visible = 1;
798
799 health_code_update();
800
801 ret_pipe_write = lttng_pipe_write(
802 stream_pipe, &new_stream, sizeof(new_stream));
803 if (ret_pipe_write < 0) {
804 ERR("Consumer write %s stream to pipe %d",
805 new_stream->metadata_flag ? "metadata" : "data",
806 lttng_pipe_get_writefd(stream_pipe));
807 if (new_stream->metadata_flag) {
808 consumer_del_stream_for_metadata(new_stream);
809 } else {
810 consumer_del_stream_for_data(new_stream);
811 }
812 goto error_add_stream_nosignal;
813 }
814
815 DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64,
816 new_stream->name, fd, new_stream->chan->pathname, new_stream->relayd_stream_id);
817 end_add_stream:
818 break;
819 error_add_stream_nosignal:
820 goto end_nosignal;
821 error_add_stream_fatal:
822 goto error_fatal;
823 }
824 case LTTNG_CONSUMER_STREAMS_SENT:
825 {
826 struct lttng_consumer_channel *channel;
827 int ret_send_status;
828
829 /*
830 * Get stream's channel reference. Needed when adding the stream to the
831 * global hash table.
832 */
833 channel = consumer_find_channel(msg.u.sent_streams.channel_key);
834 if (!channel) {
835 /*
836 * We could not find the channel. Can happen if cpu hotplug
837 * happens while tearing down.
838 */
839 ERR("Unable to find channel key %" PRIu64,
840 msg.u.sent_streams.channel_key);
841 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
842 }
843
844 health_code_update();
845
846 /*
847 * Send status code to session daemon.
848 */
849 ret_send_status = consumer_send_status_msg(sock, ret_code);
850 if (ret_send_status < 0 ||
851 ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
852 /* Somehow, the session daemon is not responding anymore. */
853 goto error_streams_sent_nosignal;
854 }
855
856 health_code_update();
857
858 /*
859 * We should not send this message if we don't monitor the
860 * streams in this channel.
861 */
862 if (!channel->monitor) {
863 goto end_error_streams_sent;
864 }
865
866 health_code_update();
867 /* Send stream to relayd if the stream has an ID. */
868 if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) {
869 int ret_send_relay_streams;
870
871 ret_send_relay_streams = consumer_send_relayd_streams_sent(
872 msg.u.sent_streams.net_seq_idx);
873 if (ret_send_relay_streams < 0) {
874 goto error_streams_sent_nosignal;
875 }
876 channel->streams_sent_to_relayd = true;
877 }
878 end_error_streams_sent:
879 break;
880 error_streams_sent_nosignal:
881 goto end_nosignal;
882 }
883 case LTTNG_CONSUMER_UPDATE_STREAM:
884 {
885 rcu_read_unlock();
886 return -ENOSYS;
887 }
888 case LTTNG_CONSUMER_DESTROY_RELAYD:
889 {
890 uint64_t index = msg.u.destroy_relayd.net_seq_idx;
891 struct consumer_relayd_sock_pair *relayd;
892 int ret_send_status;
893
894 DBG("Kernel consumer destroying relayd %" PRIu64, index);
895
896 /* Get relayd reference if exists. */
897 relayd = consumer_find_relayd(index);
898 if (relayd == NULL) {
899 DBG("Unable to find relayd %" PRIu64, index);
900 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
901 }
902
903 /*
904 * Each relayd socket pair has a refcount of stream attached to it
905 * which tells if the relayd is still active or not depending on the
906 * refcount value.
907 *
908 * This will set the destroy flag of the relayd object and destroy it
909 * if the refcount reaches zero when called.
910 *
911 * The destroy can happen either here or when a stream fd hangs up.
912 */
913 if (relayd) {
914 consumer_flag_relayd_for_destroy(relayd);
915 }
916
917 health_code_update();
918
919 ret_send_status = consumer_send_status_msg(sock, ret_code);
920 if (ret_send_status < 0) {
921 /* Somehow, the session daemon is not responding anymore. */
922 goto error_fatal;
923 }
924
925 goto end_nosignal;
926 }
927 case LTTNG_CONSUMER_DATA_PENDING:
928 {
929 int32_t ret_data_pending;
930 uint64_t id = msg.u.data_pending.session_id;
931 ssize_t ret_send;
932
933 DBG("Kernel consumer data pending command for id %" PRIu64, id);
934
935 ret_data_pending = consumer_data_pending(id);
936
937 health_code_update();
938
939 /* Send back returned value to session daemon */
940 ret_send = lttcomm_send_unix_sock(sock, &ret_data_pending,
941 sizeof(ret_data_pending));
942 if (ret_send < 0) {
943 PERROR("send data pending ret code");
944 goto error_fatal;
945 }
946
947 /*
948 * No need to send back a status message since the data pending
949 * returned value is the response.
950 */
951 break;
952 }
953 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
954 {
955 struct lttng_consumer_channel *channel;
956 uint64_t key = msg.u.snapshot_channel.key;
957 int ret_send_status;
958
959 channel = consumer_find_channel(key);
960 if (!channel) {
961 ERR("Channel %" PRIu64 " not found", key);
962 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
963 } else {
964 pthread_mutex_lock(&channel->lock);
965 if (msg.u.snapshot_channel.metadata == 1) {
966 int ret_snapshot;
967
968 ret_snapshot = lttng_kconsumer_snapshot_metadata(
969 channel, key,
970 msg.u.snapshot_channel.pathname,
971 msg.u.snapshot_channel.relayd_id,
972 ctx);
973 if (ret_snapshot < 0) {
974 ERR("Snapshot metadata failed");
975 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
976 }
977 } else {
978 int ret_snapshot;
979
980 ret_snapshot = lttng_kconsumer_snapshot_channel(
981 channel, key,
982 msg.u.snapshot_channel.pathname,
983 msg.u.snapshot_channel.relayd_id,
984 msg.u.snapshot_channel
985 .nb_packets_per_stream,
986 ctx);
987 if (ret_snapshot < 0) {
988 ERR("Snapshot channel failed");
989 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
990 }
991 }
992 pthread_mutex_unlock(&channel->lock);
993 }
994 health_code_update();
995
996 ret_send_status = consumer_send_status_msg(sock, ret_code);
997 if (ret_send_status < 0) {
998 /* Somehow, the session daemon is not responding anymore. */
999 goto end_nosignal;
1000 }
1001 break;
1002 }
1003 case LTTNG_CONSUMER_DESTROY_CHANNEL:
1004 {
1005 uint64_t key = msg.u.destroy_channel.key;
1006 struct lttng_consumer_channel *channel;
1007 int ret_send_status;
1008
1009 channel = consumer_find_channel(key);
1010 if (!channel) {
1011 ERR("Kernel consumer destroy channel %" PRIu64 " not found", key);
1012 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1013 }
1014
1015 health_code_update();
1016
1017 ret_send_status = consumer_send_status_msg(sock, ret_code);
1018 if (ret_send_status < 0) {
1019 /* Somehow, the session daemon is not responding anymore. */
1020 goto end_destroy_channel;
1021 }
1022
1023 health_code_update();
1024
1025 /* Stop right now if no channel was found. */
1026 if (!channel) {
1027 goto end_destroy_channel;
1028 }
1029
1030 /*
1031 * This command should ONLY be issued for channel with streams set in
1032 * no monitor mode.
1033 */
1034 assert(!channel->monitor);
1035
1036 /*
1037 * The refcount should ALWAYS be 0 in the case of a channel in no
1038 * monitor mode.
1039 */
1040 assert(!uatomic_sub_return(&channel->refcount, 1));
1041
1042 consumer_del_channel(channel);
1043 end_destroy_channel:
1044 goto end_nosignal;
1045 }
1046 case LTTNG_CONSUMER_DISCARDED_EVENTS:
1047 {
1048 ssize_t ret;
1049 uint64_t count;
1050 struct lttng_consumer_channel *channel;
1051 uint64_t id = msg.u.discarded_events.session_id;
1052 uint64_t key = msg.u.discarded_events.channel_key;
1053
1054 DBG("Kernel consumer discarded events command for session id %"
1055 PRIu64 ", channel key %" PRIu64, id, key);
1056
1057 channel = consumer_find_channel(key);
1058 if (!channel) {
1059 ERR("Kernel consumer discarded events channel %"
1060 PRIu64 " not found", key);
1061 count = 0;
1062 } else {
1063 count = channel->discarded_events;
1064 }
1065
1066 health_code_update();
1067
1068 /* Send back returned value to session daemon */
1069 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1070 if (ret < 0) {
1071 PERROR("send discarded events");
1072 goto error_fatal;
1073 }
1074
1075 break;
1076 }
1077 case LTTNG_CONSUMER_LOST_PACKETS:
1078 {
1079 ssize_t ret;
1080 uint64_t count;
1081 struct lttng_consumer_channel *channel;
1082 uint64_t id = msg.u.lost_packets.session_id;
1083 uint64_t key = msg.u.lost_packets.channel_key;
1084
1085 DBG("Kernel consumer lost packets command for session id %"
1086 PRIu64 ", channel key %" PRIu64, id, key);
1087
1088 channel = consumer_find_channel(key);
1089 if (!channel) {
1090 ERR("Kernel consumer lost packets channel %"
1091 PRIu64 " not found", key);
1092 count = 0;
1093 } else {
1094 count = channel->lost_packets;
1095 }
1096
1097 health_code_update();
1098
1099 /* Send back returned value to session daemon */
1100 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1101 if (ret < 0) {
1102 PERROR("send lost packets");
1103 goto error_fatal;
1104 }
1105
1106 break;
1107 }
1108 case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
1109 {
1110 int channel_monitor_pipe;
1111 int ret_send_status, ret_set_channel_monitor_pipe;
1112 ssize_t ret_recv;
1113
1114 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1115 /* Successfully received the command's type. */
1116 ret_send_status = consumer_send_status_msg(sock, ret_code);
1117 if (ret_send_status < 0) {
1118 goto error_fatal;
1119 }
1120
1121 ret_recv = lttcomm_recv_fds_unix_sock(
1122 sock, &channel_monitor_pipe, 1);
1123 if (ret_recv != sizeof(channel_monitor_pipe)) {
1124 ERR("Failed to receive channel monitor pipe");
1125 goto error_fatal;
1126 }
1127
1128 DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
1129 ret_set_channel_monitor_pipe =
1130 consumer_timer_thread_set_channel_monitor_pipe(
1131 channel_monitor_pipe);
1132 if (!ret_set_channel_monitor_pipe) {
1133 int flags;
1134 int ret_fcntl;
1135
1136 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1137 /* Set the pipe as non-blocking. */
1138 ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0);
1139 if (ret_fcntl == -1) {
1140 PERROR("fcntl get flags of the channel monitoring pipe");
1141 goto error_fatal;
1142 }
1143 flags = ret_fcntl;
1144
1145 ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL,
1146 flags | O_NONBLOCK);
1147 if (ret_fcntl == -1) {
1148 PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
1149 goto error_fatal;
1150 }
1151 DBG("Channel monitor pipe set as non-blocking");
1152 } else {
1153 ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
1154 }
1155 ret_send_status = consumer_send_status_msg(sock, ret_code);
1156 if (ret_send_status < 0) {
1157 goto error_fatal;
1158 }
1159 break;
1160 }
1161 case LTTNG_CONSUMER_ROTATE_CHANNEL:
1162 {
1163 struct lttng_consumer_channel *channel;
1164 uint64_t key = msg.u.rotate_channel.key;
1165 int ret_send_status;
1166
1167 DBG("Consumer rotate channel %" PRIu64, key);
1168
1169 channel = consumer_find_channel(key);
1170 if (!channel) {
1171 ERR("Channel %" PRIu64 " not found", key);
1172 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1173 } else {
1174 /*
1175 * Sample the rotate position of all the streams in this channel.
1176 */
1177 int ret_rotate_channel;
1178
1179 ret_rotate_channel = lttng_consumer_rotate_channel(
1180 channel, key,
1181 msg.u.rotate_channel.relayd_id,
1182 msg.u.rotate_channel.metadata, ctx);
1183 if (ret_rotate_channel < 0) {
1184 ERR("Rotate channel failed");
1185 ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
1186 }
1187
1188 health_code_update();
1189 }
1190
1191 ret_send_status = consumer_send_status_msg(sock, ret_code);
1192 if (ret_send_status < 0) {
1193 /* Somehow, the session daemon is not responding anymore. */
1194 goto error_rotate_channel;
1195 }
1196 if (channel) {
1197 /* Rotate the streams that are ready right now. */
1198 int ret_rotate;
1199
1200 ret_rotate = lttng_consumer_rotate_ready_streams(
1201 channel, key, ctx);
1202 if (ret_rotate < 0) {
1203 ERR("Rotate ready streams failed");
1204 }
1205 }
1206 break;
1207 error_rotate_channel:
1208 goto end_nosignal;
1209 }
1210 case LTTNG_CONSUMER_CLEAR_CHANNEL:
1211 {
1212 struct lttng_consumer_channel *channel;
1213 uint64_t key = msg.u.clear_channel.key;
1214 int ret_send_status;
1215
1216 channel = consumer_find_channel(key);
1217 if (!channel) {
1218 DBG("Channel %" PRIu64 " not found", key);
1219 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1220 } else {
1221 int ret_clear_channel;
1222
1223 ret_clear_channel =
1224 lttng_consumer_clear_channel(channel);
1225 if (ret_clear_channel) {
1226 ERR("Clear channel failed");
1227 ret_code = ret_clear_channel;
1228 }
1229
1230 health_code_update();
1231 }
1232
1233 ret_send_status = consumer_send_status_msg(sock, ret_code);
1234 if (ret_send_status < 0) {
1235 /* Somehow, the session daemon is not responding anymore. */
1236 goto end_nosignal;
1237 }
1238
1239 break;
1240 }
1241 case LTTNG_CONSUMER_INIT:
1242 {
1243 int ret_send_status;
1244
1245 ret_code = lttng_consumer_init_command(ctx,
1246 msg.u.init.sessiond_uuid);
1247 health_code_update();
1248 ret_send_status = consumer_send_status_msg(sock, ret_code);
1249 if (ret_send_status < 0) {
1250 /* Somehow, the session daemon is not responding anymore. */
1251 goto end_nosignal;
1252 }
1253 break;
1254 }
1255 case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
1256 {
1257 const struct lttng_credentials credentials = {
1258 .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.uid),
1259 .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.gid),
1260 };
1261 const bool is_local_trace =
1262 !msg.u.create_trace_chunk.relayd_id.is_set;
1263 const uint64_t relayd_id =
1264 msg.u.create_trace_chunk.relayd_id.value;
1265 const char *chunk_override_name =
1266 *msg.u.create_trace_chunk.override_name ?
1267 msg.u.create_trace_chunk.override_name :
1268 NULL;
1269 struct lttng_directory_handle *chunk_directory_handle = NULL;
1270
1271 /*
1272 * The session daemon will only provide a chunk directory file
1273 * descriptor for local traces.
1274 */
1275 if (is_local_trace) {
1276 int chunk_dirfd;
1277 int ret_send_status;
1278 ssize_t ret_recv;
1279
1280 /* Acnowledge the reception of the command. */
1281 ret_send_status = consumer_send_status_msg(
1282 sock, LTTCOMM_CONSUMERD_SUCCESS);
1283 if (ret_send_status < 0) {
1284 /* Somehow, the session daemon is not responding anymore. */
1285 goto end_nosignal;
1286 }
1287
1288 ret_recv = lttcomm_recv_fds_unix_sock(
1289 sock, &chunk_dirfd, 1);
1290 if (ret_recv != sizeof(chunk_dirfd)) {
1291 ERR("Failed to receive trace chunk directory file descriptor");
1292 goto error_fatal;
1293 }
1294
1295 DBG("Received trace chunk directory fd (%d)",
1296 chunk_dirfd);
1297 chunk_directory_handle = lttng_directory_handle_create_from_dirfd(
1298 chunk_dirfd);
1299 if (!chunk_directory_handle) {
1300 ERR("Failed to initialize chunk directory handle from directory file descriptor");
1301 if (close(chunk_dirfd)) {
1302 PERROR("Failed to close chunk directory file descriptor");
1303 }
1304 goto error_fatal;
1305 }
1306 }
1307
1308 ret_code = lttng_consumer_create_trace_chunk(
1309 !is_local_trace ? &relayd_id : NULL,
1310 msg.u.create_trace_chunk.session_id,
1311 msg.u.create_trace_chunk.chunk_id,
1312 (time_t) msg.u.create_trace_chunk
1313 .creation_timestamp,
1314 chunk_override_name,
1315 msg.u.create_trace_chunk.credentials.is_set ?
1316 &credentials :
1317 NULL,
1318 chunk_directory_handle);
1319 lttng_directory_handle_put(chunk_directory_handle);
1320 goto end_msg_sessiond;
1321 }
1322 case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
1323 {
1324 enum lttng_trace_chunk_command_type close_command =
1325 msg.u.close_trace_chunk.close_command.value;
1326 const uint64_t relayd_id =
1327 msg.u.close_trace_chunk.relayd_id.value;
1328 struct lttcomm_consumer_close_trace_chunk_reply reply;
1329 char path[LTTNG_PATH_MAX];
1330 ssize_t ret_send;
1331
1332 ret_code = lttng_consumer_close_trace_chunk(
1333 msg.u.close_trace_chunk.relayd_id.is_set ?
1334 &relayd_id :
1335 NULL,
1336 msg.u.close_trace_chunk.session_id,
1337 msg.u.close_trace_chunk.chunk_id,
1338 (time_t) msg.u.close_trace_chunk.close_timestamp,
1339 msg.u.close_trace_chunk.close_command.is_set ?
1340 &close_command :
1341 NULL, path);
1342 reply.ret_code = ret_code;
1343 reply.path_length = strlen(path) + 1;
1344 ret_send = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
1345 if (ret_send != sizeof(reply)) {
1346 goto error_fatal;
1347 }
1348 ret_send = lttcomm_send_unix_sock(
1349 sock, path, reply.path_length);
1350 if (ret_send != reply.path_length) {
1351 goto error_fatal;
1352 }
1353 goto end_nosignal;
1354 }
1355 case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
1356 {
1357 const uint64_t relayd_id =
1358 msg.u.trace_chunk_exists.relayd_id.value;
1359
1360 ret_code = lttng_consumer_trace_chunk_exists(
1361 msg.u.trace_chunk_exists.relayd_id.is_set ?
1362 &relayd_id : NULL,
1363 msg.u.trace_chunk_exists.session_id,
1364 msg.u.trace_chunk_exists.chunk_id);
1365 goto end_msg_sessiond;
1366 }
1367 case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS:
1368 {
1369 const uint64_t key = msg.u.open_channel_packets.key;
1370 struct lttng_consumer_channel *channel =
1371 consumer_find_channel(key);
1372
1373 if (channel) {
1374 pthread_mutex_lock(&channel->lock);
1375 ret_code = lttng_consumer_open_channel_packets(channel);
1376 pthread_mutex_unlock(&channel->lock);
1377 } else {
1378 WARN("Channel %" PRIu64 " not found", key);
1379 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1380 }
1381
1382 health_code_update();
1383 goto end_msg_sessiond;
1384 }
1385 default:
1386 goto end_nosignal;
1387 }
1388
1389 end_nosignal:
1390 /*
1391 * Return 1 to indicate success since the 0 value can be a socket
1392 * shutdown during the recv() or send() call.
1393 */
1394 ret_func = 1;
1395 goto end;
1396 error_fatal:
1397 /* This will issue a consumer stop. */
1398 ret_func = -1;
1399 goto end;
1400 end_msg_sessiond:
1401 /*
1402 * The returned value here is not useful since either way we'll return 1 to
1403 * the caller because the session daemon socket management is done
1404 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
1405 */
1406 {
1407 int ret_send_status;
1408
1409 ret_send_status = consumer_send_status_msg(sock, ret_code);
1410 if (ret_send_status < 0) {
1411 goto error_fatal;
1412 }
1413 }
1414
1415 ret_func = 1;
1416
1417 end:
1418 health_code_update();
1419 rcu_read_unlock();
1420 return ret_func;
1421 }
1422
1423 /*
1424 * Sync metadata meaning request them to the session daemon and snapshot to the
1425 * metadata thread can consumer them.
1426 *
1427 * Metadata stream lock MUST be acquired.
1428 */
1429 enum sync_metadata_status lttng_kconsumer_sync_metadata(
1430 struct lttng_consumer_stream *metadata)
1431 {
1432 int ret;
1433 enum sync_metadata_status status;
1434
1435 assert(metadata);
1436
1437 ret = kernctl_buffer_flush(metadata->wait_fd);
1438 if (ret < 0) {
1439 ERR("Failed to flush kernel stream");
1440 status = SYNC_METADATA_STATUS_ERROR;
1441 goto end;
1442 }
1443
1444 ret = kernctl_snapshot(metadata->wait_fd);
1445 if (ret < 0) {
1446 if (errno == EAGAIN) {
1447 /* No new metadata, exit. */
1448 DBG("Sync metadata, no new kernel metadata");
1449 status = SYNC_METADATA_STATUS_NO_DATA;
1450 } else {
1451 ERR("Sync metadata, taking kernel snapshot failed.");
1452 status = SYNC_METADATA_STATUS_ERROR;
1453 }
1454 } else {
1455 status = SYNC_METADATA_STATUS_NEW_DATA;
1456 }
1457
1458 end:
1459 return status;
1460 }
1461
1462 static
1463 int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
1464 struct stream_subbuffer *subbuf)
1465 {
1466 int ret;
1467
1468 ret = kernctl_get_subbuf_size(
1469 stream->wait_fd, &subbuf->info.data.subbuf_size);
1470 if (ret) {
1471 goto end;
1472 }
1473
1474 ret = kernctl_get_padded_subbuf_size(
1475 stream->wait_fd, &subbuf->info.data.padded_subbuf_size);
1476 if (ret) {
1477 goto end;
1478 }
1479
1480 end:
1481 return ret;
1482 }
1483
1484 static
1485 int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
1486 struct stream_subbuffer *subbuf)
1487 {
1488 int ret;
1489
1490 ret = extract_common_subbuffer_info(stream, subbuf);
1491 if (ret) {
1492 goto end;
1493 }
1494
1495 ret = kernctl_get_metadata_version(
1496 stream->wait_fd, &subbuf->info.metadata.version);
1497 if (ret) {
1498 goto end;
1499 }
1500
1501 end:
1502 return ret;
1503 }
1504
/*
 * Populate subbuf->info.data by querying the kernel tracer for the current
 * sub-buffer of a data stream: sizes, timestamps, discarded-event count,
 * stream id and, when supported, sequence number and instance id.
 *
 * Returns 0 on success, a negative errno value from the failing kernctl
 * call otherwise.
 */
static
int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	ret = kernctl_get_packet_size(
			stream->wait_fd, &subbuf->info.data.packet_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer packet size");
		goto end;
	}

	ret = kernctl_get_content_size(
			stream->wait_fd, &subbuf->info.data.content_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer content size");
		goto end;
	}

	ret = kernctl_get_timestamp_begin(
			stream->wait_fd, &subbuf->info.data.timestamp_begin);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer begin timestamp");
		goto end;
	}

	ret = kernctl_get_timestamp_end(
			stream->wait_fd, &subbuf->info.data.timestamp_end);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer end timestamp");
		goto end;
	}

	ret = kernctl_get_events_discarded(
			stream->wait_fd, &subbuf->info.data.events_discarded);
	if (ret) {
		PERROR("Failed to get sub-buffer events discarded count");
		goto end;
	}

	/*
	 * Sequence number and instance id are optional: -ENOTTY indicates an
	 * older LTTng-modules without the ioctl and is not treated as an
	 * error; the optional field is simply left unset.
	 */
	ret = kernctl_get_sequence_number(stream->wait_fd,
			&subbuf->info.data.sequence_number.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get sub-buffer sequence number");
			goto end;
		}
	} else {
		subbuf->info.data.sequence_number.is_set = true;
	}

	ret = kernctl_get_stream_id(
			stream->wait_fd, &subbuf->info.data.stream_id);
	if (ret < 0) {
		PERROR("Failed to get stream id");
		goto end;
	}

	ret = kernctl_get_instance_id(stream->wait_fd,
			&subbuf->info.data.stream_instance_id.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get stream instance id");
			goto end;
		}
	} else {
		subbuf->info.data.stream_instance_id.is_set = true;
	}
end:
	return ret;
}
1584
/*
 * Acquire the next sub-buffer of `stream` and extract its per-stream-type
 * info into `subbuffer`. Shared by the mmap and splice acquisition paths.
 */
static
enum get_next_subbuffer_status get_subbuffer_common(
		struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	enum get_next_subbuffer_status status;

	ret = kernctl_get_next_subbuf(stream->wait_fd);
	switch (ret) {
	case 0:
		status = GET_NEXT_SUBBUFFER_STATUS_OK;
		break;
	case -ENODATA:
	case -EAGAIN:
		/*
		 * The caller only expects -ENODATA when there is no data to
		 * read, but the kernel tracer returns -EAGAIN when there is
		 * currently no data for a non-finalized stream, and -ENODATA
		 * when there is no data for a finalized stream. Those can be
		 * combined into a -ENODATA return value.
		 */
		status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
		goto end;
	default:
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	/* Sub-buffer acquired; fill in its metadata/data info fields. */
	ret = stream->read_subbuffer_ops.extract_subbuffer_info(
			stream, subbuffer);
	if (ret) {
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
	}
end:
	return status;
}
1622
1623 static
1624 enum get_next_subbuffer_status get_next_subbuffer_splice(
1625 struct lttng_consumer_stream *stream,
1626 struct stream_subbuffer *subbuffer)
1627 {
1628 const enum get_next_subbuffer_status status =
1629 get_subbuffer_common(stream, subbuffer);
1630
1631 if (status != GET_NEXT_SUBBUFFER_STATUS_OK) {
1632 goto end;
1633 }
1634
1635 subbuffer->buffer.fd = stream->wait_fd;
1636 end:
1637 return status;
1638 }
1639
1640 static
1641 enum get_next_subbuffer_status get_next_subbuffer_mmap(
1642 struct lttng_consumer_stream *stream,
1643 struct stream_subbuffer *subbuffer)
1644 {
1645 int ret;
1646 enum get_next_subbuffer_status status;
1647 const char *addr;
1648
1649 status = get_subbuffer_common(stream, subbuffer);
1650 if (status != GET_NEXT_SUBBUFFER_STATUS_OK) {
1651 goto end;
1652 }
1653
1654 ret = get_current_subbuf_addr(stream, &addr);
1655 if (ret) {
1656 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
1657 goto end;
1658 }
1659
1660 subbuffer->buffer.buffer = lttng_buffer_view_init(
1661 addr, 0, subbuffer->info.data.padded_subbuf_size);
1662 end:
1663 return status;
1664 }
1665
/*
 * Acquire the next metadata sub-buffer along with its coherency flag, which
 * tells whether the accumulated metadata forms a parseable unit. Used for
 * live sessions when the kernel tracer supports the
 * get_next_subbuf_metadata_check operation.
 */
static
enum get_next_subbuffer_status get_next_subbuffer_metadata_check(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	const char *addr;
	bool coherent;
	enum get_next_subbuffer_status status;

	ret = kernctl_get_next_subbuf_metadata_check(stream->wait_fd,
			&coherent);
	if (ret) {
		goto end;
	}

	ret = stream->read_subbuffer_ops.extract_subbuffer_info(
			stream, subbuffer);
	if (ret) {
		goto end;
	}

	LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);

	ret = get_current_subbuf_addr(stream, &addr);
	if (ret) {
		goto end;
	}

	subbuffer->buffer.buffer = lttng_buffer_view_init(
			addr, 0, subbuffer->info.data.padded_subbuf_size);
	DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s",
			subbuffer->info.metadata.padded_subbuf_size,
			coherent ? "true" : "false");
end:
	/* Map the raw kernctl return code to a subbuffer status. */
	switch (ret) {
	case 0:
		status = GET_NEXT_SUBBUFFER_STATUS_OK;
		break;
	case -ENODATA:
	case -EAGAIN:
		/*
		 * The caller only expects -ENODATA when there is no data to
		 * read, but the kernel tracer returns -EAGAIN when there is
		 * currently no data for a non-finalized stream, and -ENODATA
		 * when there is no data for a finalized stream. Those can be
		 * combined into a -ENODATA return value.
		 */
		status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
		break;
	default:
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		break;
	}

	return status;
}
1728
1729 static
1730 int put_next_subbuffer(struct lttng_consumer_stream *stream,
1731 struct stream_subbuffer *subbuffer)
1732 {
1733 const int ret = kernctl_put_next_subbuf(stream->wait_fd);
1734
1735 if (ret) {
1736 if (ret == -EFAULT) {
1737 PERROR("Error in unreserving sub buffer");
1738 } else if (ret == -EIO) {
1739 /* Should never happen with newer LTTng versions */
1740 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
1741 }
1742 }
1743
1744 return ret;
1745 }
1746
1747 static
1748 bool is_get_next_check_metadata_available(int tracer_fd)
1749 {
1750 const int ret = kernctl_get_next_subbuf_metadata_check(tracer_fd, NULL);
1751 const bool available = ret != -ENOTTY;
1752
1753 if (ret == 0) {
1754 /* get succeeded, make sure to put the subbuffer. */
1755 kernctl_put_subbuf(tracer_fd);
1756 }
1757
1758 return available;
1759 }
1760
1761 static
1762 int signal_metadata(struct lttng_consumer_stream *stream,
1763 struct lttng_consumer_local_data *ctx)
1764 {
1765 ASSERT_LOCKED(stream->metadata_rdv_lock);
1766 return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
1767 }
1768
/*
 * Install the read_subbuffer operation callbacks appropriate for `stream`:
 * acquisition (mmap/splice/metadata-check), info extraction (data vs
 * metadata), live beacon, sleep notification, and release.
 *
 * Returns 0 on success, a negative value if enabling metadata
 * bucketization fails.
 */
static
int lttng_kconsumer_set_stream_ops(
		struct lttng_consumer_stream *stream)
{
	int ret = 0;

	if (stream->metadata_flag && stream->chan->is_live) {
		DBG("Attempting to enable metadata bucketization for live consumers");
		if (is_get_next_check_metadata_available(stream->wait_fd)) {
			DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached");
			stream->read_subbuffer_ops.get_next_subbuffer =
					get_next_subbuffer_metadata_check;
			ret = consumer_stream_enable_metadata_bucketization(
					stream);
			if (ret) {
				goto end;
			}
		} else {
			/*
			 * The kernel tracer version is too old to indicate
			 * when the metadata stream has reached a "coherent"
			 * (parseable) point.
			 *
			 * This means that a live viewer may see an incoherent
			 * sequence of metadata and fail to parse it.
			 */
			WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream");
			metadata_bucket_destroy(stream->metadata_bucket);
			stream->metadata_bucket = NULL;
		}

		stream->read_subbuffer_ops.on_sleep = signal_metadata;
	}

	/* Fall back to the channel's output type when not set above. */
	if (!stream->read_subbuffer_ops.get_next_subbuffer) {
		if (stream->chan->output == CONSUMER_CHANNEL_MMAP) {
			stream->read_subbuffer_ops.get_next_subbuffer =
					get_next_subbuffer_mmap;
		} else {
			stream->read_subbuffer_ops.get_next_subbuffer =
					get_next_subbuffer_splice;
		}
	}

	if (stream->metadata_flag) {
		stream->read_subbuffer_ops.extract_subbuffer_info =
				extract_metadata_subbuffer_info;
	} else {
		stream->read_subbuffer_ops.extract_subbuffer_info =
				extract_data_subbuffer_info;
		if (stream->chan->is_live) {
			/* Live data streams periodically flush an index beacon. */
			stream->read_subbuffer_ops.send_live_beacon =
					consumer_flush_kernel_index;
		}
	}

	stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
end:
	return ret;
}
1829
/*
 * Prepare a newly received kernel stream for consumption: create its output
 * files (local traces only), map its ring buffer when using mmap output, and
 * install the read_subbuffer operations.
 *
 * Returns 0 on success so the library handles the FD internally, a negative
 * value on error.
 */
int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Don't create anything if this is set for streaming or if there is
	 * no current trace chunk on the parent channel.
	 */
	if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
			stream->chan->trace_chunk) {
		ret = consumer_stream_create_output_files(stream, true);
		if (ret) {
			goto error;
		}
	}

	if (stream->output == LTTNG_EVENT_MMAP) {
		/* get the len of the mmap region */
		unsigned long mmap_len;

		ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len);
		if (ret != 0) {
			PERROR("kernctl_get_mmap_len");
			goto error_close_fd;
		}
		stream->mmap_len = (size_t) mmap_len;

		/* Map the tracer's ring buffer read-only into this process. */
		stream->mmap_base = mmap(NULL, stream->mmap_len, PROT_READ,
				MAP_PRIVATE, stream->wait_fd, 0);
		if (stream->mmap_base == MAP_FAILED) {
			PERROR("Error mmaping");
			ret = -1;
			goto error_close_fd;
		}
	}

	ret = lttng_kconsumer_set_stream_ops(stream);
	if (ret) {
		goto error_close_fd;
	}

	/* we return 0 to let the library handle the FD internally */
	return 0;

error_close_fd:
	/* Undo the output-file creation on failure. */
	if (stream->out_fd >= 0) {
		int err;

		err = close(stream->out_fd);
		assert(!err);
		stream->out_fd = -1;
	}
error:
	return ret;
}
1887
1888 /*
1889 * Check if data is still being extracted from the buffers for a specific
1890 * stream. Consumer data lock MUST be acquired before calling this function
1891 * and the stream lock.
1892 *
1893 * Return 1 if the traced data are still getting read else 0 meaning that the
1894 * data is available for trace viewer reading.
1895 */
1896 int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream)
1897 {
1898 int ret;
1899
1900 assert(stream);
1901
1902 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
1903 ret = 0;
1904 goto end;
1905 }
1906
1907 ret = kernctl_get_next_subbuf(stream->wait_fd);
1908 if (ret == 0) {
1909 /* There is still data so let's put back this subbuffer. */
1910 ret = kernctl_put_subbuf(stream->wait_fd);
1911 assert(ret == 0);
1912 ret = 1; /* Data is pending */
1913 goto end;
1914 }
1915
1916 /* Data is NOT pending and ready to be read. */
1917 ret = 0;
1918
1919 end:
1920 return ret;
1921 }
This page took 0.066177 seconds and 4 git commands to generate.