consumerd: refactor: split read_subbuf into sub-operations
lttng-tools.git: src/common/kernel-consumer/kernel-consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _LGPL_SOURCE
20 #include <assert.h>
21 #include <poll.h>
22 #include <pthread.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/mman.h>
26 #include <sys/socket.h>
27 #include <sys/types.h>
28 #include <inttypes.h>
29 #include <unistd.h>
30 #include <sys/stat.h>
31
32 #include <bin/lttng-consumerd/health-consumerd.h>
33 #include <common/common.h>
34 #include <common/kernel-ctl/kernel-ctl.h>
35 #include <common/sessiond-comm/sessiond-comm.h>
36 #include <common/sessiond-comm/relayd.h>
37 #include <common/compat/fcntl.h>
38 #include <common/compat/endian.h>
39 #include <common/pipe.h>
40 #include <common/relayd/relayd.h>
41 #include <common/utils.h>
42 #include <common/consumer/consumer-stream.h>
43 #include <common/index/index.h>
44 #include <common/consumer/consumer-timer.h>
45 #include <common/optional.h>
46 #include <common/buffer-view.h>
47 #include <common/consumer/consumer.h>
48 #include <stdint.h>
49
50 #include "kernel-consumer.h"
51
52 extern struct lttng_consumer_global_data consumer_data;
53 extern int consumer_poll_timeout;
54 extern volatile int consumer_quit;
55
56 /*
57 * Take a snapshot for a specific fd
58 *
59 * Returns 0 on success, < 0 on error
60 */
61 int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream)
62 {
63 int ret = 0;
64 int infd = stream->wait_fd;
65
66 ret = kernctl_snapshot(infd);
67 /*
68 * -EAGAIN is not an error, it just means that there is no data to
69 * be read.
70 */
71 if (ret != 0 && ret != -EAGAIN) {
72 PERROR("Getting sub-buffer snapshot.");
73 }
74
75 return ret;
76 }
77
78 /*
79 * Get the produced position
80 *
81 * Returns 0 on success, < 0 on error
82 */
83 int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
84 unsigned long *pos)
85 {
86 int ret;
87 int infd = stream->wait_fd;
88
89 ret = kernctl_snapshot_get_produced(infd, pos);
90 if (ret != 0) {
91 PERROR("kernctl_snapshot_get_produced");
92 }
93
94 return ret;
95 }
96
97 /*
98 * Get the consumed position
99 *
100 * Returns 0 on success, < 0 on error
101 */
102 int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
103 unsigned long *pos)
104 {
105 int ret;
106 int infd = stream->wait_fd;
107
108 ret = kernctl_snapshot_get_consumed(infd, pos);
109 if (ret != 0) {
110 PERROR("kernctl_snapshot_get_consumed");
111 }
112
113 return ret;
114 }
115
116 static
117 int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
118 const char **addr)
119 {
120 int ret;
121 unsigned long mmap_offset;
122 const char *mmap_base = stream->mmap_base;
123
124 ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
125 if (ret < 0) {
126 PERROR("Failed to get mmap read offset");
127 goto error;
128 }
129
130 *addr = mmap_base + mmap_offset;
131 error:
132 return ret;
133 }
134
135 /*
136 * Take a snapshot of all the streams of a channel
137 *
138 * Returns 0 on success, < 0 on error
139 */
140 int lttng_kconsumer_snapshot_channel(uint64_t key, char *path,
141 uint64_t relayd_id, uint64_t nb_packets_per_stream,
142 struct lttng_consumer_local_data *ctx)
143 {
144 int ret;
145 struct lttng_consumer_channel *channel;
146 struct lttng_consumer_stream *stream;
147
148 DBG("Kernel consumer snapshot channel %" PRIu64, key);
149
150 rcu_read_lock();
151
152 channel = consumer_find_channel(key);
153 if (!channel) {
154 ERR("No channel found for key %" PRIu64, key);
155 ret = -1;
156 goto end;
157 }
158
159 /* Splice is not supported yet for channel snapshot. */
160 if (channel->output != CONSUMER_CHANNEL_MMAP) {
161 ERR("Unsupported output %d", channel->output);
162 ret = -1;
163 goto end;
164 }
165
166 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
167 unsigned long consumed_pos, produced_pos;
168
169 health_code_update();
170
171 /*
172 * Lock stream because we are about to change its state.
173 */
174 pthread_mutex_lock(&stream->lock);
175
176 /*
177 * Assign the received relayd ID so we can use it for streaming. The streams
178 * are not visible to anyone else yet, so it is safe to change it.
179 */
180 stream->relayd_id = relayd_id;
181 channel->relayd_id = relayd_id;
182 if (relayd_id != (uint64_t) -1ULL) {
183 ret = consumer_send_relayd_stream(stream, path);
184 if (ret < 0) {
185 ERR("sending stream to relayd");
186 goto end_unlock;
187 }
188 } else {
189 ret = utils_create_stream_file(path, stream->name,
190 stream->chan->tracefile_size,
191 stream->tracefile_count_current,
192 stream->uid, stream->gid, NULL);
193 if (ret < 0) {
194 ERR("utils_create_stream_file");
195 goto end_unlock;
196 }
197
198 stream->out_fd = ret;
199 stream->tracefile_size_current = 0;
200
201 DBG("Kernel consumer snapshot stream %s/%s (%" PRIu64 ")",
202 path, stream->name, stream->key);
203 }
204 if (relayd_id != -1ULL) {
205 ret = consumer_send_relayd_streams_sent(relayd_id);
206 if (ret < 0) {
207 ERR("sending streams sent to relayd");
208 goto end_unlock;
209 }
210 channel->streams_sent_to_relayd = true;
211 }
212
213 ret = kernctl_buffer_flush_empty(stream->wait_fd);
214 if (ret < 0) {
215 /*
216 * Doing a buffer flush which does not take into
217 * account empty packets. This is not perfect
218 * for stream intersection, but required as a
219 * fall-back when "flush_empty" is not
220 * implemented by lttng-modules.
221 */
222 ret = kernctl_buffer_flush(stream->wait_fd);
223 if (ret < 0) {
224 ERR("Failed to flush kernel stream");
225 goto end_unlock;
226 }
227 goto end_unlock;
228 }
229
230 ret = lttng_kconsumer_take_snapshot(stream);
231 if (ret < 0) {
232 ERR("Taking kernel snapshot");
233 goto end_unlock;
234 }
235
236 ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos);
237 if (ret < 0) {
238 ERR("Produced kernel snapshot position");
239 goto end_unlock;
240 }
241
242 ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos);
243 if (ret < 0) {
244 ERR("Consumerd kernel snapshot position");
245 goto end_unlock;
246 }
247
248 if (stream->max_sb_size == 0) {
249 ret = kernctl_get_max_subbuf_size(stream->wait_fd,
250 &stream->max_sb_size);
251 if (ret < 0) {
252 ERR("Getting kernel max_sb_size");
253 goto end_unlock;
254 }
255 }
256
257 consumed_pos = consumer_get_consume_start_pos(consumed_pos,
258 produced_pos, nb_packets_per_stream,
259 stream->max_sb_size);
260
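/*
 * consumer_get_consume_start_pos() bounds how much of the buffer the
 * snapshot reads: with a non-zero nb_packets_per_stream, the start
 * position is moved forward so that at most that many sub-buffers of
 * max_sb_size bytes are consumed, roughly (sketch only, see the helper
 * in common/consumer for the exact alignment handling):
 *
 *   start = max(consumed_pos,
 *           produced_pos - nb_packets_per_stream * max_sb_size);
 *
 * A nb_packets_per_stream of 0 means "snapshot everything produced".
 */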
261 while (consumed_pos < produced_pos) {
262 ssize_t read_len;
263 unsigned long len, padded_len;
264 const char *subbuf_addr;
265 struct lttng_buffer_view subbuf_view;
266
267 health_code_update();
268 DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos);
269
270 ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos);
271 if (ret < 0) {
272 if (ret != -EAGAIN) {
273 PERROR("kernctl_get_subbuf snapshot");
274 goto end_unlock;
275 }
276 DBG("Kernel consumer get subbuf failed. Skipping it.");
277 consumed_pos += stream->max_sb_size;
278 stream->chan->lost_packets++;
279 continue;
280 }
281
282 ret = kernctl_get_subbuf_size(stream->wait_fd, &len);
283 if (ret < 0) {
284 ERR("Snapshot kernctl_get_subbuf_size");
285 goto error_put_subbuf;
286 }
287
288 ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len);
289 if (ret < 0) {
290 ERR("Snapshot kernctl_get_padded_subbuf_size");
291 goto error_put_subbuf;
292 }
293
294 ret = get_current_subbuf_addr(stream, &subbuf_addr);
295 if (ret) {
296 goto error_put_subbuf;
297 }
298
299 subbuf_view = lttng_buffer_view_init(
300 subbuf_addr, 0, padded_len);
301 read_len = lttng_consumer_on_read_subbuffer_mmap(ctx,
302 stream, &subbuf_view,
303 padded_len - len);
304 /*
305 * We write the padded len in local tracefiles but the data len
306 * when using a relay. Display the error but continue processing
307 * to try to release the subbuffer.
308 */
309 if (relayd_id != (uint64_t) -1ULL) {
310 if (read_len != len) {
311 ERR("Error sending to the relay (ret: %zd != len: %lu)",
312 read_len, len);
313 }
314 } else {
315 if (read_len != padded_len) {
316 ERR("Error writing to tracefile (ret: %zd != len: %lu)",
317 read_len, padded_len);
318 }
319 }
320
321 ret = kernctl_put_subbuf(stream->wait_fd);
322 if (ret < 0) {
323 ERR("Snapshot kernctl_put_subbuf");
324 goto end_unlock;
325 }
326 consumed_pos += stream->max_sb_size;
327 }
328
329 if (relayd_id == (uint64_t) -1ULL) {
330 if (stream->out_fd >= 0) {
331 ret = close(stream->out_fd);
332 if (ret < 0) {
333 PERROR("Kernel consumer snapshot close out_fd");
334 goto end_unlock;
335 }
336 stream->out_fd = -1;
337 }
338 } else {
339 close_relayd_stream(stream);
340 stream->relayd_id = (uint64_t) -1ULL;
341 }
342 pthread_mutex_unlock(&stream->lock);
343 }
344
345 /* All good! */
346 ret = 0;
347 goto end;
348
349 error_put_subbuf:
350 ret = kernctl_put_subbuf(stream->wait_fd);
351 if (ret < 0) {
352 ERR("Snapshot kernctl_put_subbuf error path");
353 }
354 end_unlock:
355 pthread_mutex_unlock(&stream->lock);
356 end:
357 rcu_read_unlock();
358 return ret;
359 }
360
361 /*
362 * Read the whole metadata available for a snapshot.
363 *
364 * Returns 0 on success, < 0 on error
365 */
366 static int lttng_kconsumer_snapshot_metadata(uint64_t key, char *path,
367 uint64_t relayd_id, struct lttng_consumer_local_data *ctx)
368 {
369 int ret, use_relayd = 0;
370 ssize_t ret_read;
371 struct lttng_consumer_channel *metadata_channel;
372 struct lttng_consumer_stream *metadata_stream;
373
374 assert(ctx);
375
376 DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s",
377 key, path);
378
379 rcu_read_lock();
380
381 metadata_channel = consumer_find_channel(key);
382 if (!metadata_channel) {
383 ERR("Kernel snapshot metadata not found for key %" PRIu64, key);
384 ret = -1;
385 goto error_no_channel;
386 }
387
388 metadata_stream = metadata_channel->metadata_stream;
389 assert(metadata_stream);
390 pthread_mutex_lock(&metadata_stream->lock);
391
392 /* Flag once that we have a valid relayd for the stream. */
393 if (relayd_id != (uint64_t) -1ULL) {
394 use_relayd = 1;
395 }
396
397 if (use_relayd) {
398 ret = consumer_send_relayd_stream(metadata_stream, path);
399 if (ret < 0) {
400 goto error_snapshot;
401 }
402 } else {
403 ret = utils_create_stream_file(path, metadata_stream->name,
404 metadata_stream->chan->tracefile_size,
405 metadata_stream->tracefile_count_current,
406 metadata_stream->uid, metadata_stream->gid, NULL);
407 if (ret < 0) {
408 goto error_snapshot;
409 }
410 metadata_stream->out_fd = ret;
411 }
412
413 do {
414 health_code_update();
415
416 ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
417 if (ret_read < 0) {
418 if (ret_read != -EAGAIN) {
419 ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)",
420 ret_read);
421 ret = ret_read;
422 goto error_snapshot;
423 }
424 /* ret_read is negative at this point so we will exit the loop. */
425 continue;
426 }
427 } while (ret_read >= 0);
428
429 if (use_relayd) {
430 close_relayd_stream(metadata_stream);
431 metadata_stream->relayd_id = (uint64_t) -1ULL;
432 } else {
433 if (metadata_stream->out_fd >= 0) {
434 ret = close(metadata_stream->out_fd);
435 if (ret < 0) {
436 PERROR("Kernel consumer snapshot metadata close out_fd");
437 /*
438 * Don't go on error here since the snapshot was successful at this
439 * point but somehow the close failed.
440 */
441 }
442 metadata_stream->out_fd = -1;
443 }
444 }
445
446 ret = 0;
447 error_snapshot:
448 pthread_mutex_unlock(&metadata_stream->lock);
449 cds_list_del(&metadata_stream->send_node);
450 consumer_stream_destroy(metadata_stream, NULL);
451 metadata_channel->metadata_stream = NULL;
452 error_no_channel:
453 rcu_read_unlock();
454 return ret;
455 }
456
457 /*
458 * Receive command from session daemon and process it.
459 *
460 * Return 1 on success, 0 or a negative value otherwise.
461 */
462 int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
463 int sock, struct pollfd *consumer_sockpoll)
464 {
465 ssize_t ret;
466 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
467 struct lttcomm_consumer_msg msg;
468
469 health_code_update();
470
471 ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
472 if (ret != sizeof(msg)) {
473 if (ret > 0) {
474 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
475 ret = -1;
476 }
477 return ret;
478 }
479
480 health_code_update();
481
482 /* Deprecated command */
483 assert(msg.cmd_type != LTTNG_CONSUMER_STOP);
484
485 health_code_update();
486
487 /* relayd needs RCU read-side protection */
488 rcu_read_lock();
489
490 switch (msg.cmd_type) {
491 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
492 {
493 /* Session daemon status messages are handled in the following call. */
494 consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
495 msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
496 &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
497 msg.u.relayd_sock.relayd_session_id);
498 goto end_nosignal;
499 }
500 case LTTNG_CONSUMER_ADD_CHANNEL:
501 {
502 struct lttng_consumer_channel *new_channel;
503 int ret_recv;
504
505 health_code_update();
506
507 /* First send a status message before receiving the fds. */
508 ret = consumer_send_status_msg(sock, ret_code);
509 if (ret < 0) {
510 /* Somehow, the session daemon is not responding anymore. */
511 goto error_fatal;
512 }
513
514 health_code_update();
515
516 DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key);
517 new_channel = consumer_allocate_channel(msg.u.channel.channel_key,
518 msg.u.channel.session_id, msg.u.channel.pathname,
519 msg.u.channel.name, msg.u.channel.uid, msg.u.channel.gid,
520 msg.u.channel.relayd_id, msg.u.channel.output,
521 msg.u.channel.tracefile_size,
522 msg.u.channel.tracefile_count, 0,
523 msg.u.channel.monitor,
524 msg.u.channel.live_timer_interval,
525 msg.u.channel.is_live,
526 NULL, NULL);
527 if (new_channel == NULL) {
528 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
529 goto end_nosignal;
530 }
531 new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams;
532 switch (msg.u.channel.output) {
533 case LTTNG_EVENT_SPLICE:
534 new_channel->output = CONSUMER_CHANNEL_SPLICE;
535 break;
536 case LTTNG_EVENT_MMAP:
537 new_channel->output = CONSUMER_CHANNEL_MMAP;
538 break;
539 default:
540 ERR("Channel output unknown %d", msg.u.channel.output);
541 goto end_nosignal;
542 }
543
544 /* Translate and save channel type. */
545 switch (msg.u.channel.type) {
546 case CONSUMER_CHANNEL_TYPE_DATA:
547 case CONSUMER_CHANNEL_TYPE_METADATA:
548 new_channel->type = msg.u.channel.type;
549 break;
550 default:
551 assert(0);
552 goto end_nosignal;
553 }
554
555 health_code_update();
556
557 if (ctx->on_recv_channel != NULL) {
558 ret_recv = ctx->on_recv_channel(new_channel);
559 if (ret_recv == 0) {
560 ret = consumer_add_channel(new_channel, ctx);
561 } else if (ret_recv < 0) {
562 goto end_nosignal;
563 }
564 } else {
565 ret = consumer_add_channel(new_channel, ctx);
566 }
567 if (new_channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
568 consumer_timer_live_start(new_channel,
569 msg.u.channel.live_timer_interval);
570 }
571
572 health_code_update();
573
574 /* If we received an error in add_channel, we need to report it. */
575 if (ret < 0) {
576 ret = consumer_send_status_msg(sock, ret);
577 if (ret < 0) {
578 goto error_fatal;
579 }
580 goto end_nosignal;
581 }
582
583 goto end_nosignal;
584 }
585 case LTTNG_CONSUMER_ADD_STREAM:
586 {
587 int fd;
588 struct lttng_pipe *stream_pipe;
589 struct lttng_consumer_stream *new_stream;
590 struct lttng_consumer_channel *channel;
591 int alloc_ret = 0;
592
593 /*
594 * Get stream's channel reference. Needed when adding the stream to the
595 * global hash table.
596 */
597 channel = consumer_find_channel(msg.u.stream.channel_key);
598 if (!channel) {
599 /*
600 * We could not find the channel. Can happen if cpu hotplug
601 * happens while tearing down.
602 */
603 ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key);
604 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
605 }
606
607 health_code_update();
608
609 /* First send a status message before receiving the fds. */
610 ret = consumer_send_status_msg(sock, ret_code);
611 if (ret < 0) {
612 /* Somehow, the session daemon is not responding anymore. */
613 goto error_fatal;
614 }
615
616 health_code_update();
617
618 if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
619 /* Channel was not found. */
620 goto end_nosignal;
621 }
622
623 /* Blocking call */
624 health_poll_entry();
625 ret = lttng_consumer_poll_socket(consumer_sockpoll);
626 health_poll_exit();
627 if (ret) {
628 goto error_fatal;
629 }
630
631 health_code_update();
632
633 /* Get stream file descriptor from socket */
634 ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
635 if (ret != sizeof(fd)) {
636 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
637 rcu_read_unlock();
638 return ret;
639 }
640
641 health_code_update();
642
643 /*
644 * Send status code to session daemon only if the recv works. If the
645 * above recv() failed, the session daemon is notified through the
646 * error socket and the teardown is eventually done.
647 */
648 ret = consumer_send_status_msg(sock, ret_code);
649 if (ret < 0) {
650 /* Somehow, the session daemon is not responding anymore. */
651 goto end_nosignal;
652 }
653
654 health_code_update();
655
656 pthread_mutex_lock(&channel->lock);
657 new_stream = consumer_stream_create(
658 channel,
659 channel->key,
660 fd,
661 LTTNG_CONSUMER_ACTIVE_STREAM,
662 channel->name,
663 channel->uid,
664 channel->gid,
665 channel->relayd_id,
666 channel->session_id,
667 msg.u.stream.cpu,
668 &alloc_ret,
669 channel->type,
670 channel->monitor);
671 if (new_stream == NULL) {
672 switch (alloc_ret) {
673 case -ENOMEM:
674 case -EINVAL:
675 default:
676 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
677 break;
678 }
679 pthread_mutex_unlock(&channel->lock);
680 goto end_nosignal;
681 }
682
683 new_stream->wait_fd = fd;
684 ret = kernctl_get_max_subbuf_size(new_stream->wait_fd,
685 &new_stream->max_sb_size);
686 if (ret < 0) {
687 pthread_mutex_unlock(&channel->lock);
688 ERR("Failed to get kernel maximal subbuffer size");
689 goto end_nosignal;
690 }
691
692 /*
693 * We've just assigned the channel to the stream, so increment the
694 * refcount right now. We don't need to increment the refcount for
695 * streams in no-monitor mode because we handle their cleanup manually.
696 * It is very important to make sure there are NO prior
697 * consumer_del_stream() calls, or else the refcount will be unbalanced.
698 */
699 if (channel->monitor) {
700 uatomic_inc(&new_stream->chan->refcount);
701 }
702
703 /*
704 * The buffer flush is done on the session daemon side for the kernel
705 * so no need for the stream "hangup_flush_done" variable to be
706 * tracked. This is important for a kernel stream since we don't rely
707 * on the flush state of the stream to read data. It's not the case for
708 * user space tracing.
709 */
710 new_stream->hangup_flush_done = 0;
711
712 health_code_update();
713
714 if (ctx->on_recv_stream) {
715 ret = ctx->on_recv_stream(new_stream);
716 if (ret < 0) {
717 consumer_stream_free(new_stream);
718 goto end_nosignal;
719 }
720 }
721
722 health_code_update();
723
724 if (new_stream->metadata_flag) {
725 channel->metadata_stream = new_stream;
726 }
727
728 /* Do not monitor this stream. */
729 if (!channel->monitor) {
730 DBG("Kernel consumer add stream %s in no monitor mode with "
731 "relayd id %" PRIu64, new_stream->name,
732 new_stream->relayd_id);
733 cds_list_add(&new_stream->send_node, &channel->streams.head);
734 pthread_mutex_unlock(&channel->lock);
735 break;
736 }
737
738 /* Send stream to relayd if the stream has an ID. */
739 if (new_stream->relayd_id != (uint64_t) -1ULL) {
740 ret = consumer_send_relayd_stream(new_stream,
741 new_stream->chan->pathname);
742 if (ret < 0) {
743 pthread_mutex_unlock(&channel->lock);
744 consumer_stream_free(new_stream);
745 goto end_nosignal;
746 }
747
748 /*
749 * If adding an extra stream to an already
750 * existing channel (e.g. cpu hotplug), we need
751 * to send the "streams_sent" command to relayd.
752 */
753 if (channel->streams_sent_to_relayd) {
754 ret = consumer_send_relayd_streams_sent(
755 new_stream->relayd_id);
756 if (ret < 0) {
757 pthread_mutex_unlock(&channel->lock);
758 goto end_nosignal;
759 }
760 }
761 }
762 pthread_mutex_unlock(&channel->lock);
763
764 /* Get the right pipe where the stream will be sent. */
765 if (new_stream->metadata_flag) {
766 ret = consumer_add_metadata_stream(new_stream);
767 if (ret) {
768 ERR("Consumer add metadata stream %" PRIu64 " failed. Continuing",
769 new_stream->key);
770 consumer_stream_free(new_stream);
771 goto end_nosignal;
772 }
773 stream_pipe = ctx->consumer_metadata_pipe;
774 } else {
775 ret = consumer_add_data_stream(new_stream);
776 if (ret) {
777 ERR("Consumer add stream %" PRIu64 " failed. Continuing",
778 new_stream->key);
779 consumer_stream_free(new_stream);
780 goto end_nosignal;
781 }
782 stream_pipe = ctx->consumer_data_pipe;
783 }
784
785 /* Visible to other threads */
786 new_stream->globally_visible = 1;
787
788 health_code_update();
789
790 ret = lttng_pipe_write(stream_pipe, &new_stream, sizeof(new_stream));
791 if (ret < 0) {
792 ERR("Consumer write %s stream to pipe %d",
793 new_stream->metadata_flag ? "metadata" : "data",
794 lttng_pipe_get_writefd(stream_pipe));
795 if (new_stream->metadata_flag) {
796 consumer_del_stream_for_metadata(new_stream);
797 } else {
798 consumer_del_stream_for_data(new_stream);
799 }
800 goto end_nosignal;
801 }
802
803 DBG("Kernel consumer ADD_STREAM %s (fd: %d) with relayd id %" PRIu64,
804 new_stream->name, fd, new_stream->relayd_stream_id);
805 break;
806 }
807 case LTTNG_CONSUMER_STREAMS_SENT:
808 {
809 struct lttng_consumer_channel *channel;
810
811 /*
812 * Get stream's channel reference. Needed when adding the stream to the
813 * global hash table.
814 */
815 channel = consumer_find_channel(msg.u.sent_streams.channel_key);
816 if (!channel) {
817 /*
818 * We could not find the channel. Can happen if cpu hotplug
819 * happens while tearing down.
820 */
821 ERR("Unable to find channel key %" PRIu64,
822 msg.u.sent_streams.channel_key);
823 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
824 }
825
826 health_code_update();
827
828 /*
829 * Send status code to session daemon.
830 */
831 ret = consumer_send_status_msg(sock, ret_code);
832 if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
833 /* Somehow, the session daemon is not responding anymore. */
834 goto end_nosignal;
835 }
836
837 health_code_update();
838
839 /*
840 * We should not send this message if we don't monitor the
841 * streams in this channel.
842 */
843 if (!channel->monitor) {
844 break;
845 }
846
847 health_code_update();
848 /* Send stream to relayd if the stream has an ID. */
849 if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) {
850 ret = consumer_send_relayd_streams_sent(
851 msg.u.sent_streams.net_seq_idx);
852 if (ret < 0) {
853 goto end_nosignal;
854 }
855 channel->streams_sent_to_relayd = true;
856 }
857 break;
858 }
859 case LTTNG_CONSUMER_UPDATE_STREAM:
860 {
861 rcu_read_unlock();
862 return -ENOSYS;
863 }
864 case LTTNG_CONSUMER_DESTROY_RELAYD:
865 {
866 uint64_t index = msg.u.destroy_relayd.net_seq_idx;
867 struct consumer_relayd_sock_pair *relayd;
868
869 DBG("Kernel consumer destroying relayd %" PRIu64, index);
870
871 /* Get relayd reference if exists. */
872 relayd = consumer_find_relayd(index);
873 if (relayd == NULL) {
874 DBG("Unable to find relayd %" PRIu64, index);
875 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
876 }
877
878 /*
879 * Each relayd socket pair has a refcount of streams attached to it;
880 * that refcount tells whether the relayd is still active (i.e. whether
881 * any stream still references it).
882 *
883 * This will set the destroy flag of the relayd object and destroy it
884 * if the refcount reaches zero when called.
885 *
886 * The destroy can happen either here or when a stream fd hangs up.
887 */
888 if (relayd) {
889 consumer_flag_relayd_for_destroy(relayd);
890 }
891
892 health_code_update();
893
894 ret = consumer_send_status_msg(sock, ret_code);
895 if (ret < 0) {
896 /* Somehow, the session daemon is not responding anymore. */
897 goto error_fatal;
898 }
899
900 goto end_nosignal;
901 }
902 case LTTNG_CONSUMER_DATA_PENDING:
903 {
904 int32_t ret;
905 uint64_t id = msg.u.data_pending.session_id;
906
907 DBG("Kernel consumer data pending command for id %" PRIu64, id);
908
909 ret = consumer_data_pending(id);
910
911 health_code_update();
912
913 /* Send back returned value to session daemon */
914 ret = lttcomm_send_unix_sock(sock, &ret, sizeof(ret));
915 if (ret < 0) {
916 PERROR("send data pending ret code");
917 goto error_fatal;
918 }
919
920 /*
921 * No need to send back a status message since the data pending
922 * returned value is the response.
923 */
924 break;
925 }
926 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
927 {
928 if (msg.u.snapshot_channel.metadata == 1) {
929 ret = lttng_kconsumer_snapshot_metadata(msg.u.snapshot_channel.key,
930 msg.u.snapshot_channel.pathname,
931 msg.u.snapshot_channel.relayd_id, ctx);
932 if (ret < 0) {
933 ERR("Snapshot metadata failed");
934 ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
935 }
936 } else {
937 ret = lttng_kconsumer_snapshot_channel(msg.u.snapshot_channel.key,
938 msg.u.snapshot_channel.pathname,
939 msg.u.snapshot_channel.relayd_id,
940 msg.u.snapshot_channel.nb_packets_per_stream,
941 ctx);
942 if (ret < 0) {
943 ERR("Snapshot channel failed");
944 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
945 }
946 }
947
948 health_code_update();
949
950 ret = consumer_send_status_msg(sock, ret_code);
951 if (ret < 0) {
952 /* Somehow, the session daemon is not responding anymore. */
953 goto end_nosignal;
954 }
955 break;
956 }
957 case LTTNG_CONSUMER_DESTROY_CHANNEL:
958 {
959 uint64_t key = msg.u.destroy_channel.key;
960 struct lttng_consumer_channel *channel;
961
962 channel = consumer_find_channel(key);
963 if (!channel) {
964 ERR("Kernel consumer destroy channel %" PRIu64 " not found", key);
965 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
966 }
967
968 health_code_update();
969
970 ret = consumer_send_status_msg(sock, ret_code);
971 if (ret < 0) {
972 /* Somehow, the session daemon is not responding anymore. */
973 goto end_nosignal;
974 }
975
976 health_code_update();
977
978 /* Stop right now if no channel was found. */
979 if (!channel) {
980 goto end_nosignal;
981 }
982
983 /*
984 * This command should ONLY be issued for channel with streams set in
985 * no monitor mode.
986 */
987 assert(!channel->monitor);
988
989 /*
990 * The refcount should ALWAYS be 0 in the case of a channel in no
991 * monitor mode.
992 */
993 assert(!uatomic_sub_return(&channel->refcount, 1));
994
995 consumer_del_channel(channel);
996
997 goto end_nosignal;
998 }
999 case LTTNG_CONSUMER_DISCARDED_EVENTS:
1000 {
1001 uint64_t count;
1002 struct lttng_consumer_channel *channel;
1003 uint64_t id = msg.u.discarded_events.session_id;
1004 uint64_t key = msg.u.discarded_events.channel_key;
1005
1006 DBG("Kernel consumer discarded events command for session id %"
1007 PRIu64 ", channel key %" PRIu64, id, key);
1008
1009 channel = consumer_find_channel(key);
1010 if (!channel) {
1011 ERR("Kernel consumer discarded events channel %"
1012 PRIu64 " not found", key);
1013 count = 0;
1014 } else {
1015 count = channel->discarded_events;
1016 }
1017
1018 health_code_update();
1019
1020 /* Send back returned value to session daemon */
1021 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1022 if (ret < 0) {
1023 PERROR("send discarded events");
1024 goto error_fatal;
1025 }
1026
1027 break;
1028 }
1029 case LTTNG_CONSUMER_LOST_PACKETS:
1030 {
1031 uint64_t count;
1032 struct lttng_consumer_channel *channel;
1033 uint64_t id = msg.u.lost_packets.session_id;
1034 uint64_t key = msg.u.lost_packets.channel_key;
1035
1036 DBG("Kernel consumer lost packets command for session id %"
1037 PRIu64 ", channel key %" PRIu64, id, key);
1038
1039 channel = consumer_find_channel(key);
1040 if (!channel) {
1041 ERR("Kernel consumer lost packets channel %"
1042 PRIu64 " not found", key);
1043 count = 0;
1044 } else {
1045 count = channel->lost_packets;
1046 }
1047
1048 health_code_update();
1049
1050 /* Send back returned value to session daemon */
1051 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1052 if (ret < 0) {
1053 PERROR("send lost packets");
1054 goto error_fatal;
1055 }
1056
1057 break;
1058 }
1059 default:
1060 goto end_nosignal;
1061 }
1062
1063 end_nosignal:
1064 rcu_read_unlock();
1065
1066 /*
1067 * Return 1 to indicate success, since a return value of 0 could mean a
1068 * socket shutdown occurred during the recv() or send() call.
1069 */
1070 health_code_update();
1071 return 1;
1072
1073 error_fatal:
1074 rcu_read_unlock();
1075 /* This will issue a consumer stop. */
1076 return -1;
1077 }
1078
1079 /*
1080 * Sync the metadata: flush and snapshot the kernel metadata buffer so that
1081 * the metadata thread can consume it.
1082 *
1083 * Metadata stream lock MUST be acquired.
1084 *
1085 * Return 0 if new metadata is available, ENODATA if the metadata stream
1086 * is empty, or a negative value on error.
1087 */
1088 int lttng_kconsumer_sync_metadata(struct lttng_consumer_stream *metadata)
1089 {
1090 int ret;
1091
1092 assert(metadata);
1093
1094 ret = kernctl_buffer_flush(metadata->wait_fd);
1095 if (ret < 0) {
1096 ERR("Failed to flush kernel stream");
1097 goto end;
1098 }
1099
1100 ret = kernctl_snapshot(metadata->wait_fd);
1101 if (ret < 0) {
1102 if (ret != -EAGAIN) {
1103 ERR("Sync metadata, taking kernel snapshot failed.");
1104 goto end;
1105 }
1106 DBG("Sync metadata, no new kernel metadata");
1107 /* No new metadata, exit. */
1108 ret = ENODATA;
1109 goto end;
1110 }
1111
1112 end:
1113 return ret;
1114 }
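
/*
 * Sketch of how a caller is expected to react to the return codes above
 * (assumption; the actual caller lives in the generic consumer code):
 *
 *   ret = lttng_kconsumer_sync_metadata(metadata);
 *   if (ret == ENODATA) {
 *           // No new metadata, nothing to read for now.
 *   } else if (ret < 0) {
 *           // Hard error, bail out.
 *   } else {
 *           // New metadata was snapshotted; read it out as usual.
 *   }
 */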
1115
1116 static
1117 int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
1118 struct stream_subbuffer *subbuf)
1119 {
1120 int ret;
1121
1122 ret = kernctl_get_subbuf_size(
1123 stream->wait_fd, &subbuf->info.data.subbuf_size);
1124 if (ret) {
1125 goto end;
1126 }
1127
1128 ret = kernctl_get_padded_subbuf_size(
1129 stream->wait_fd, &subbuf->info.data.padded_subbuf_size);
1130 if (ret) {
1131 goto end;
1132 }
1133
1134 end:
1135 return ret;
1136 }
1137
1138 static
1139 int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
1140 struct stream_subbuffer *subbuf)
1141 {
1142 int ret;
1143
1144 ret = extract_common_subbuffer_info(stream, subbuf);
1145 if (ret) {
1146 goto end;
1147 }
1148
1149 ret = kernctl_get_metadata_version(
1150 stream->wait_fd, &subbuf->info.metadata.version);
1151 if (ret) {
1152 goto end;
1153 }
1154
1155 end:
1156 return ret;
1157 }
1158
1159 static
1160 int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
1161 struct stream_subbuffer *subbuf)
1162 {
1163 int ret;
1164
1165 ret = extract_common_subbuffer_info(stream, subbuf);
1166 if (ret) {
1167 goto end;
1168 }
1169
1170 ret = kernctl_get_packet_size(
1171 stream->wait_fd, &subbuf->info.data.packet_size);
1172 if (ret < 0) {
1173 PERROR("Failed to get sub-buffer packet size");
1174 goto end;
1175 }
1176
1177 ret = kernctl_get_content_size(
1178 stream->wait_fd, &subbuf->info.data.content_size);
1179 if (ret < 0) {
1180 PERROR("Failed to get sub-buffer content size");
1181 goto end;
1182 }
1183
1184 ret = kernctl_get_timestamp_begin(
1185 stream->wait_fd, &subbuf->info.data.timestamp_begin);
1186 if (ret < 0) {
1187 PERROR("Failed to get sub-buffer begin timestamp");
1188 goto end;
1189 }
1190
1191 ret = kernctl_get_timestamp_end(
1192 stream->wait_fd, &subbuf->info.data.timestamp_end);
1193 if (ret < 0) {
1194 PERROR("Failed to get sub-buffer end timestamp");
1195 goto end;
1196 }
1197
1198 ret = kernctl_get_events_discarded(
1199 stream->wait_fd, &subbuf->info.data.events_discarded);
1200 if (ret) {
1201 PERROR("Failed to get sub-buffer events discarded count");
1202 goto end;
1203 }
1204
1205 ret = kernctl_get_sequence_number(stream->wait_fd,
1206 &subbuf->info.data.sequence_number.value);
1207 if (ret) {
1208 /* May not be supported by older LTTng-modules. */
1209 if (ret != -ENOTTY) {
1210 PERROR("Failed to get sub-buffer sequence number");
1211 goto end;
1212 }
1213 } else {
1214 subbuf->info.data.sequence_number.is_set = true;
1215 }
1216
1217 ret = kernctl_get_stream_id(
1218 stream->wait_fd, &subbuf->info.data.stream_id);
1219 if (ret < 0) {
1220 PERROR("Failed to get stream id");
1221 goto end;
1222 }
1223
1224 ret = kernctl_get_instance_id(stream->wait_fd,
1225 &subbuf->info.data.stream_instance_id.value);
1226 if (ret) {
1227 /* May not be supported by older LTTng-modules. */
1228 if (ret != -ENOTTY) {
1229 PERROR("Failed to get stream instance id");
1230 goto end;
1231 }
1232 } else {
1233 subbuf->info.data.stream_instance_id.is_set = true;
1234 }
1235 end:
1236 return ret;
1237 }
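
/*
 * The sequence number and stream instance id are wrapped in optional
 * fields (see common/optional.h) because older lttng-modules return
 * -ENOTTY for those ioctls. A user of this info is expected to check
 * the 'is_set' flag before using the value, e.g. (illustrative sketch,
 * not taken from the index-writing code):
 *
 *   if (subbuf->info.data.sequence_number.is_set) {
 *           use(subbuf->info.data.sequence_number.value);
 *   }
 */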
1238
1239 static
1240 int get_subbuffer_common(struct lttng_consumer_stream *stream,
1241 struct stream_subbuffer *subbuffer)
1242 {
1243 int ret;
1244
1245 ret = kernctl_get_next_subbuf(stream->wait_fd);
1246 if (ret) {
1247 goto end;
1248 }
1249
1250 ret = stream->read_subbuffer_ops.extract_subbuffer_info(
1251 stream, subbuffer);
1252 end:
1253 return ret;
1254 }
1255
1256 static
1257 int get_next_subbuffer_splice(struct lttng_consumer_stream *stream,
1258 struct stream_subbuffer *subbuffer)
1259 {
1260 int ret;
1261
1262 ret = get_subbuffer_common(stream, subbuffer);
1263 if (ret) {
1264 goto end;
1265 }
1266
1267 subbuffer->buffer.fd = stream->wait_fd;
1268 end:
1269 return ret;
1270 }
1271
1272 static
1273 int get_next_subbuffer_mmap(struct lttng_consumer_stream *stream,
1274 struct stream_subbuffer *subbuffer)
1275 {
1276 int ret;
1277 const char *addr;
1278
1279 ret = get_subbuffer_common(stream, subbuffer);
1280 if (ret) {
1281 goto end;
1282 }
1283
1284 ret = get_current_subbuf_addr(stream, &addr);
1285 if (ret) {
1286 goto end;
1287 }
1288
1289 subbuffer->buffer.buffer = lttng_buffer_view_init(
1290 addr, 0, subbuffer->info.data.padded_subbuf_size);
1291 end:
1292 return ret;
1293 }
1294
1295 static
1296 int put_next_subbuffer(struct lttng_consumer_stream *stream,
1297 struct stream_subbuffer *subbuffer)
1298 {
1299 const int ret = kernctl_put_next_subbuf(stream->wait_fd);
1300
1301 if (ret) {
1302 if (ret == -EFAULT) {
1303 PERROR("Error in unreserving sub buffer");
1304 } else if (ret == -EIO) {
1305 /* Should never happen with newer LTTng versions */
1306 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
1307 }
1308 }
1309
1310 return ret;
1311 }
1312
1313 static void lttng_kconsumer_set_stream_ops(
1314 struct lttng_consumer_stream *stream)
1315 {
1316 if (stream->chan->output == CONSUMER_CHANNEL_MMAP) {
1317 stream->read_subbuffer_ops.get_next_subbuffer =
1318 get_next_subbuffer_mmap;
1319 } else {
1320 stream->read_subbuffer_ops.get_next_subbuffer =
1321 get_next_subbuffer_splice;
1322 }
1323
1324 if (stream->metadata_flag) {
1325 stream->read_subbuffer_ops.extract_subbuffer_info =
1326 extract_metadata_subbuffer_info;
1327 } else {
1328 stream->read_subbuffer_ops.extract_subbuffer_info =
1329 extract_data_subbuffer_info;
1330 if (stream->chan->is_live) {
1331 stream->read_subbuffer_ops.send_live_beacon =
1332 consumer_flush_kernel_index;
1333 }
1334 }
1335
1336 stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
1337 }
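
/*
 * Rough shape of how the generic read path is expected to drive these
 * sub-operations (sketch only; see lttng_consumer_read_subbuffer() in
 * common/consumer for the real sequencing, index handling and locking):
 *
 *   struct stream_subbuffer subbuf;
 *
 *   ret = stream->read_subbuffer_ops.get_next_subbuffer(stream, &subbuf);
 *   if (!ret) {
 *           // consume subbuf.buffer.buffer (mmap view) or
 *           // subbuf.buffer.fd (splice), using info.data.subbuf_size and
 *           // info.data.padded_subbuf_size as needed, then release:
 *           ret = stream->read_subbuffer_ops.put_next_subbuffer(stream, &subbuf);
 *   }
 */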
1338
1339 int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
1340 {
1341 int ret;
1342
1343 assert(stream);
1344
1345 /*
1346 * Don't create anything if this is set for streaming or should not be
1347 * monitored.
1348 */
1349 if (stream->relayd_id == (uint64_t) -1ULL && stream->chan->monitor) {
1350 ret = utils_create_stream_file(stream->chan->pathname, stream->name,
1351 stream->chan->tracefile_size, stream->tracefile_count_current,
1352 stream->uid, stream->gid, NULL);
1353 if (ret < 0) {
1354 goto error;
1355 }
1356 stream->out_fd = ret;
1357 stream->tracefile_size_current = 0;
1358
1359 if (!stream->metadata_flag) {
1360 struct lttng_index_file *index_file;
1361
1362 index_file = lttng_index_file_create(stream->chan->pathname,
1363 stream->name, stream->uid, stream->gid,
1364 stream->chan->tracefile_size,
1365 stream->tracefile_count_current,
1366 CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
1367 if (!index_file) {
1368 goto error;
1369 }
1370 stream->index_file = index_file;
1371 }
1372 }
1373
1374 if (stream->output == LTTNG_EVENT_MMAP) {
1375 /* get the len of the mmap region */
1376 unsigned long mmap_len;
1377
1378 ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len);
1379 if (ret != 0) {
1380 PERROR("kernctl_get_mmap_len");
1381 goto error_close_fd;
1382 }
1383 stream->mmap_len = (size_t) mmap_len;
1384
1385 stream->mmap_base = mmap(NULL, stream->mmap_len, PROT_READ,
1386 MAP_PRIVATE, stream->wait_fd, 0);
1387 if (stream->mmap_base == MAP_FAILED) {
1388 PERROR("Error mmaping");
1389 ret = -1;
1390 goto error_close_fd;
1391 }
1392 }
1393
1394 lttng_kconsumer_set_stream_ops(stream);
1395
1396 /* we return 0 to let the library handle the FD internally */
1397 return 0;
1398
1399 error_close_fd:
1400 if (stream->out_fd >= 0) {
1401 int err;
1402
1403 err = close(stream->out_fd);
1404 assert(!err);
1405 stream->out_fd = -1;
1406 }
1407 error:
1408 return ret;
1409 }
1410
1411 /*
1412 * Check if data is still being extracted from the buffers for a specific
1413 * stream. The consumer data lock and the stream lock MUST be acquired
1414 * before calling this function.
1415 *
1416 * Return 1 if the traced data is still being read, else 0, meaning that the
1417 * data is available for trace viewer reading.
1418 */
1419 int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream)
1420 {
1421 int ret;
1422
1423 assert(stream);
1424
1425 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
1426 ret = 0;
1427 goto end;
1428 }
1429
1430 ret = kernctl_get_next_subbuf(stream->wait_fd);
1431 if (ret == 0) {
1432 /* There is still data so let's put back this subbuffer. */
1433 ret = kernctl_put_subbuf(stream->wait_fd);
1434 assert(ret == 0);
1435 ret = 1; /* Data is pending */
1436 goto end;
1437 }
1438
1439 /* Data is NOT pending and ready to be read. */
1440 ret = 0;
1441
1442 end:
1443 return ret;
1444 }