Backport: relayd: track listener's epoll fd using the fd-tracker
[lttng-tools.git] / src / bin / lttng-relayd / main.c
1 /*
2 * Copyright (C) 2012 - Julien Desfossez <jdesfossez@efficios.com>
3 * David Goulet <dgoulet@efficios.com>
4 * 2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 * 2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License, version 2 only,
9 * as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21 #define _LGPL_SOURCE
22 #include <getopt.h>
23 #include <grp.h>
24 #include <limits.h>
25 #include <pthread.h>
26 #include <signal.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/mman.h>
31 #include <sys/mount.h>
32 #include <sys/resource.h>
33 #include <sys/socket.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/wait.h>
37 #include <sys/resource.h>
38 #include <inttypes.h>
39 #include <urcu/futex.h>
40 #include <urcu/uatomic.h>
41 #include <unistd.h>
42 #include <fcntl.h>
43 #include <ctype.h>
44
45 #include <lttng/lttng.h>
46 #include <common/common.h>
47 #include <common/compat/poll.h>
48 #include <common/compat/socket.h>
49 #include <common/compat/endian.h>
50 #include <common/compat/getenv.h>
51 #include <common/defaults.h>
52 #include <common/daemonize.h>
53 #include <common/futex.h>
54 #include <common/sessiond-comm/sessiond-comm.h>
55 #include <common/sessiond-comm/inet.h>
56 #include <common/sessiond-comm/relayd.h>
57 #include <common/uri.h>
58 #include <common/utils.h>
59 #include <common/config/session-config.h>
60 #include <common/dynamic-buffer.h>
61 #include <common/buffer-view.h>
62 #include <common/fd-tracker/utils.h>
63 #include <urcu/rculist.h>
64
65 #include "version.h"
66 #include "cmd.h"
67 #include "ctf-trace.h"
68 #include "index.h"
69 #include "utils.h"
70 #include "lttng-relayd.h"
71 #include "live.h"
72 #include "health-relayd.h"
73 #include "testpoint.h"
74 #include "viewer-stream.h"
75 #include "session.h"
76 #include "stream.h"
77 #include "connection.h"
78 #include "tracefile-array.h"
79 #include "tcp_keep_alive.h"
80
81 enum relay_connection_status {
82 RELAY_CONNECTION_STATUS_OK,
83 /* An error occurred while processing an event on the connection. */
84 RELAY_CONNECTION_STATUS_ERROR,
85 /* Connection closed/shutdown cleanly. */
86 RELAY_CONNECTION_STATUS_CLOSED,
87 };
88
89 /* command line options */
90 char *opt_output_path, *opt_working_directory;
91 static int opt_daemon, opt_background, opt_print_version;
92 int opt_group_output_by_session;
93 int opt_group_output_by_host;
94
95 /*
96 * We need to wait for the listener and live listener threads, as well as
97 * the health check thread, before signaling readiness.
98 */
99 #define NR_LTTNG_RELAY_READY 3
100 static int lttng_relay_ready = NR_LTTNG_RELAY_READY;
101
102 /* Size of receive buffer. */
103 #define RECV_DATA_BUFFER_SIZE 65536
104
105 static int recv_child_signal; /* Set to 1 when a SIGUSR1 signal is received. */
106 static pid_t child_ppid; /* Internal parent PID used with daemonize. */
107
108 static struct lttng_uri *control_uri;
109 static struct lttng_uri *data_uri;
110 static struct lttng_uri *live_uri;
111
112 const char *progname;
113
114 const char *tracing_group_name = DEFAULT_TRACING_GROUP;
115 static int tracing_group_name_override;
116
117 const char * const config_section_name = "relayd";
118
119 /*
120 * Quit pipe for all threads. This permits a single cancellation point
121 * for all threads when receiving an event on the pipe.
122 */
123 int thread_quit_pipe[2] = { -1, -1 };
124
125 /*
126 * This pipe is used to inform the worker thread that a command is queued and
127 * ready to be processed.
128 */
129 static int relay_conn_pipe[2] = { -1, -1 };
130
131 /* Shared between threads */
132 static int dispatch_thread_exit;
133
134 static pthread_t listener_thread;
135 static pthread_t dispatcher_thread;
136 static pthread_t worker_thread;
137 static pthread_t health_thread;
138
139 /*
140 * last_relay_stream_id_lock protects last_relay_stream_id increment
141 * atomicity on 32-bit architectures.
142 */
143 static pthread_mutex_t last_relay_stream_id_lock = PTHREAD_MUTEX_INITIALIZER;
144 static uint64_t last_relay_stream_id;
145
146 /*
147 * Relay command queue.
148 *
149 * The relay_thread_listener and relay_thread_dispatcher communicate through
150 * this queue.
151 */
152 static struct relay_conn_queue relay_conn_queue;
153
154 /* Cap on the number of file descriptors simultaneously in use by the relay daemon. */
155 static unsigned int lttng_opt_fd_cap;
156
157 /* Global relay stream hash table. */
158 struct lttng_ht *relay_streams_ht;
159
160 /* Global relay viewer stream hash table. */
161 struct lttng_ht *viewer_streams_ht;
162
163 /* Global relay sessions hash table. */
164 struct lttng_ht *sessions_ht;
165
166 /* Relayd health monitoring */
167 struct health_app *health_relayd;
168
169 /* Global fd tracker. */
170 struct fd_tracker *the_fd_tracker;
171
172 static struct option long_options[] = {
173 { "control-port", 1, 0, 'C', },
174 { "data-port", 1, 0, 'D', },
175 { "live-port", 1, 0, 'L', },
176 { "daemonize", 0, 0, 'd', },
177 { "background", 0, 0, 'b', },
178 { "group", 1, 0, 'g', },
179 { "fd-cap", 1, 0, '\0', },
180 { "help", 0, 0, 'h', },
181 { "output", 1, 0, 'o', },
182 { "verbose", 0, 0, 'v', },
183 { "config", 1, 0, 'f' },
184 { "version", 0, 0, 'V' },
185 { "working-directory", 1, 0, 'w', },
186 { "group-output-by-session", 0, 0, 's', },
187 { "group-output-by-host", 0, 0, 'p', },
188 { NULL, 0, 0, 0, },
189 };
190
191 static const char *config_ignore_options[] = { "help", "config", "version" };
192
193 static void print_version(void) {
194 fprintf(stdout, "%s\n", VERSION);
195 }
196
197 static void relayd_config_log(void)
198 {
199 DBG("LTTng-relayd " VERSION " - " VERSION_NAME "%s%s",
200 GIT_VERSION[0] == '\0' ? "" : " - " GIT_VERSION,
201 EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " EXTRA_VERSION_NAME);
202 if (EXTRA_VERSION_DESCRIPTION[0] != '\0') {
203 DBG("LTTng-relayd extra version description:\n\t" EXTRA_VERSION_DESCRIPTION "\n");
204 }
205 if (EXTRA_VERSION_PATCHES[0] != '\0') {
206 DBG("LTTng-relayd extra patches:\n\t" EXTRA_VERSION_PATCHES "\n");
207 }
208 }
209
210 /*
211 * Take an option from the getopt output and set it in the right variable to be
212 * used later.
213 *
214 * Return 0 on success else a negative value.
215 */
216 static int set_option(int opt, const char *arg, const char *optname)
217 {
218 int ret;
219
220 switch (opt) {
221 case 0:
222 if (!strcmp(optname, "fd-cap")) {
223 unsigned long v;
224
225 errno = 0;
226 v = strtoul(arg, NULL, 0);
227 if (errno != 0 || !isdigit(arg[0])) {
228 ERR("Wrong value in --fd-cap parameter: %s", arg);
229 ret = -1;
230 goto end;
231 }
232 if (v < DEFAULT_RELAYD_MINIMAL_FD_CAP) {
233 ERR("File descriptor cap must be set to at least %d",
234 DEFAULT_RELAYD_MINIMAL_FD_CAP);
ret = -1;
goto end;
235 }
236 if (v >= UINT_MAX) {
237 ERR("File descriptor cap overflow in --fd-cap parameter: %s", arg);
238 ret = -1;
239 goto end;
240 }
241 lttng_opt_fd_cap = (unsigned int) v;
242 DBG3("File descriptor cap set to %u", lttng_opt_fd_cap);
243
244 } else {
245 fprintf(stderr, "unknown option %s", optname);
246 if (arg) {
247 fprintf(stderr, " with arg %s\n", arg);
248 }
249 }
250 break;
251 case 'C':
252 if (lttng_is_setuid_setgid()) {
253 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
254 "-C, --control-port");
255 } else {
256 ret = uri_parse(arg, &control_uri);
257 if (ret < 0) {
258 ERR("Invalid control URI specified");
259 goto end;
260 }
261 if (control_uri->port == 0) {
262 control_uri->port = DEFAULT_NETWORK_CONTROL_PORT;
263 }
264 }
265 break;
266 case 'D':
267 if (lttng_is_setuid_setgid()) {
268 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
269 "-D, -data-port");
270 } else {
271 ret = uri_parse(arg, &data_uri);
272 if (ret < 0) {
273 ERR("Invalid data URI specified");
274 goto end;
275 }
276 if (data_uri->port == 0) {
277 data_uri->port = DEFAULT_NETWORK_DATA_PORT;
278 }
279 }
280 break;
281 case 'L':
282 if (lttng_is_setuid_setgid()) {
283 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
284 "-L, -live-port");
285 } else {
286 ret = uri_parse(arg, &live_uri);
287 if (ret < 0) {
288 ERR("Invalid live URI specified");
289 goto end;
290 }
291 if (live_uri->port == 0) {
292 live_uri->port = DEFAULT_NETWORK_VIEWER_PORT;
293 }
294 }
295 break;
296 case 'd':
297 opt_daemon = 1;
298 break;
299 case 'b':
300 opt_background = 1;
301 break;
302 case 'g':
303 if (lttng_is_setuid_setgid()) {
304 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
305 "-g, --group");
306 } else {
307 tracing_group_name = strdup(arg);
308 if (tracing_group_name == NULL) {
309 ret = -errno;
310 PERROR("strdup");
311 goto end;
312 }
313 tracing_group_name_override = 1;
314 }
315 break;
316 case 'h':
317 ret = utils_show_man_page(8, "lttng-relayd");
318 if (ret) {
319 ERR("Cannot view man page lttng-relayd(8)");
320 perror("exec");
321 }
322 exit(EXIT_FAILURE);
323 case 'V':
324 opt_print_version = 1;
325 break;
326 case 'o':
327 if (lttng_is_setuid_setgid()) {
328 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
329 "-o, --output");
330 } else {
331 ret = asprintf(&opt_output_path, "%s", arg);
332 if (ret < 0) {
333 ret = -errno;
334 PERROR("asprintf opt_output_path");
335 goto end;
336 }
337 }
338 break;
339 case 'w':
340 if (lttng_is_setuid_setgid()) {
341 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
342 "-w, --working-directory");
343 } else {
344 ret = asprintf(&opt_working_directory, "%s", arg);
345 if (ret < 0) {
346 ret = -errno;
347 PERROR("asprintf working_directory");
348 goto end;
349 }
350 }
351 break;
352
353 case 'v':
354 /* The verbosity level can be increased by passing -v multiple times. */
355 if (arg) {
356 lttng_opt_verbose = config_parse_value(arg);
357 } else {
358 /* Only 3 levels of verbosity (-vvv). */
359 if (lttng_opt_verbose < 3) {
360 lttng_opt_verbose += 1;
361 }
362 }
363 break;
364 case 's':
365 if (opt_group_output_by_host) {
366 ERR("Cannot set --group-output-by-session, --group-output-by-host already defined");
367 exit(EXIT_FAILURE);
368 }
369 opt_group_output_by_session = 1;
370 break;
371 case 'p':
372 if (opt_group_output_by_session) {
373 ERR("Cannot set --group-output-by-host, --group-output-by-session already defined");
374 exit(EXIT_FAILURE);
375 }
376 opt_group_output_by_host = 1;
377 break;
378 default:
379 /* Unknown option or other error.
380 * Error is printed by getopt, just return */
381 ret = -1;
382 goto end;
383 }
384
385 /* All good. */
386 ret = 0;
387
388 end:
389 return ret;
390 }
391
392 /*
393 * config_entry_handler_cb used to handle options read from a config file.
394 * See config_entry_handler_cb comment in common/config/session-config.h for the
395 * return value conventions.
396 */
397 static int config_entry_handler(const struct config_entry *entry, void *unused)
398 {
399 int ret = 0, i;
400
401 if (!entry || !entry->name || !entry->value) {
402 ret = -EINVAL;
403 goto end;
404 }
405
406 /* Check if the option is to be ignored */
407 for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
408 if (!strcmp(entry->name, config_ignore_options[i])) {
409 goto end;
410 }
411 }
412
413 for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1; i++) {
414 /* Ignore if entry name is not fully matched. */
415 if (strcmp(entry->name, long_options[i].name)) {
416 continue;
417 }
418
419 /*
420 * If the option takes no argument on the command line,
421 * we have to check if the value is "true". We support
422 * non-zero numeric values, true, on and yes.
423 */
424 if (!long_options[i].has_arg) {
425 ret = config_parse_value(entry->value);
426 if (ret <= 0) {
427 if (ret) {
428 WARN("Invalid configuration value \"%s\" for option %s",
429 entry->value, entry->name);
430 }
431 /* False, skip boolean config option. */
432 goto end;
433 }
434 }
435
436 ret = set_option(long_options[i].val, entry->value, entry->name);
437 goto end;
438 }
439
440 WARN("Unrecognized option \"%s\" in daemon configuration file.",
441 entry->name);
442
443 end:
444 return ret;
445 }
446
447 static void parse_env_options(void)
448 {
449 char *value = NULL;
450
451 value = lttng_secure_getenv(DEFAULT_LTTNG_RELAYD_WORKING_DIRECTORY_ENV);
452 if (value) {
453 opt_working_directory = value;
454 }
455 }
456
457 static int set_options(int argc, char **argv)
458 {
459 int c, ret = 0, option_index = 0, retval = 0;
460 int orig_optopt = optopt, orig_optind = optind;
461 char *default_address, *optstring;
462 const char *config_path = NULL;
463
464 optstring = utils_generate_optstring(long_options,
465 sizeof(long_options) / sizeof(struct option));
466 if (!optstring) {
467 retval = -ENOMEM;
468 goto exit;
469 }
470
471 /* Check for the --config option */
472
473 while ((c = getopt_long(argc, argv, optstring, long_options,
474 &option_index)) != -1) {
475 if (c == '?') {
476 retval = -EINVAL;
477 goto exit;
478 } else if (c != 'f') {
479 continue;
480 }
481
482 if (lttng_is_setuid_setgid()) {
483 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
484 "-f, --config");
485 } else {
486 config_path = utils_expand_path(optarg);
487 if (!config_path) {
488 ERR("Failed to resolve path: %s", optarg);
489 }
490 }
491 }
492
493 ret = config_get_section_entries(config_path, config_section_name,
494 config_entry_handler, NULL);
495 if (ret) {
496 if (ret > 0) {
497 ERR("Invalid configuration option at line %i", ret);
498 }
499 retval = -1;
500 goto exit;
501 }
502
503 /* Reset getopt's global state */
504 optopt = orig_optopt;
505 optind = orig_optind;
506 while (1) {
507 c = getopt_long(argc, argv, optstring, long_options, &option_index);
508 if (c == -1) {
509 break;
510 }
511
512 ret = set_option(c, optarg, long_options[option_index].name);
513 if (ret < 0) {
514 retval = -1;
515 goto exit;
516 }
517 }
518
519 /* assign default values */
520 if (control_uri == NULL) {
521 ret = asprintf(&default_address,
522 "tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d",
523 DEFAULT_NETWORK_CONTROL_PORT);
524 if (ret < 0) {
525 PERROR("asprintf default data address");
526 retval = -1;
527 goto exit;
528 }
529
530 ret = uri_parse(default_address, &control_uri);
531 free(default_address);
532 if (ret < 0) {
533 ERR("Invalid control URI specified");
534 retval = -1;
535 goto exit;
536 }
537 }
538 if (data_uri == NULL) {
539 ret = asprintf(&default_address,
540 "tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d",
541 DEFAULT_NETWORK_DATA_PORT);
542 if (ret < 0) {
543 PERROR("asprintf default data address");
544 retval = -1;
545 goto exit;
546 }
547
548 ret = uri_parse(default_address, &data_uri);
549 free(default_address);
550 if (ret < 0) {
551 ERR("Invalid data URI specified");
552 retval = -1;
553 goto exit;
554 }
555 }
556 if (live_uri == NULL) {
557 ret = asprintf(&default_address,
558 "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d",
559 DEFAULT_NETWORK_VIEWER_PORT);
560 if (ret < 0) {
561 PERROR("asprintf default viewer control address");
562 retval = -1;
563 goto exit;
564 }
565
566 ret = uri_parse(default_address, &live_uri);
567 free(default_address);
568 if (ret < 0) {
569 ERR("Invalid viewer control URI specified");
570 retval = -1;
571 goto exit;
572 }
573 }
574 if (lttng_opt_fd_cap == 0) {
575 int ret;
576 struct rlimit rlimit;
577
578 ret = getrlimit(RLIMIT_NOFILE, &rlimit);
579 if (ret) {
580 PERROR("Failed to get file descriptor limit");
581 retval = -1;
goto exit;
582 }
583
584 lttng_opt_fd_cap = rlimit.rlim_cur;
585 }
586
587 if (!opt_group_output_by_session && !opt_group_output_by_host) {
588 /* Group by host by default */
589 opt_group_output_by_host = 1;
590 }
591
592 exit:
593 free(optstring);
594 return retval;
595 }
596
597 static void print_global_objects(void)
598 {
599 print_viewer_streams();
600 print_relay_streams();
601 print_sessions();
602 }
603
604 /*
605 * Cleanup the daemon
606 */
607 static void relayd_cleanup(void)
608 {
609 print_global_objects();
610
611 DBG("Cleaning up");
612
613 if (viewer_streams_ht)
614 lttng_ht_destroy(viewer_streams_ht);
615 if (relay_streams_ht)
616 lttng_ht_destroy(relay_streams_ht);
617 if (sessions_ht)
618 lttng_ht_destroy(sessions_ht);
619
620 /* free the dynamically allocated opt_output_path */
621 free(opt_output_path);
622
623 /* Close thread quit pipes */
624 (void) fd_tracker_util_pipe_close(the_fd_tracker, thread_quit_pipe);
625
626 uri_free(control_uri);
627 uri_free(data_uri);
628 /* Live URI is freed in the live thread. */
629
630 if (tracing_group_name_override) {
631 free((void *) tracing_group_name);
632 }
633 fd_tracker_log(the_fd_tracker);
634 }
635
636 /*
637 * Write to writable pipe used to notify a thread.
638 */
639 static int notify_thread_pipe(int wpipe)
640 {
641 ssize_t ret;
642
643 ret = lttng_write(wpipe, "!", 1);
644 if (ret < 1) {
645 PERROR("write poll pipe");
646 goto end;
647 }
648 ret = 0;
649 end:
650 return ret;
651 }
652
653 static int notify_health_quit_pipe(int *pipe)
654 {
655 ssize_t ret;
656
657 ret = lttng_write(pipe[1], "4", 1);
658 if (ret < 1) {
659 PERROR("write relay health quit");
660 goto end;
661 }
662 ret = 0;
663 end:
664 return ret;
665 }
666
667 /*
668 * Stop all relayd and relayd-live threads.
669 */
670 int lttng_relay_stop_threads(void)
671 {
672 int retval = 0;
673
674 /* Stopping all threads */
675 DBG("Terminating all threads");
676 if (notify_thread_pipe(thread_quit_pipe[1])) {
677 ERR("write error on thread quit pipe");
678 retval = -1;
679 }
680
681 if (notify_health_quit_pipe(health_quit_pipe)) {
682 ERR("write error on health quit pipe");
683 }
684
685 /* Dispatch thread */
686 CMM_STORE_SHARED(dispatch_thread_exit, 1);
687 futex_nto1_wake(&relay_conn_queue.futex);
688
689 if (relayd_live_stop()) {
690 ERR("Error stopping live threads");
691 retval = -1;
692 }
693 return retval;
694 }
695
696 /*
697 * Signal handler for the daemon
698 *
699 * Simply stop all worker threads, letting main() return gracefully after
700 * joining all threads and calling cleanup().
701 */
702 static void sighandler(int sig)
703 {
704 switch (sig) {
705 case SIGINT:
706 DBG("SIGINT caught");
707 if (lttng_relay_stop_threads()) {
708 ERR("Error stopping threads");
709 }
710 break;
711 case SIGTERM:
712 DBG("SIGTERM caught");
713 if (lttng_relay_stop_threads()) {
714 ERR("Error stopping threads");
715 }
716 break;
717 case SIGUSR1:
718 CMM_STORE_SHARED(recv_child_signal, 1);
719 break;
720 default:
721 break;
722 }
723 }
724
725 /*
726 * Set up the signal handlers for:
727 * SIGINT, SIGTERM, SIGUSR1, SIGPIPE
728 */
729 static int set_signal_handler(void)
730 {
731 int ret = 0;
732 struct sigaction sa;
733 sigset_t sigset;
734
735 if ((ret = sigemptyset(&sigset)) < 0) {
736 PERROR("sigemptyset");
737 return ret;
738 }
739
740 sa.sa_mask = sigset;
741 sa.sa_flags = 0;
742
743 sa.sa_handler = sighandler;
744 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
745 PERROR("sigaction");
746 return ret;
747 }
748
749 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
750 PERROR("sigaction");
751 return ret;
752 }
753
754 if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
755 PERROR("sigaction");
756 return ret;
757 }
758
759 sa.sa_handler = SIG_IGN;
760 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
761 PERROR("sigaction");
762 return ret;
763 }
764
765 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
766
767 return ret;
768 }
769
770 void lttng_relay_notify_ready(void)
771 {
772 /* Notify the parent of the fork() process that we are ready. */
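/*
 * Each of the listener, live and health threads calls this function;
 * the one that brings the counter to zero (see NR_LTTNG_RELAY_READY)
 * signals the parent with SIGUSR1.
 */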
773 if (opt_daemon || opt_background) {
774 if (uatomic_sub_return(&lttng_relay_ready, 1) == 0) {
775 kill(child_ppid, SIGUSR1);
776 }
777 }
778 }
779
780 /*
781 * Init thread quit pipe.
782 *
783 * Return -1 on error or 0 if all pipes are created.
784 */
785 static int init_thread_quit_pipe(void)
786 {
787 return fd_tracker_util_pipe_open_cloexec(the_fd_tracker,
788 "Quit pipe", thread_quit_pipe);
789 }
790
791 /*
792 * Init health quit pipe.
793 *
794 * Return -1 on error or 0 if all pipes are created.
795 */
796 static int init_health_quit_pipe(void)
797 {
798 return fd_tracker_util_pipe_open_cloexec(the_fd_tracker,
799 "Health quit pipe", health_quit_pipe);
800 }
801
802 /*
803 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
804 */
805 static int create_named_thread_poll_set(struct lttng_poll_event *events,
806 int size, const char *name)
807 {
808 int ret;
809
810 if (events == NULL || size == 0) {
811 ret = -1;
812 goto error;
813 }
814
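/*
 * The poll set (an epoll fd on Linux) is created through the fd-tracker
 * so that it is named and tracked like the daemon's other file
 * descriptors (it shows up in fd_tracker_log() output).
 */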
815 ret = fd_tracker_util_poll_create(the_fd_tracker,
816 name, events, 1, LTTNG_CLOEXEC);
if (ret) {
goto error;
}
817
818 /* Add quit pipe */
819 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
820 if (ret < 0) {
821 goto error;
822 }
823
824 return 0;
825
826 error:
827 return ret;
828 }
829
830 /*
831 * Check if the thread quit pipe was triggered.
832 *
833 * Return 1 if it was triggered else 0;
834 */
835 static int check_thread_quit_pipe(int fd, uint32_t events)
836 {
837 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
838 return 1;
839 }
840
841 return 0;
842 }
843
844 /*
845 * Create and init socket from uri.
846 */
847 static struct lttcomm_sock *relay_socket_create(struct lttng_uri *uri)
848 {
849 int ret;
850 struct lttcomm_sock *sock = NULL;
851
852 sock = lttcomm_alloc_sock_from_uri(uri);
853 if (sock == NULL) {
854 ERR("Allocating socket");
855 goto error;
856 }
857
858 ret = lttcomm_create_sock(sock);
859 if (ret < 0) {
860 goto error;
861 }
862 DBG("Listening on sock %d", sock->fd);
863
864 ret = sock->ops->bind(sock);
865 if (ret < 0) {
866 goto error;
867 }
868
869 ret = sock->ops->listen(sock, -1);
870 if (ret < 0) {
871 goto error;
872
873 }
874
875 return sock;
876
877 error:
878 if (sock) {
879 lttcomm_destroy_sock(sock);
880 }
881 return NULL;
882 }
883
884 /*
885 * This thread manages the listening for new connections on the network
886 */
887 static void *relay_thread_listener(void *data)
888 {
889 int i, ret, pollfd, err = -1;
890 uint32_t revents, nb_fd;
891 struct lttng_poll_event events;
892 struct lttcomm_sock *control_sock, *data_sock;
893
894 DBG("[thread] Relay listener started");
895
896 health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER);
897
898 health_code_update();
899
900 control_sock = relay_socket_create(control_uri);
901 if (!control_sock) {
902 goto error_sock_control;
903 }
904
905 data_sock = relay_socket_create(data_uri);
906 if (!data_sock) {
907 goto error_sock_relay;
908 }
909
910 /*
911 * Pass 3 as size here for the thread quit pipe, control and
912 * data socket.
913 */
914 ret = create_named_thread_poll_set(&events, 3, "Listener thread epoll");
915 if (ret < 0) {
916 goto error_create_poll;
917 }
918
919 /* Add the control socket */
920 ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP);
921 if (ret < 0) {
922 goto error_poll_add;
923 }
924
925 /* Add the data socket */
926 ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP);
927 if (ret < 0) {
928 goto error_poll_add;
929 }
930
931 lttng_relay_notify_ready();
932
933 if (testpoint(relayd_thread_listener)) {
934 goto error_testpoint;
935 }
936
937 while (1) {
938 health_code_update();
939
940 DBG("Listener accepting connections");
941
942 restart:
943 health_poll_entry();
944 ret = lttng_poll_wait(&events, -1);
945 health_poll_exit();
946 if (ret < 0) {
947 /*
948 * Restart interrupted system call.
949 */
950 if (errno == EINTR) {
951 goto restart;
952 }
953 goto error;
954 }
955
956 nb_fd = ret;
957
958 DBG("Relay new connection received");
959 for (i = 0; i < nb_fd; i++) {
960 health_code_update();
961
962 /* Fetch once the poll data */
963 revents = LTTNG_POLL_GETEV(&events, i);
964 pollfd = LTTNG_POLL_GETFD(&events, i);
965
966 if (!revents) {
967 /*
968 * No activity for this FD (poll
969 * implementation).
970 */
971 continue;
972 }
973
974 /* Thread quit pipe has been closed. Killing thread. */
975 ret = check_thread_quit_pipe(pollfd, revents);
976 if (ret) {
977 err = 0;
978 goto exit;
979 }
980
981 if (revents & LPOLLIN) {
982 /*
983 * A new connection is requested, therefore a
984 * sessiond/consumerd connection is allocated in
985 * this thread, enqueued to a global queue and
986 * dequeued (and freed) in the worker thread.
987 */
988 int val = 1;
989 struct relay_connection *new_conn;
990 struct lttcomm_sock *newsock;
991 enum connection_type type;
992
993 if (pollfd == data_sock->fd) {
994 type = RELAY_DATA;
995 newsock = data_sock->ops->accept(data_sock);
998 } else {
999 assert(pollfd == control_sock->fd);
1000 type = RELAY_CONTROL;
1001 newsock = control_sock->ops->accept(control_sock);
1004 }
1005 if (!newsock) {
1006 PERROR("accepting sock");
1007 goto error;
1008 }
DBG("Relay %s connection accepted, socket %d",
type == RELAY_DATA ? "data" : "control",
newsock->fd);
1009
1010 ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
1011 sizeof(val));
1012 if (ret < 0) {
1013 PERROR("setsockopt inet");
1014 lttcomm_destroy_sock(newsock);
1015 goto error;
1016 }
1017
1018 ret = socket_apply_keep_alive_config(newsock->fd);
1019 if (ret < 0) {
1020 ERR("Failed to apply TCP keep-alive configuration on socket (%i)",
1021 newsock->fd);
1022 lttcomm_destroy_sock(newsock);
1023 goto error;
1024 }
1025
1026 new_conn = connection_create(newsock, type);
1027 if (!new_conn) {
1028 lttcomm_destroy_sock(newsock);
1029 goto error;
1030 }
1031
1032 /* Enqueue request for the dispatcher thread. */
1033 cds_wfcq_enqueue(&relay_conn_queue.head, &relay_conn_queue.tail,
1034 &new_conn->qnode);
1035
1036 /*
1037 * Wake the dispatch queue futex.
1038 * Implicit memory barrier with the
1039 * exchange in cds_wfcq_enqueue.
1040 */
1041 futex_nto1_wake(&relay_conn_queue.futex);
1042 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1043 ERR("socket poll error");
1044 goto error;
1045 } else {
1046 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
1047 goto error;
1048 }
1049 }
1050 }
1051
1052 exit:
1053 error:
1054 error_poll_add:
1055 error_testpoint:
1056 (void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
1057 error_create_poll:
1058 if (data_sock->fd >= 0) {
1059 ret = data_sock->ops->close(data_sock);
1060 if (ret) {
1061 PERROR("close");
1062 }
1063 }
1064 lttcomm_destroy_sock(data_sock);
1065 error_sock_relay:
1066 if (control_sock->fd >= 0) {
1067 ret = control_sock->ops->close(control_sock);
1068 if (ret) {
1069 PERROR("close");
1070 }
1071 }
1072 lttcomm_destroy_sock(control_sock);
1073 error_sock_control:
1074 if (err) {
1075 health_error();
1076 ERR("Health error occurred in %s", __func__);
1077 }
1078 health_unregister(health_relayd);
1079 DBG("Relay listener thread cleanup complete");
1080 lttng_relay_stop_threads();
1081 return NULL;
1082 }
1083
1084 /*
1085 * This thread manages the dispatching of the requests to worker threads
1086 */
1087 static void *relay_thread_dispatcher(void *data)
1088 {
1089 int err = -1;
1090 ssize_t ret;
1091 struct cds_wfcq_node *node;
1092 struct relay_connection *new_conn = NULL;
1093
1094 DBG("[thread] Relay dispatcher started");
1095
1096 health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER);
1097
1098 if (testpoint(relayd_thread_dispatcher)) {
1099 goto error_testpoint;
1100 }
1101
1102 health_code_update();
1103
1104 for (;;) {
1105 health_code_update();
1106
1107 /* Atomically prepare the queue futex */
1108 futex_nto1_prepare(&relay_conn_queue.futex);
1109
1110 if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
1111 break;
1112 }
1113
1114 do {
1115 health_code_update();
1116
1117 /* Dequeue commands */
1118 node = cds_wfcq_dequeue_blocking(&relay_conn_queue.head,
1119 &relay_conn_queue.tail);
1120 if (node == NULL) {
1121 DBG("Woken up but nothing in the relay command queue");
1122 /* Continue thread execution */
1123 break;
1124 }
1125 new_conn = caa_container_of(node, struct relay_connection, qnode);
1126
1127 DBG("Dispatching request waiting on sock %d", new_conn->sock->fd);
1128
1129 /*
1130 * Inform worker thread of the new request. This
1131 * call is blocking so we can be assured that
1132 * the data will be read at some point in time
1133 * or wait to the end of the world :)
1134 */
1135 ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn));
1136 if (ret < 0) {
1137 PERROR("write connection pipe");
1138 connection_put(new_conn);
1139 goto error;
1140 }
1141 } while (node != NULL);
1142
1143 /* Futex wait on queue. Blocking call on futex() */
1144 health_poll_entry();
1145 futex_nto1_wait(&relay_conn_queue.futex);
1146 health_poll_exit();
1147 }
1148
1149 /* Normal exit, no error */
1150 err = 0;
1151
1152 error:
1153 error_testpoint:
1154 if (err) {
1155 health_error();
1156 ERR("Health error occurred in %s", __func__);
1157 }
1158 health_unregister(health_relayd);
1159 DBG("Dispatch thread dying");
1160 lttng_relay_stop_threads();
1161 return NULL;
1162 }
1163
1164 /*
1165 * Set index data from the control port to a given index object.
1166 */
1167 static int set_index_control_data(struct relay_index *index,
1168 struct lttcomm_relayd_index *data,
1169 struct relay_connection *conn)
1170 {
1171 struct ctf_packet_index index_data;
1172
1173 /*
1174 * The index on disk is encoded in big endian.
1175 */
1176 index_data.packet_size = htobe64(data->packet_size);
1177 index_data.content_size = htobe64(data->content_size);
1178 index_data.timestamp_begin = htobe64(data->timestamp_begin);
1179 index_data.timestamp_end = htobe64(data->timestamp_end);
1180 index_data.events_discarded = htobe64(data->events_discarded);
1181 index_data.stream_id = htobe64(data->stream_id);
1182
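/*
 * stream_instance_id and packet_seq_num were added to the index entry
 * by the 2.8 relay protocol; older peers do not send them.
 */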
1183 if (conn->minor >= 8) {
1184 index_data.stream_instance_id = htobe64(data->stream_instance_id);
1185 index_data.packet_seq_num = htobe64(data->packet_seq_num);
1186 }
1187
1188 return relay_index_set_data(index, &index_data);
1189 }
1190
1191 /*
1192 * Handle the RELAYD_CREATE_SESSION command.
1193 *
1194 * On success, send back the session id or else return a negative value.
1195 */
1196 static int relay_create_session(const struct lttcomm_relayd_hdr *recv_hdr,
1197 struct relay_connection *conn,
1198 const struct lttng_buffer_view *payload)
1199 {
1200 int ret = 0;
1201 ssize_t send_ret;
1202 struct relay_session *session;
1203 struct lttcomm_relayd_status_session reply;
1204 char session_name[LTTNG_NAME_MAX];
1205 char hostname[LTTNG_HOST_NAME_MAX];
1206 uint32_t live_timer = 0;
1207 bool snapshot = false;
1208
1209 memset(session_name, 0, LTTNG_NAME_MAX);
1210 memset(hostname, 0, LTTNG_HOST_NAME_MAX);
1211
1212 memset(&reply, 0, sizeof(reply));
1213
1214 switch (conn->minor) {
1215 case 1:
1216 case 2:
1217 case 3:
1218 break;
1219 case 4: /* LTTng sessiond 2.4 */
1220 default:
1221 ret = cmd_create_session_2_4(payload, session_name,
1222 hostname, &live_timer, &snapshot);
1223 }
1224 if (ret < 0) {
1225 goto send_reply;
1226 }
1227
1228 session = session_create(session_name, hostname, live_timer,
1229 snapshot, conn->major, conn->minor);
1230 if (!session) {
1231 ret = -1;
1232 goto send_reply;
1233 }
1234 assert(!conn->session);
1235 conn->session = session;
1236 DBG("Created session %" PRIu64, session->id);
1237
1238 reply.session_id = htobe64(session->id);
1239
1240 send_reply:
1241 if (ret < 0) {
1242 reply.ret_code = htobe32(LTTNG_ERR_FATAL);
1243 } else {
1244 reply.ret_code = htobe32(LTTNG_OK);
1245 }
1246
1247 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1248 if (send_ret < (ssize_t) sizeof(reply)) {
1249 ERR("Failed to send \"create session\" command reply (ret = %zd)",
1250 send_ret);
1251 ret = -1;
1252 }
1253
1254 return ret;
1255 }
1256
1257 /*
1258 * When we have received all the streams and the metadata for a channel,
1259 * we make them visible to the viewer threads.
1260 */
1261 static void publish_connection_local_streams(struct relay_connection *conn)
1262 {
1263 struct relay_stream *stream;
1264 struct relay_session *session = conn->session;
1265
1266 /*
1267 * We publish all streams belonging to a session atomically wrt
1268 * session lock.
1269 */
1270 pthread_mutex_lock(&session->lock);
1271 rcu_read_lock();
1272 cds_list_for_each_entry_rcu(stream, &session->recv_list,
1273 recv_node) {
1274 stream_publish(stream);
1275 }
1276 rcu_read_unlock();
1277
1278 /*
1279 * Inform the viewer that there are new streams in the session.
1280 */
1281 if (session->viewer_attached) {
1282 uatomic_set(&session->new_streams, 1);
1283 }
1284 pthread_mutex_unlock(&session->lock);
1285 }
1286
1287 /*
1288 * relay_add_stream: allocate a new stream for a session
1289 */
1290 static int relay_add_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1291 struct relay_connection *conn,
1292 const struct lttng_buffer_view *payload)
1293 {
1294 int ret;
1295 ssize_t send_ret;
1296 struct relay_session *session = conn->session;
1297 struct relay_stream *stream = NULL;
1298 struct lttcomm_relayd_status_stream reply;
1299 struct ctf_trace *trace = NULL;
1300 uint64_t stream_handle = -1ULL;
1301 char *path_name = NULL, *channel_name = NULL;
1302 uint64_t tracefile_size = 0, tracefile_count = 0;
1303
1304 if (!session || !conn->version_check_done) {
1305 ERR("Trying to add a stream before version check");
1306 ret = -1;
1307 goto end_no_session;
1308 }
1309
1310 switch (session->minor) {
1311 case 1: /* LTTng sessiond 2.1. Allocates path_name and channel_name. */
1312 ret = cmd_recv_stream_2_1(payload, &path_name,
1313 &channel_name, session);
1314 break;
1315 case 2: /* LTTng sessiond 2.2. Allocates path_name and channel_name. */
1316 default:
1317 ret = cmd_recv_stream_2_2(payload, &path_name,
1318 &channel_name, &tracefile_size, &tracefile_count,
1319 session);
1320 break;
1321 }
1322 if (ret < 0) {
1323 goto send_reply;
1324 }
1325
1326 trace = ctf_trace_get_by_path_or_create(session, path_name);
1327 if (!trace) {
1328 goto send_reply;
1329 }
1330 /* This stream here has one reference on the trace. */
1331
1332 pthread_mutex_lock(&last_relay_stream_id_lock);
1333 stream_handle = ++last_relay_stream_id;
1334 pthread_mutex_unlock(&last_relay_stream_id_lock);
1335
1336 /* We pass ownership of path_name and channel_name. */
1337 stream = stream_create(trace, stream_handle, path_name,
1338 channel_name, tracefile_size, tracefile_count);
1339 path_name = NULL;
1340 channel_name = NULL;
1341
1342 /*
1343 * Streams are the owners of their trace. Reference to trace is
1344 * kept within stream_create().
1345 */
1346 ctf_trace_put(trace);
1347
1348 send_reply:
1349 memset(&reply, 0, sizeof(reply));
1350 reply.handle = htobe64(stream_handle);
1351 if (!stream) {
1352 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1353 } else {
1354 reply.ret_code = htobe32(LTTNG_OK);
1355 }
1356
1357 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1358 sizeof(struct lttcomm_relayd_status_stream), 0);
1359 if (send_ret < (ssize_t) sizeof(reply)) {
1360 ERR("Failed to send \"add stream\" command reply (ret = %zd)",
1361 send_ret);
1362 ret = -1;
1363 }
1364
1365 end_no_session:
1366 free(path_name);
1367 free(channel_name);
1368 return ret;
1369 }
1370
1371 /*
1372 * relay_close_stream: close a specific stream
1373 */
1374 static int relay_close_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1375 struct relay_connection *conn,
1376 const struct lttng_buffer_view *payload)
1377 {
1378 int ret;
1379 ssize_t send_ret;
1380 struct relay_session *session = conn->session;
1381 struct lttcomm_relayd_close_stream stream_info;
1382 struct lttcomm_relayd_generic_reply reply;
1383 struct relay_stream *stream;
1384
1385 DBG("Close stream received");
1386
1387 if (!session || !conn->version_check_done) {
1388 ERR("Trying to close a stream before version check");
1389 ret = -1;
1390 goto end_no_session;
1391 }
1392
1393 if (payload->size < sizeof(stream_info)) {
1394 ERR("Unexpected payload size in \"relay_close_stream\": expected >= %zu bytes, got %zu bytes",
1395 sizeof(stream_info), payload->size);
1396 ret = -1;
1397 goto end_no_session;
1398 }
1399 memcpy(&stream_info, payload->data, sizeof(stream_info));
1400 stream_info.stream_id = be64toh(stream_info.stream_id);
1401 stream_info.last_net_seq_num = be64toh(stream_info.last_net_seq_num);
1402
1403 stream = stream_get_by_id(stream_info.stream_id);
1404 if (!stream) {
1405 ret = -1;
1406 goto end;
1407 }
1408
1409 /*
1410 * Set last_net_seq_num before the close flag. Required by data
1411 * pending check.
1412 */
1413 pthread_mutex_lock(&stream->lock);
1414 stream->last_net_seq_num = stream_info.last_net_seq_num;
1415 pthread_mutex_unlock(&stream->lock);
1416
1417 /*
1418 * This is one of the conditions which may trigger a stream close
1419 * with the others being:
1420 * 1) A close command is received for a stream
1421 * 2) The control connection owning the stream is closed
1422 * 3) We have received all of the stream's data _after_ a close
1423 * request.
1424 */
1425 try_stream_close(stream);
1426 if (stream->is_metadata) {
1427 struct relay_viewer_stream *vstream;
1428
1429 vstream = viewer_stream_get_by_id(stream->stream_handle);
1430 if (vstream) {
1431 if (vstream->metadata_sent == stream->metadata_received) {
1432 /*
1433 * Since all the metadata has been sent to the
1434 * viewer and that we have a request to close
1435 * its stream, we can safely teardown the
1436 * corresponding metadata viewer stream.
1437 */
1438 viewer_stream_put(vstream);
1439 }
1440 /* Put local reference. */
1441 viewer_stream_put(vstream);
1442 }
1443 }
1444 stream_put(stream);
1445 ret = 0;
1446
1447 end:
1448 memset(&reply, 0, sizeof(reply));
1449 if (ret < 0) {
1450 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1451 } else {
1452 reply.ret_code = htobe32(LTTNG_OK);
1453 }
1454 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1455 sizeof(struct lttcomm_relayd_generic_reply), 0);
1456 if (send_ret < (ssize_t) sizeof(reply)) {
1457 ERR("Failed to send \"close stream\" command reply (ret = %zd)",
1458 send_ret);
1459 ret = -1;
1460 }
1461
1462 end_no_session:
1463 return ret;
1464 }
1465
1466 /*
1467 * relay_reset_metadata: reset a metadata stream
1468 */
1469 static
1470 int relay_reset_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1471 struct relay_connection *conn,
1472 const struct lttng_buffer_view *payload)
1473 {
1474 int ret;
1475 ssize_t send_ret;
1476 struct relay_session *session = conn->session;
1477 struct lttcomm_relayd_reset_metadata stream_info;
1478 struct lttcomm_relayd_generic_reply reply;
1479 struct relay_stream *stream;
1480
1481 DBG("Reset metadata received");
1482
1483 if (!session || !conn->version_check_done) {
1484 ERR("Trying to reset a metadata stream before version check");
1485 ret = -1;
1486 goto end_no_session;
1487 }
1488
1489 if (payload->size < sizeof(stream_info)) {
1490 ERR("Unexpected payload size in \"relay_reset_metadata\": expected >= %zu bytes, got %zu bytes",
1491 sizeof(stream_info), payload->size);
1492 ret = -1;
1493 goto end_no_session;
1494 }
1495 memcpy(&stream_info, payload->data, sizeof(stream_info));
1496 stream_info.stream_id = be64toh(stream_info.stream_id);
1497 stream_info.version = be64toh(stream_info.version);
1498
1499 DBG("Update metadata to version %" PRIu64, stream_info.version);
1500
1501 /* Unsupported for live sessions for now. */
1502 if (session->live_timer != 0) {
1503 ret = -1;
1504 goto end;
1505 }
1506
1507 stream = stream_get_by_id(stream_info.stream_id);
1508 if (!stream) {
1509 ret = -1;
1510 goto end;
1511 }
1512 pthread_mutex_lock(&stream->lock);
1513 if (!stream->is_metadata) {
1514 ret = -1;
1515 goto end_unlock;
1516 }
1517
1518 ret = utils_rotate_stream_file(stream->path_name, stream->channel_name,
1519 0, 0, -1, -1, stream->stream_fd->fd, NULL,
1520 &stream->stream_fd->fd);
1521 if (ret < 0) {
1522 ERR("Failed to rotate metadata file %s of channel %s",
1523 stream->path_name, stream->channel_name);
1524 goto end_unlock;
1525 }
1526
1527 end_unlock:
1528 pthread_mutex_unlock(&stream->lock);
1529 stream_put(stream);
1530
1531 end:
1532 memset(&reply, 0, sizeof(reply));
1533 if (ret < 0) {
1534 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1535 } else {
1536 reply.ret_code = htobe32(LTTNG_OK);
1537 }
1538 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1539 sizeof(struct lttcomm_relayd_generic_reply), 0);
1540 if (send_ret < (ssize_t) sizeof(reply)) {
1541 ERR("Failed to send \"reset metadata\" command reply (ret = %zd)",
1542 send_ret);
1543 ret = -1;
1544 }
1545
1546 end_no_session:
1547 return ret;
1548 }
1549
1550 /*
1551 * relay_unknown_command: send -1 if received unknown command
1552 */
1553 static void relay_unknown_command(struct relay_connection *conn)
1554 {
1555 struct lttcomm_relayd_generic_reply reply;
1556 ssize_t send_ret;
1557
1558 memset(&reply, 0, sizeof(reply));
1559 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1560 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1561 if (send_ret < (ssize_t) sizeof(reply)) {
1562 ERR("Failed to send \"unknown command\" command reply (ret = %zd)", send_ret);
1563 }
1564 }
1565
1566 /*
1567 * relay_start: send an acknowledgment to the client to tell if we are
1568 * ready to receive data. We are ready if a session is established.
1569 */
1570 static int relay_start(const struct lttcomm_relayd_hdr *recv_hdr,
1571 struct relay_connection *conn,
1572 const struct lttng_buffer_view *payload)
1573 {
1574 int ret = 0;
1575 ssize_t send_ret;
1576 struct lttcomm_relayd_generic_reply reply;
1577 struct relay_session *session = conn->session;
1578
1579 if (!session) {
1580 DBG("Trying to start the streaming without a session established");
1581 ret = htobe32(LTTNG_ERR_UNK);
1582 }
1583
1584 memset(&reply, 0, sizeof(reply));
1585 reply.ret_code = htobe32(LTTNG_OK);
1586 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1587 sizeof(reply), 0);
1588 if (send_ret < (ssize_t) sizeof(reply)) {
1589 ERR("Failed to send \"relay_start\" command reply (ret = %zd)",
1590 send_ret);
1591 ret = -1;
1592 }
1593
1594 return ret;
1595 }
1596
1597 /*
1598 * Append padding to the file pointed to by the file descriptor fd.
1599 */
1600 static int write_padding_to_file(int fd, uint32_t size)
1601 {
1602 ssize_t ret = 0;
1603 char *zeros;
1604
1605 if (size == 0) {
1606 goto end;
1607 }
1608
1609 zeros = zmalloc(size);
1610 if (zeros == NULL) {
1611 PERROR("zmalloc zeros for padding");
1612 ret = -1;
1613 goto end;
1614 }
1615
1616 ret = lttng_write(fd, zeros, size);
1617 if (ret < size) {
1618 PERROR("write padding to file");
1619 }
1620
1621 free(zeros);
1622
1623 end:
1624 return ret;
1625 }
1626
1627 /*
1628 * relay_recv_metadata: receive the metadata for the session.
1629 */
1630 static int relay_recv_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1631 struct relay_connection *conn,
1632 const struct lttng_buffer_view *payload)
1633 {
1634 int ret = 0;
1635 ssize_t size_ret;
1636 struct relay_session *session = conn->session;
1637 struct lttcomm_relayd_metadata_payload metadata_payload_header;
1638 struct relay_stream *metadata_stream;
1639 uint64_t metadata_payload_size;
1640
1641 if (!session) {
1642 ERR("Metadata sent before version check");
1643 ret = -1;
1644 goto end;
1645 }
1646
1647 if (recv_hdr->data_size < sizeof(struct lttcomm_relayd_metadata_payload)) {
1648 ERR("Incorrect data size");
1649 ret = -1;
1650 goto end;
1651 }
1652 metadata_payload_size = recv_hdr->data_size -
1653 sizeof(struct lttcomm_relayd_metadata_payload);
1654
1655 memcpy(&metadata_payload_header, payload->data,
1656 sizeof(metadata_payload_header));
1657 metadata_payload_header.stream_id = be64toh(
1658 metadata_payload_header.stream_id);
1659 metadata_payload_header.padding_size = be32toh(
1660 metadata_payload_header.padding_size);
1661
1662 metadata_stream = stream_get_by_id(metadata_payload_header.stream_id);
1663 if (!metadata_stream) {
1664 ret = -1;
1665 goto end;
1666 }
1667
1668 pthread_mutex_lock(&metadata_stream->lock);
1669
1670 size_ret = lttng_write(metadata_stream->stream_fd->fd,
1671 payload->data + sizeof(metadata_payload_header),
1672 metadata_payload_size);
1673 if (size_ret < (int64_t) metadata_payload_size) {
1674 ERR("Relay error writing metadata on file");
1675 ret = -1;
1676 goto end_put;
1677 }
1678
1679 size_ret = write_padding_to_file(metadata_stream->stream_fd->fd,
1680 metadata_payload_header.padding_size);
1681 if (size_ret < (int64_t) metadata_payload_header.padding_size) {
1682 ret = -1;
1683 goto end_put;
1684 }
1685
1686 metadata_stream->metadata_received +=
1687 metadata_payload_size + metadata_payload_header.padding_size;
1688 DBG2("Relay metadata written. Updated metadata_received %" PRIu64,
1689 metadata_stream->metadata_received);
1690
1691 end_put:
1692 pthread_mutex_unlock(&metadata_stream->lock);
1693 stream_put(metadata_stream);
1694 end:
1695 return ret;
1696 }
1697
1698 /*
1699 * relay_send_version: send relayd version number
1700 */
1701 static int relay_send_version(const struct lttcomm_relayd_hdr *recv_hdr,
1702 struct relay_connection *conn,
1703 const struct lttng_buffer_view *payload)
1704 {
1705 int ret;
1706 ssize_t send_ret;
1707 struct lttcomm_relayd_version reply, msg;
1708 bool compatible = true;
1709
1710 conn->version_check_done = true;
1711
1712 /* Get version from the other side. */
1713 if (payload->size < sizeof(msg)) {
1714 ERR("Unexpected payload size in \"relay_send_version\": expected >= %zu bytes, got %zu bytes",
1715 sizeof(msg), payload->size);
1716 ret = -1;
1717 goto end;
1718 }
1719
1720 memcpy(&msg, payload->data, sizeof(msg));
1721 msg.major = be32toh(msg.major);
1722 msg.minor = be32toh(msg.minor);
1723
1724 memset(&reply, 0, sizeof(reply));
1725 reply.major = RELAYD_VERSION_COMM_MAJOR;
1726 reply.minor = RELAYD_VERSION_COMM_MINOR;
1727
1728 /* Major versions must be the same */
1729 if (reply.major != msg.major) {
1730 DBG("Incompatible major versions (%u vs %u), deleting session",
1731 reply.major, msg.major);
1732 compatible = false;
1733 }
1734
1735 conn->major = reply.major;
1736 /* We adapt to the lowest compatible version */
1737 if (reply.minor <= msg.minor) {
1738 conn->minor = reply.minor;
1739 } else {
1740 conn->minor = msg.minor;
1741 }
1742
1743 reply.major = htobe32(reply.major);
1744 reply.minor = htobe32(reply.minor);
1745 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1746 sizeof(reply), 0);
1747 if (send_ret < (ssize_t) sizeof(reply)) {
1748 ERR("Failed to send \"send version\" command reply (ret = %zd)",
1749 send_ret);
1750 ret = -1;
1751 goto end;
1752 } else {
1753 ret = 0;
1754 }
1755
1756 if (!compatible) {
1757 ret = -1;
1758 goto end;
1759 }
1760
1761 DBG("Version check done using protocol %u.%u", conn->major,
1762 conn->minor);
1763
1764 end:
1765 return ret;
1766 }
1767
1768 /*
1769 * Check for data pending for a given stream id from the session daemon.
1770 */
1771 static int relay_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1772 struct relay_connection *conn,
1773 const struct lttng_buffer_view *payload)
1774 {
1775 struct relay_session *session = conn->session;
1776 struct lttcomm_relayd_data_pending msg;
1777 struct lttcomm_relayd_generic_reply reply;
1778 struct relay_stream *stream;
1779 ssize_t send_ret;
1780 int ret;
1781
1782 DBG("Data pending command received");
1783
1784 if (!session || !conn->version_check_done) {
1785 ERR("Trying to check for data before version check");
1786 ret = -1;
1787 goto end_no_session;
1788 }
1789
1790 if (payload->size < sizeof(msg)) {
1791 ERR("Unexpected payload size in \"relay_data_pending\": expected >= %zu bytes, got %zu bytes",
1792 sizeof(msg), payload->size);
1793 ret = -1;
1794 goto end_no_session;
1795 }
1796 memcpy(&msg, payload->data, sizeof(msg));
1797 msg.stream_id = be64toh(msg.stream_id);
1798 msg.last_net_seq_num = be64toh(msg.last_net_seq_num);
1799
1800 stream = stream_get_by_id(msg.stream_id);
1801 if (stream == NULL) {
1802 ret = -1;
1803 goto end;
1804 }
1805
1806 pthread_mutex_lock(&stream->lock);
1807
1808 DBG("Data pending for stream id %" PRIu64 " prev_seq %" PRIu64
1809 " and last_seq %" PRIu64, msg.stream_id,
1810 stream->prev_seq, msg.last_net_seq_num);
1811
1812 /* Avoid wrapping issue */
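/*
 * Computing the difference in the unsigned domain and reinterpreting it
 * as a signed 64-bit value keeps this comparison valid even when the
 * sequence numbers wrap around.
 */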
1813 if (((int64_t) (stream->prev_seq - msg.last_net_seq_num)) >= 0) {
1814 /* Data has in fact been written and is NOT pending */
1815 ret = 0;
1816 } else {
1817 /* Data still being streamed thus pending */
1818 ret = 1;
1819 }
1820
1821 stream->data_pending_check_done = true;
1822 pthread_mutex_unlock(&stream->lock);
1823
1824 stream_put(stream);
1825 end:
1826
1827 memset(&reply, 0, sizeof(reply));
1828 reply.ret_code = htobe32(ret);
1829 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1830 if (send_ret < (ssize_t) sizeof(reply)) {
1831 ERR("Failed to send \"data pending\" command reply (ret = %zd)",
1832 send_ret);
1833 ret = -1;
1834 }
1835
1836 end_no_session:
1837 return ret;
1838 }
1839
1840 /*
1841 * Wait for the control socket to reach a quiescent state.
1842 *
1843 * Note that for now, when receiving this command from the session
1844 * daemon, this means that every subsequent command or data received on
1845 * the control socket has been handled. So, this is why we simply return
1846 * OK here.
1847 */
1848 static int relay_quiescent_control(const struct lttcomm_relayd_hdr *recv_hdr,
1849 struct relay_connection *conn,
1850 const struct lttng_buffer_view *payload)
1851 {
1852 int ret;
1853 ssize_t send_ret;
1854 struct relay_stream *stream;
1855 struct lttcomm_relayd_quiescent_control msg;
1856 struct lttcomm_relayd_generic_reply reply;
1857
1858 DBG("Checking quiescent state on control socket");
1859
1860 if (!conn->session || !conn->version_check_done) {
1861 ERR("Trying to check for data before version check");
1862 ret = -1;
1863 goto end_no_session;
1864 }
1865
1866 if (payload->size < sizeof(msg)) {
1867 ERR("Unexpected payload size in \"relay_quiescent_control\": expected >= %zu bytes, got %zu bytes",
1868 sizeof(msg), payload->size);
1869 ret = -1;
1870 goto end_no_session;
1871 }
1872 memcpy(&msg, payload->data, sizeof(msg));
1873 msg.stream_id = be64toh(msg.stream_id);
1874
1875 stream = stream_get_by_id(msg.stream_id);
1876 if (!stream) {
1877 goto reply;
1878 }
1879 pthread_mutex_lock(&stream->lock);
1880 stream->data_pending_check_done = true;
1881 pthread_mutex_unlock(&stream->lock);
1882
1883 DBG("Relay quiescent control pending flag set to %" PRIu64, msg.stream_id);
1884 stream_put(stream);
1885 reply:
1886 memset(&reply, 0, sizeof(reply));
1887 reply.ret_code = htobe32(LTTNG_OK);
1888 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1889 if (send_ret < (ssize_t) sizeof(reply)) {
1890 ERR("Failed to send \"quiescent control\" command reply (ret = %zd)",
1891 send_ret);
1892 ret = -1;
1893 } else {
1894 ret = 0;
1895 }
1896
1897 end_no_session:
1898 return ret;
1899 }
1900
1901 /*
1902 * Initialize a data pending command. This means that a consumer is about
1903 * to ask for data pending for each stream it holds. Simply iterate over
1904 * all streams of a session and clear the data_pending_check_done flag.
1905 *
1906 * This command returns to the client a LTTNG_OK code.
1907 */
1908 static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1909 struct relay_connection *conn,
1910 const struct lttng_buffer_view *payload)
1911 {
1912 int ret;
1913 ssize_t send_ret;
1914 struct lttng_ht_iter iter;
1915 struct lttcomm_relayd_begin_data_pending msg;
1916 struct lttcomm_relayd_generic_reply reply;
1917 struct relay_stream *stream;
1918
1919 assert(recv_hdr);
1920 assert(conn);
1921
1922 DBG("Init streams for data pending");
1923
1924 if (!conn->session || !conn->version_check_done) {
1925 ERR("Trying to check for data before version check");
1926 ret = -1;
1927 goto end_no_session;
1928 }
1929
1930 if (payload->size < sizeof(msg)) {
1931 ERR("Unexpected payload size in \"relay_begin_data_pending\": expected >= %zu bytes, got %zu bytes",
1932 sizeof(msg), payload->size);
1933 ret = -1;
1934 goto end_no_session;
1935 }
1936 memcpy(&msg, payload->data, sizeof(msg));
1937 msg.session_id = be64toh(msg.session_id);
1938
1939 /*
1940 * Iterate over all streams to set the begin data pending flag.
1941 * For now, the streams are indexed by stream handle so we have
1942 * to iterate over all streams to find the one associated with
1943 * the right session_id.
1944 */
1945 rcu_read_lock();
1946 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
1947 node.node) {
1948 if (!stream_get(stream)) {
1949 continue;
1950 }
1951 if (stream->trace->session->id == msg.session_id) {
1952 pthread_mutex_lock(&stream->lock);
1953 stream->data_pending_check_done = false;
1954 pthread_mutex_unlock(&stream->lock);
1955 DBG("Set begin data pending flag to stream %" PRIu64,
1956 stream->stream_handle);
1957 }
1958 stream_put(stream);
1959 }
1960 rcu_read_unlock();
1961
1962 memset(&reply, 0, sizeof(reply));
1963 /* All good, send back reply. */
1964 reply.ret_code = htobe32(LTTNG_OK);
1965
1966 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1967 if (send_ret < (ssize_t) sizeof(reply)) {
1968 ERR("Failed to send \"begin data pending\" command reply (ret = %zd)",
1969 send_ret);
1970 ret = -1;
1971 } else {
1972 ret = 0;
1973 }
1974
1975 end_no_session:
1976 return ret;
1977 }
1978
1979 /*
1980 * End data pending command. This will check, for a given session id, if
1981 * each stream associated with it has its data_pending_check_done flag
1982 * set. If not, this means that the client lost track of the stream but
1983 * the data is still being streamed on our side. In this case, we inform
1984 * the client that data is in flight.
1985 *
1986 * Return to the client if there is data in flight or not with a ret_code.
1987 */
1988 static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1989 struct relay_connection *conn,
1990 const struct lttng_buffer_view *payload)
1991 {
1992 int ret;
1993 ssize_t send_ret;
1994 struct lttng_ht_iter iter;
1995 struct lttcomm_relayd_end_data_pending msg;
1996 struct lttcomm_relayd_generic_reply reply;
1997 struct relay_stream *stream;
1998 uint32_t is_data_inflight = 0;
1999
2000 DBG("End data pending command");
2001
2002 if (!conn->session || !conn->version_check_done) {
2003 ERR("Trying to check for data before version check");
2004 ret = -1;
2005 goto end_no_session;
2006 }
2007
2008 if (payload->size < sizeof(msg)) {
2009 ERR("Unexpected payload size in \"relay_end_data_pending\": expected >= %zu bytes, got %zu bytes",
2010 sizeof(msg), payload->size);
2011 ret = -1;
2012 goto end_no_session;
2013 }
2014 memcpy(&msg, payload->data, sizeof(msg));
2015 msg.session_id = be64toh(msg.session_id);
2016
2017 /*
2018 * Iterate over all streams to see if the begin data pending
2019 * flag is set.
2020 */
2021 rcu_read_lock();
2022 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
2023 node.node) {
2024 if (!stream_get(stream)) {
2025 continue;
2026 }
2027 if (stream->trace->session->id != msg.session_id) {
2028 stream_put(stream);
2029 continue;
2030 }
2031 pthread_mutex_lock(&stream->lock);
2032 if (!stream->data_pending_check_done) {
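/*
 * Data is considered in flight if the stream has not been closed yet,
 * or if it has not yet received everything up to the last sequence
 * number announced at close time (wrap-around-safe comparison).
 */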
2033 if (!stream->closed || !(((int64_t) (stream->prev_seq - stream->last_net_seq_num)) >= 0)) {
2034 is_data_inflight = 1;
2035 DBG("Data is still in flight for stream %" PRIu64,
2036 stream->stream_handle);
2037 pthread_mutex_unlock(&stream->lock);
2038 stream_put(stream);
2039 break;
2040 }
2041 }
2042 pthread_mutex_unlock(&stream->lock);
2043 stream_put(stream);
2044 }
2045 rcu_read_unlock();
2046
2047 memset(&reply, 0, sizeof(reply));
2048 /* All good, send back reply. */
2049 reply.ret_code = htobe32(is_data_inflight);
2050
2051 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2052 if (send_ret < (ssize_t) sizeof(reply)) {
2053 ERR("Failed to send \"end data pending\" command reply (ret = %zd)",
2054 send_ret);
2055 ret = -1;
2056 } else {
2057 ret = 0;
2058 }
2059
2060 end_no_session:
2061 return ret;
2062 }
2063
2064 /*
2065 * Receive an index for a specific stream.
2066 *
2067 * Return 0 on success else a negative value.
2068 */
2069 static int relay_recv_index(const struct lttcomm_relayd_hdr *recv_hdr,
2070 struct relay_connection *conn,
2071 const struct lttng_buffer_view *payload)
2072 {
2073 int ret;
2074 ssize_t send_ret;
2075 struct relay_session *session = conn->session;
2076 struct lttcomm_relayd_index index_info;
2077 struct relay_index *index;
2078 struct lttcomm_relayd_generic_reply reply;
2079 struct relay_stream *stream;
2080 size_t msg_len;
2081
2082 assert(conn);
2083
2084 DBG("Relay receiving index");
2085
2086 if (!session || !conn->version_check_done) {
2087 ERR("Trying to close a stream before version check");
2088 ret = -1;
2089 goto end_no_session;
2090 }
2091
2092 msg_len = lttcomm_relayd_index_len(
2093 lttng_to_index_major(conn->major, conn->minor),
2094 lttng_to_index_minor(conn->major, conn->minor));
2095 if (payload->size < msg_len) {
2096 ERR("Unexpected payload size in \"relay_recv_index\": expected >= %zu bytes, got %zu bytes",
2097 msg_len, payload->size);
2098 ret = -1;
2099 goto end_no_session;
2100 }
2101 memcpy(&index_info, payload->data, msg_len);
2102 index_info.relay_stream_id = be64toh(index_info.relay_stream_id);
2103 index_info.net_seq_num = be64toh(index_info.net_seq_num);
2104 index_info.packet_size = be64toh(index_info.packet_size);
2105 index_info.content_size = be64toh(index_info.content_size);
2106 index_info.timestamp_begin = be64toh(index_info.timestamp_begin);
2107 index_info.timestamp_end = be64toh(index_info.timestamp_end);
2108 index_info.events_discarded = be64toh(index_info.events_discarded);
2109 index_info.stream_id = be64toh(index_info.stream_id);
2110
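/*
 * The stream instance id and packet sequence number fields only
 * exist in the 2.8+ versions of the protocol.
 */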
2111 if (conn->minor >= 8) {
2112 index_info.stream_instance_id =
2113 be64toh(index_info.stream_instance_id);
2114 index_info.packet_seq_num = be64toh(index_info.packet_seq_num);
2115 }
2116
2117 stream = stream_get_by_id(index_info.relay_stream_id);
2118 if (!stream) {
2119 ERR("stream_get_by_id not found");
2120 ret = -1;
2121 goto end;
2122 }
2123 pthread_mutex_lock(&stream->lock);
2124
2125 /* Live beacon handling */
2126 if (index_info.packet_size == 0) {
2127 DBG("Received live beacon for stream %" PRIu64,
2128 stream->stream_handle);
2129
2130 /*
2131 * Only flag a stream inactive when it has already
2132 * received data and no indexes are in flight.
2133 */
2134 if (stream->index_received_seqcount > 0
2135 && stream->indexes_in_flight == 0) {
2136 stream->beacon_ts_end = index_info.timestamp_end;
2137 }
2138 ret = 0;
2139 goto end_stream_put;
2140 } else {
2141 stream->beacon_ts_end = -1ULL;
2142 }
2143
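/* Record the CTF stream id the first time an index refers to this stream. */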
2144 if (stream->ctf_stream_id == -1ULL) {
2145 stream->ctf_stream_id = index_info.stream_id;
2146 }
2147 index = relay_index_get_by_id_or_create(stream, index_info.net_seq_num);
2148 if (!index) {
2149 ret = -1;
2150 ERR("relay_index_get_by_id_or_create index NULL");
2151 goto end_stream_put;
2152 }
2153 if (set_index_control_data(index, &index_info, conn)) {
2154 ERR("set_index_control_data error");
2155 relay_index_put(index);
2156 ret = -1;
2157 goto end_stream_put;
2158 }
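/*
 * Attempt to flush the index to disk: 0 means it was complete and
 * written out, a positive value means no flush occurred (the index is
 * not complete yet), and a negative value indicates an error.
 */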
2159 ret = relay_index_try_flush(index);
2160 if (ret == 0) {
2161 tracefile_array_commit_seq(stream->tfa);
2162 stream->index_received_seqcount++;
2163 } else if (ret > 0) {
2164 /* no flush. */
2165 ret = 0;
2166 } else {
2167 ERR("relay_index_try_flush error %d", ret);
2168 relay_index_put(index);
2169 ret = -1;
2170 }
2171
2172 end_stream_put:
2173 pthread_mutex_unlock(&stream->lock);
2174 stream_put(stream);
2175
2176 end:
2177
2178 memset(&reply, 0, sizeof(reply));
2179 if (ret < 0) {
2180 reply.ret_code = htobe32(LTTNG_ERR_UNK);
2181 } else {
2182 reply.ret_code = htobe32(LTTNG_OK);
2183 }
2184 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2185 if (send_ret < (ssize_t) sizeof(reply)) {
2186 ERR("Failed to send \"recv index\" command reply (ret = %zd)", send_ret);
2187 ret = -1;
2188 }
2189
2190 end_no_session:
2191 return ret;
2192 }
2193
2194 /*
2195 * Receive the streams_sent message.
2196 *
2197 * Return 0 on success else a negative value.
2198 */
2199 static int relay_streams_sent(const struct lttcomm_relayd_hdr *recv_hdr,
2200 struct relay_connection *conn,
2201 const struct lttng_buffer_view *payload)
2202 {
2203 int ret;
2204 ssize_t send_ret;
2205 struct lttcomm_relayd_generic_reply reply;
2206
2207 assert(conn);
2208
2209 DBG("Relay receiving streams_sent");
2210
2211 if (!conn->session || !conn->version_check_done) {
2212 ERR("Trying to close a stream before version check");
2213 ret = -1;
2214 goto end_no_session;
2215 }
2216
2217 /*
2218 * Publish every pending stream in the connection's recv list; they are
2219 * now ready to be used by the viewer.
2220 */
2221 publish_connection_local_streams(conn);
2222
2223 memset(&reply, 0, sizeof(reply));
2224 reply.ret_code = htobe32(LTTNG_OK);
2225 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2226 if (send_ret < (ssize_t) sizeof(reply)) {
2227 ERR("Failed to send \"streams sent\" command reply (ret = %zd)",
2228 send_ret);
2229 ret = -1;
2230 } else {
2231 /* Success. */
2232 ret = 0;
2233 }
2234
2235 end_no_session:
2236 return ret;
2237 }
2238
2239 #define DBG_CMD(cmd_name, conn) \
2240 DBG3("Processing \"%s\" command for socket %i", cmd_name, conn->sock->fd);
2241
2242 static int relay_process_control_command(struct relay_connection *conn,
2243 const struct lttcomm_relayd_hdr *header,
2244 const struct lttng_buffer_view *payload)
2245 {
2246 int ret = 0;
2247
2248 switch (header->cmd) {
2249 case RELAYD_CREATE_SESSION:
2250 DBG_CMD("RELAYD_CREATE_SESSION", conn);
2251 ret = relay_create_session(header, conn, payload);
2252 break;
2253 case RELAYD_ADD_STREAM:
2254 DBG_CMD("RELAYD_ADD_STREAM", conn);
2255 ret = relay_add_stream(header, conn, payload);
2256 break;
2257 case RELAYD_START_DATA:
2258 DBG_CMD("RELAYD_START_DATA", conn);
2259 ret = relay_start(header, conn, payload);
2260 break;
2261 case RELAYD_SEND_METADATA:
2262 DBG_CMD("RELAYD_SEND_METADATA", conn);
2263 ret = relay_recv_metadata(header, conn, payload);
2264 break;
2265 case RELAYD_VERSION:
2266 DBG_CMD("RELAYD_VERSION", conn);
2267 ret = relay_send_version(header, conn, payload);
2268 break;
2269 case RELAYD_CLOSE_STREAM:
2270 DBG_CMD("RELAYD_CLOSE_STREAM", conn);
2271 ret = relay_close_stream(header, conn, payload);
2272 break;
2273 case RELAYD_DATA_PENDING:
2274 DBG_CMD("RELAYD_DATA_PENDING", conn);
2275 ret = relay_data_pending(header, conn, payload);
2276 break;
2277 case RELAYD_QUIESCENT_CONTROL:
2278 DBG_CMD("RELAYD_QUIESCENT_CONTROL", conn);
2279 ret = relay_quiescent_control(header, conn, payload);
2280 break;
2281 case RELAYD_BEGIN_DATA_PENDING:
2282 DBG_CMD("RELAYD_BEGIN_DATA_PENDING", conn);
2283 ret = relay_begin_data_pending(header, conn, payload);
2284 break;
2285 case RELAYD_END_DATA_PENDING:
2286 DBG_CMD("RELAYD_END_DATA_PENDING", conn);
2287 ret = relay_end_data_pending(header, conn, payload);
2288 break;
2289 case RELAYD_SEND_INDEX:
2290 DBG_CMD("RELAYD_SEND_INDEX", conn);
2291 ret = relay_recv_index(header, conn, payload);
2292 break;
2293 case RELAYD_STREAMS_SENT:
2294 DBG_CMD("RELAYD_STREAMS_SENT", conn);
2295 ret = relay_streams_sent(header, conn, payload);
2296 break;
2297 case RELAYD_RESET_METADATA:
2298 DBG_CMD("RELAYD_RESET_METADATA", conn);
2299 ret = relay_reset_metadata(header, conn, payload);
2300 break;
2301 case RELAYD_UPDATE_SYNC_INFO:
2302 default:
2303 ERR("Received unknown command (%u)", header->cmd);
2304 relay_unknown_command(conn);
2305 ret = -1;
2306 goto end;
2307 }
2308
2309 end:
2310 return ret;
2311 }
2312
2313 static enum relay_connection_status relay_process_control_receive_payload(
2314 struct relay_connection *conn)
2315 {
2316 int ret = 0;
2317 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2318 struct lttng_dynamic_buffer *reception_buffer =
2319 &conn->protocol.ctrl.reception_buffer;
2320 struct ctrl_connection_state_receive_payload *state =
2321 &conn->protocol.ctrl.state.receive_payload;
2322 struct lttng_buffer_view payload_view;
2323
2324 if (state->left_to_receive == 0) {
2325 /* Short-circuit for payload-less commands. */
2326 goto reception_complete;
2327 }
2328 ret = conn->sock->ops->recvmsg(conn->sock,
2329 reception_buffer->data + state->received,
2330 state->left_to_receive, MSG_DONTWAIT);
2331 if (ret < 0) {
2332 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2333 PERROR("Unable to receive command payload on sock %d",
2334 conn->sock->fd);
2335 status = RELAY_CONNECTION_STATUS_ERROR;
2336 }
2337 goto end;
2338 } else if (ret == 0) {
2339 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2340 status = RELAY_CONNECTION_STATUS_CLOSED;
2341 goto end;
2342 }
2343
2344 assert(ret > 0);
2345 assert(ret <= state->left_to_receive);
2346
2347 state->left_to_receive -= ret;
2348 state->received += ret;
2349
2350 if (state->left_to_receive > 0) {
2351 /*
2352 * Can't transition to the protocol's next state, wait to
2353 * receive the rest of the payload.
2354 */
2355 DBG3("Partial reception of control connection protocol payload (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2356 state->received, state->left_to_receive,
2357 conn->sock->fd);
2358 goto end;
2359 }
2360
2361 reception_complete:
2362 DBG("Done receiving control command payload: fd = %i, payload size = %" PRIu64 " bytes",
2363 conn->sock->fd, state->received);
2364 /*
2365 * The payload required to process the command has been received.
2366 * A view to the reception buffer is forwarded to the various
2367 * commands and the state of the control is reset on success.
2368 *
2369 * Commands are responsible for sending their reply to the peer.
2370 */
2371 payload_view = lttng_buffer_view_from_dynamic_buffer(reception_buffer,
2372 0, -1);
2373 ret = relay_process_control_command(conn,
2374 &state->header, &payload_view);
2375 if (ret < 0) {
2376 status = RELAY_CONNECTION_STATUS_ERROR;
2377 goto end;
2378 }
2379
2380 ret = connection_reset_protocol_state(conn);
2381 if (ret) {
2382 status = RELAY_CONNECTION_STATUS_ERROR;
2383 }
2384 end:
2385 return status;
2386 }
2387
2388 static enum relay_connection_status relay_process_control_receive_header(
2389 struct relay_connection *conn)
2390 {
2391 int ret = 0;
2392 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2393 struct lttcomm_relayd_hdr header;
2394 struct lttng_dynamic_buffer *reception_buffer =
2395 &conn->protocol.ctrl.reception_buffer;
2396 struct ctrl_connection_state_receive_header *state =
2397 &conn->protocol.ctrl.state.receive_header;
2398
2399 assert(state->left_to_receive != 0);
2400
2401 ret = conn->sock->ops->recvmsg(conn->sock,
2402 reception_buffer->data + state->received,
2403 state->left_to_receive, MSG_DONTWAIT);
2404 if (ret < 0) {
2405 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2406 PERROR("Unable to receive control command header on sock %d",
2407 conn->sock->fd);
2408 status = RELAY_CONNECTION_STATUS_ERROR;
2409 }
2410 goto end;
2411 } else if (ret == 0) {
2412 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2413 status = RELAY_CONNECTION_STATUS_CLOSED;
2414 goto end;
2415 }
2416
2417 assert(ret > 0);
2418 assert(ret <= state->left_to_receive);
2419
2420 state->left_to_receive -= ret;
2421 state->received += ret;
2422
2423 if (state->left_to_receive > 0) {
2424 /*
2425 * Can't transition to the protocol's next state, wait to
2426 * receive the rest of the header.
2427 */
2428 DBG3("Partial reception of control connection protocol header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2429 state->received, state->left_to_receive,
2430 conn->sock->fd);
2431 goto end;
2432 }
2433
2434 /* Transition to next state: receiving the command's payload. */
2435 conn->protocol.ctrl.state_id =
2436 CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD;
2437 memcpy(&header, reception_buffer->data, sizeof(header));
2438 header.circuit_id = be64toh(header.circuit_id);
2439 header.data_size = be64toh(header.data_size);
2440 header.cmd = be32toh(header.cmd);
2441 header.cmd_version = be32toh(header.cmd_version);
2442 memcpy(&conn->protocol.ctrl.state.receive_payload.header,
2443 &header, sizeof(header));
2444
2445 DBG("Done receiving control command header: fd = %i, cmd = %" PRIu32 ", cmd_version = %" PRIu32 ", payload size = %" PRIu64 " bytes",
2446 conn->sock->fd, header.cmd, header.cmd_version,
2447 header.data_size);
2448
2449 if (header.data_size > DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE) {
2450 ERR("Command header indicates a payload (%" PRIu64 " bytes) that exceeds the maximal payload size allowed on a control connection.",
2451 header.data_size);
2452 status = RELAY_CONNECTION_STATUS_ERROR;
2453 goto end;
2454 }
2455
2456 conn->protocol.ctrl.state.receive_payload.left_to_receive =
2457 header.data_size;
2458 conn->protocol.ctrl.state.receive_payload.received = 0;
2459 ret = lttng_dynamic_buffer_set_size(reception_buffer,
2460 header.data_size);
2461 if (ret) {
2462 status = RELAY_CONNECTION_STATUS_ERROR;
2463 goto end;
2464 }
2465
2466 if (header.data_size == 0) {
2467 /*
2468 * Manually invoke the next state as the poll loop
2469 * will not wake up to allow us to proceed further.
2470 */
2471 status = relay_process_control_receive_payload(conn);
2472 }
2473 end:
2474 return status;
2475 }
2476
2477 /*
2478 * Process the commands received on the control socket
2479 */
2480 static enum relay_connection_status relay_process_control(
2481 struct relay_connection *conn)
2482 {
2483 enum relay_connection_status status;
2484
2485 switch (conn->protocol.ctrl.state_id) {
2486 case CTRL_CONNECTION_STATE_RECEIVE_HEADER:
2487 status = relay_process_control_receive_header(conn);
2488 break;
2489 case CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD:
2490 status = relay_process_control_receive_payload(conn);
2491 break;
2492 default:
2493 ERR("Unknown control connection protocol state encountered.");
2494 abort();
2495 }
2496
2497 return status;
2498 }
2499
2500 /*
2501 * Handle index for a data stream.
2502 *
2503 * Called with the stream lock held.
2504 *
2505 * Return 0 on success else a negative value.
2506 */
2507 static int handle_index_data(struct relay_stream *stream, uint64_t net_seq_num,
2508 bool rotate_index)
2509 {
2510 int ret = 0;
2511 uint64_t data_offset;
2512 struct relay_index *index;
2513
2514 /* Get data offset because we are about to update the index. */
2515 data_offset = htobe64(stream->tracefile_size_current);
2516
2517 DBG("handle_index_data: stream %" PRIu64 " net_seq_num %" PRIu64 " data offset %" PRIu64,
2518 stream->stream_handle, net_seq_num, stream->tracefile_size_current);
2519
2520 /*
2521 * Look up an existing index for that stream id/sequence number. If
2522 * it exists, the index was already received on the control connection;
2523 * either way, record where this packet's data starts in the trace file.
2524 */
2525 index = relay_index_get_by_id_or_create(stream, net_seq_num);
2526 if (!index) {
2527 ret = -1;
2528 goto end;
2529 }
2530
2531 if (rotate_index || !stream->index_file) {
2532 uint32_t major, minor;
2533
2534 /* Put ref on previous index_file. */
2535 if (stream->index_file) {
2536 lttng_index_file_put(stream->index_file);
2537 stream->index_file = NULL;
2538 }
2539 major = stream->trace->session->major;
2540 minor = stream->trace->session->minor;
2541 stream->index_file = lttng_index_file_create(stream->path_name,
2542 stream->channel_name,
2543 -1, -1, stream->tracefile_size,
2544 tracefile_array_get_file_index_head(stream->tfa),
2545 lttng_to_index_major(major, minor),
2546 lttng_to_index_minor(major, minor));
2547 if (!stream->index_file) {
2548 ret = -1;
2549 /* Put self-ref for this index due to error. */
2550 relay_index_put(index);
2551 index = NULL;
2552 goto end;
2553 }
2554 }
2555
2556 if (relay_index_set_file(index, stream->index_file, data_offset)) {
2557 ret = -1;
2558 /* Put self-ref for this index due to error. */
2559 relay_index_put(index);
2560 index = NULL;
2561 goto end;
2562 }
2563
2564 ret = relay_index_try_flush(index);
2565 if (ret == 0) {
2566 tracefile_array_commit_seq(stream->tfa);
2567 stream->index_received_seqcount++;
2568 } else if (ret > 0) {
2569 /* No flush. */
2570 ret = 0;
2571 } else {
2572 /* Put self-ref for this index due to error. */
2573 relay_index_put(index);
2574 index = NULL;
2575 ret = -1;
2576 }
2577 end:
2578 return ret;
2579 }
2580
2581 static enum relay_connection_status relay_process_data_receive_header(
2582 struct relay_connection *conn)
2583 {
2584 int ret;
2585 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2586 struct data_connection_state_receive_header *state =
2587 &conn->protocol.data.state.receive_header;
2588 struct lttcomm_relayd_data_hdr header;
2589 struct relay_stream *stream;
2590
2591 assert(state->left_to_receive != 0);
2592
2593 ret = conn->sock->ops->recvmsg(conn->sock,
2594 state->header_reception_buffer + state->received,
2595 state->left_to_receive, MSG_DONTWAIT);
2596 if (ret < 0) {
2597 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2598 PERROR("Unable to receive data header on sock %d", conn->sock->fd);
2599 status = RELAY_CONNECTION_STATUS_ERROR;
2600 }
2601 goto end;
2602 } else if (ret == 0) {
2603 /* Orderly shutdown. Not necessary to print an error. */
2604 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2605 status = RELAY_CONNECTION_STATUS_CLOSED;
2606 goto end;
2607 }
2608
2609 assert(ret > 0);
2610 assert(ret <= state->left_to_receive);
2611
2612 state->left_to_receive -= ret;
2613 state->received += ret;
2614
2615 if (state->left_to_receive > 0) {
2616 /*
2617 * Can't transition to the protocol's next state, wait to
2618 * receive the rest of the header.
2619 */
2620 DBG3("Partial reception of data connection header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2621 state->received, state->left_to_receive,
2622 conn->sock->fd);
2623 ret = 0;
2624 goto end;
2625 }
2626
2627 /* Transition to next state: receiving the payload. */
2628 conn->protocol.data.state_id = DATA_CONNECTION_STATE_RECEIVE_PAYLOAD;
2629
2630 memcpy(&header, state->header_reception_buffer, sizeof(header));
2631 header.circuit_id = be64toh(header.circuit_id);
2632 header.stream_id = be64toh(header.stream_id);
2633 header.data_size = be32toh(header.data_size);
2634 header.net_seq_num = be64toh(header.net_seq_num);
2635 header.padding_size = be32toh(header.padding_size);
2636 memcpy(&conn->protocol.data.state.receive_payload.header, &header, sizeof(header));
2637
2638 conn->protocol.data.state.receive_payload.left_to_receive =
2639 header.data_size;
2640 conn->protocol.data.state.receive_payload.received = 0;
2641 conn->protocol.data.state.receive_payload.rotate_index = false;
2642
2643 DBG("Received data connection header on fd %i: circuit_id = %" PRIu64 ", stream_id = %" PRIu64 ", data_size = %" PRIu32 ", net_seq_num = %" PRIu64 ", padding_size = %" PRIu32,
2644 conn->sock->fd, header.circuit_id,
2645 header.stream_id, header.data_size,
2646 header.net_seq_num, header.padding_size);
2647
2648 stream = stream_get_by_id(header.stream_id);
2649 if (!stream) {
2650 DBG("relay_process_data_receive_payload: Cannot find stream %" PRIu64,
2651 header.stream_id);
2652 /* Protocol error. */
2653 status = RELAY_CONNECTION_STATUS_ERROR;
2654 goto end;
2655 }
2656
2657 pthread_mutex_lock(&stream->lock);
2658
2659 /* Check if a rotation is needed. */
2660 if (stream->tracefile_size > 0 &&
2661 (stream->tracefile_size_current + header.data_size) >
2662 stream->tracefile_size) {
2663 uint64_t old_id, new_id;
2664
2665 old_id = tracefile_array_get_file_index_head(stream->tfa);
2666 tracefile_array_file_rotate(stream->tfa);
2667
2668 /* new_id is updated by utils_rotate_stream_file. */
2669 new_id = old_id;
2670
2671 ret = utils_rotate_stream_file(stream->path_name,
2672 stream->channel_name, stream->tracefile_size,
2673 stream->tracefile_count, -1,
2674 -1, stream->stream_fd->fd,
2675 &new_id, &stream->stream_fd->fd);
2676 if (ret < 0) {
2677 ERR("Failed to rotate stream output file");
2678 status = RELAY_CONNECTION_STATUS_ERROR;
2679 goto end_stream_unlock;
2680 }
2681
2682 /*
2683 * Reset current size because we just performed a stream
2684 * rotation.
2685 */
2686 stream->tracefile_size_current = 0;
2687 conn->protocol.data.state.receive_payload.rotate_index = true;
2688 }
2689
2690 ret = 0;
2691 end_stream_unlock:
2692 pthread_mutex_unlock(&stream->lock);
2693 stream_put(stream);
2694 end:
2695 return status;
2696 }
2697
2698 static enum relay_connection_status relay_process_data_receive_payload(
2699 struct relay_connection *conn)
2700 {
2701 int ret;
2702 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2703 struct relay_stream *stream;
2704 struct data_connection_state_receive_payload *state =
2705 &conn->protocol.data.state.receive_payload;
2706 const size_t chunk_size = RECV_DATA_BUFFER_SIZE;
2707 char data_buffer[chunk_size];
2708 bool partial_recv = false;
2709 bool new_stream = false, close_requested = false;
2710 uint64_t left_to_receive = state->left_to_receive;
2711 struct relay_session *session;
2712
2713 DBG3("Receiving data for stream id %" PRIu64 " seqnum %" PRIu64 ", %" PRIu64" bytes received, %" PRIu64 " bytes left to receive",
2714 state->header.stream_id, state->header.net_seq_num,
2715 state->received, left_to_receive);
2716
2717 stream = stream_get_by_id(state->header.stream_id);
2718 if (!stream) {
2719 /* Protocol error. */
2720 ERR("relay_process_data_receive_payload: cannot find stream %" PRIu64,
2721 state->header.stream_id);
2722 status = RELAY_CONNECTION_STATUS_ERROR;
2723 goto end;
2724 }
2725
2726 pthread_mutex_lock(&stream->lock);
2727 session = stream->trace->session;
2728 if (!conn->session) {
2729 ret = connection_set_session(conn, session);
2730 if (ret) {
2731 status = RELAY_CONNECTION_STATUS_ERROR;
2732 goto end_stream_unlock;
2733 }
2734 }
2735
2736 /*
2737 * The size of the "chunk" received on any iteration is bounded by:
2738 * - the data left to receive,
2739 * - the data immediately available on the socket,
2740 * - the on-stack data buffer
2741 */
2742 while (left_to_receive > 0 && !partial_recv) {
2743 ssize_t write_ret;
2744 size_t recv_size = min(left_to_receive, chunk_size);
2745
2746 ret = conn->sock->ops->recvmsg(conn->sock, data_buffer,
2747 recv_size, MSG_DONTWAIT);
2748 if (ret < 0) {
2749 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2750 PERROR("Socket %d error", conn->sock->fd);
2751 status = RELAY_CONNECTION_STATUS_ERROR;
2752 }
2753 goto end_stream_unlock;
2754 } else if (ret == 0) {
2755 /* No more data ready to be consumed on socket. */
2756 DBG3("No more data ready for consumption on data socket of stream id %" PRIu64,
2757 state->header.stream_id);
2758 status = RELAY_CONNECTION_STATUS_CLOSED;
2759 break;
2760 } else if (ret < (int) recv_size) {
2761 /*
2762 * All the data available on the socket has been
2763 * consumed.
2764 */
2765 partial_recv = true;
2766 }
2767
2768 recv_size = ret;
2769
2770 /* Write data to stream output fd. */
2771 write_ret = lttng_write(stream->stream_fd->fd, data_buffer,
2772 recv_size);
2773 if (write_ret < (ssize_t) recv_size) {
2774 ERR("Relay error writing data to file");
2775 status = RELAY_CONNECTION_STATUS_ERROR;
2776 goto end_stream_unlock;
2777 }
2778
2779 left_to_receive -= recv_size;
2780 state->received += recv_size;
2781 state->left_to_receive = left_to_receive;
2782
2783 DBG2("Relay wrote %zd bytes to tracefile for stream id %" PRIu64,
2784 write_ret, stream->stream_handle);
2785 }
2786
2787 if (state->left_to_receive > 0) {
2788 /*
2789 * Did not receive all the data expected, wait for more data to
2790 * become available on the socket.
2791 */
2792 DBG3("Partial receive on data connection of stream id %" PRIu64 ", %" PRIu64 " bytes received, %" PRIu64 " bytes left to receive",
2793 state->header.stream_id, state->received,
2794 state->left_to_receive);
2795 goto end_stream_unlock;
2796 }
2797
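/*
 * Write the padding announced in the data header so the packet
 * occupies its full size in the trace file.
 */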
2798 ret = write_padding_to_file(stream->stream_fd->fd,
2799 state->header.padding_size);
2800 if ((int64_t) ret < (int64_t) state->header.padding_size) {
2801 ERR("write_padding_to_file: fail stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d",
2802 stream->stream_handle,
2803 state->header.net_seq_num, ret);
2804 status = RELAY_CONNECTION_STATUS_ERROR;
2805 goto end_stream_unlock;
2806 }
2807
2808
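/*
 * Indexes are only handled for sessions using protocol 2.4 or later,
 * and never for snapshot sessions.
 */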
2809 if (session->minor >= 4 && !session->snapshot) {
2810 ret = handle_index_data(stream, state->header.net_seq_num,
2811 state->rotate_index);
2812 if (ret < 0) {
2813 ERR("handle_index_data: fail stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d",
2814 stream->stream_handle,
2815 state->header.net_seq_num, ret);
2816 status = RELAY_CONNECTION_STATUS_ERROR;
2817 goto end_stream_unlock;
2818 }
2819 }
2820
2821 stream->tracefile_size_current += state->header.data_size +
2822 state->header.padding_size;
2823
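/*
 * A previous sequence number of -1ULL means this is the first packet
 * received for this stream; flag the session as having new streams.
 */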
2824 if (stream->prev_seq == -1ULL) {
2825 new_stream = true;
2826 }
2827
2828 stream->prev_seq = state->header.net_seq_num;
2829
2830 /*
2831 * Resetting the protocol state (to RECEIVE_HEADER) will trash the
2832 * contents of *state which are aliased (union) to the same location as
2833 * the new state. Don't use it beyond this point.
2834 */
2835 connection_reset_protocol_state(conn);
2836 state = NULL;
2837
2838 end_stream_unlock:
2839 close_requested = stream->close_requested;
2840 pthread_mutex_unlock(&stream->lock);
2841 if (close_requested && left_to_receive == 0) {
2842 try_stream_close(stream);
2843 }
2844
2845 if (new_stream) {
2846 pthread_mutex_lock(&session->lock);
2847 uatomic_set(&session->new_streams, 1);
2848 pthread_mutex_unlock(&session->lock);
2849 }
2850
2851 stream_put(stream);
2852 end:
2853 return status;
2854 }
2855
2856 /*
2857 * relay_process_data: Process the data received on the data socket
2858 */
2859 static enum relay_connection_status relay_process_data(
2860 struct relay_connection *conn)
2861 {
2862 enum relay_connection_status status;
2863
2864 switch (conn->protocol.data.state_id) {
2865 case DATA_CONNECTION_STATE_RECEIVE_HEADER:
2866 status = relay_process_data_receive_header(conn);
2867 break;
2868 case DATA_CONNECTION_STATE_RECEIVE_PAYLOAD:
2869 status = relay_process_data_receive_payload(conn);
2870 break;
2871 default:
2872 ERR("Unexpected data connection communication state.");
2873 abort();
2874 }
2875
2876 return status;
2877 }
2878
2879 static void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
2880 {
2881 int ret;
2882
2883 (void) lttng_poll_del(events, pollfd);
2884
2885 ret = close(pollfd);
2886 if (ret < 0) {
2887 ERR("Closing pollfd %d", pollfd);
2888 }
2889 }
2890
2891 static void relay_thread_close_connection(struct lttng_poll_event *events,
2892 int pollfd, struct relay_connection *conn)
2893 {
2894 const char *type_str;
2895
2896 switch (conn->type) {
2897 case RELAY_DATA:
2898 type_str = "Data";
2899 break;
2900 case RELAY_CONTROL:
2901 type_str = "Control";
2902 break;
2903 case RELAY_VIEWER_COMMAND:
2904 type_str = "Viewer Command";
2905 break;
2906 case RELAY_VIEWER_NOTIFICATION:
2907 type_str = "Viewer Notification";
2908 break;
2909 default:
2910 type_str = "Unknown";
2911 }
2912 cleanup_connection_pollfd(events, pollfd);
2913 connection_put(conn);
2914 DBG("%s connection closed with %d", type_str, pollfd);
2915 }
2916
2917 /*
2918 * This is the relay worker thread: it services the control and data connections.
2919 */
2920 static void *relay_thread_worker(void *data)
2921 {
2922 int ret, err = -1, last_seen_data_fd = -1;
2923 uint32_t nb_fd;
2924 struct lttng_poll_event events;
2925 struct lttng_ht *relay_connections_ht;
2926 struct lttng_ht_iter iter;
2927 struct relay_connection *destroy_conn = NULL;
2928
2929 DBG("[thread] Relay worker started");
2930
2931 rcu_register_thread();
2932
2933 health_register(health_relayd, HEALTH_RELAYD_TYPE_WORKER);
2934
2935 if (testpoint(relayd_thread_worker)) {
2936 goto error_testpoint;
2937 }
2938
2939 health_code_update();
2940
2941 /* table of connections indexed on socket */
2942 relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2943 if (!relay_connections_ht) {
2944 goto relay_connections_ht_error;
2945 }
2946
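/*
 * Create the worker thread's named poll set (cleaned up through the
 * fd-tracker on exit).
 */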
2947 ret = create_named_thread_poll_set(&events, 2, "Worker thread epoll");
2948 if (ret < 0) {
2949 goto error_poll_create;
2950 }
2951
2952 ret = lttng_poll_add(&events, relay_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
2953 if (ret < 0) {
2954 goto error;
2955 }
2956
2957 restart:
2958 while (1) {
2959 int idx = -1, i, seen_control = 0, last_notdel_data_fd = -1;
2960
2961 health_code_update();
2962
2963 /* Infinite blocking call, waiting for transmission */
2964 DBG3("Relayd worker thread polling...");
2965 health_poll_entry();
2966 ret = lttng_poll_wait(&events, -1);
2967 health_poll_exit();
2968 if (ret < 0) {
2969 /*
2970 * Restart interrupted system call.
2971 */
2972 if (errno == EINTR) {
2973 goto restart;
2974 }
2975 goto error;
2976 }
2977
2978 nb_fd = ret;
2979
2980 /*
2981 * Process control. The control connection is
2982 * prioritized so we don't starve it with high
2983 * throughput tracing data on the data connection.
2984 */
2985 for (i = 0; i < nb_fd; i++) {
2986 /* Fetch once the poll data */
2987 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
2988 int pollfd = LTTNG_POLL_GETFD(&events, i);
2989
2990 health_code_update();
2991
2992 if (!revents) {
2993 /*
2994 * No activity for this FD (poll
2995 * implementation).
2996 */
2997 continue;
2998 }
2999
3000 /* Thread quit pipe has been closed. Killing thread. */
3001 ret = check_thread_quit_pipe(pollfd, revents);
3002 if (ret) {
3003 err = 0;
3004 goto exit;
3005 }
3006
3007 /* Inspect the relay conn pipe for new connection */
3008 if (pollfd == relay_conn_pipe[0]) {
3009 if (revents & LPOLLIN) {
3010 struct relay_connection *conn;
3011
3012 ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn));
3013 if (ret < 0) {
3014 goto error;
3015 }
3016 lttng_poll_add(&events, conn->sock->fd,
3017 LPOLLIN | LPOLLRDHUP);
3018 connection_ht_add(relay_connections_ht, conn);
3019 DBG("Connection socket %d added", conn->sock->fd);
3020 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3021 ERR("Relay connection pipe error");
3022 goto error;
3023 } else {
3024 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
3025 goto error;
3026 }
3027 } else {
3028 struct relay_connection *ctrl_conn;
3029
3030 ctrl_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3031 /* If not found, there is a synchronization issue. */
3032 assert(ctrl_conn);
3033
3034 if (ctrl_conn->type == RELAY_DATA) {
3035 if (revents & LPOLLIN) {
3036 /*
3037 * Remember the last data fd that was not deleted. It will be
3038 * used as the last seen fd if any fd gets deleted in
3039 * this first loop.
3040 */
3041 last_notdel_data_fd = pollfd;
3042 }
3043 goto put_ctrl_connection;
3044 }
3045 assert(ctrl_conn->type == RELAY_CONTROL);
3046
3047 if (revents & LPOLLIN) {
3048 enum relay_connection_status status;
3049
3050 status = relay_process_control(ctrl_conn);
3051 if (status != RELAY_CONNECTION_STATUS_OK) {
3052 /*
3053 * On socket error flag the session as aborted to force
3054 * the cleanup of its stream otherwise it can leak
3055 * during the lifetime of the relayd.
3056 *
3057 * This prevents situations in which streams can be
3058 * left open because an index was received, the
3059 * control connection is closed, and the data
3060 * connection is closed (uncleanly) before the packet's
3061 * data is provided.
3062 *
3063 * Since the control connection encountered an error,
3064 * it is okay to be conservative and close the
3065 * session right now as we can't rely on the protocol
3066 * being respected anymore.
3067 */
3068 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3069 session_abort(ctrl_conn->session);
3070 }
3071
3072 /* Clear the connection on error or close. */
3073 relay_thread_close_connection(&events,
3074 pollfd,
3075 ctrl_conn);
3076 }
3077 seen_control = 1;
3078 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3079 relay_thread_close_connection(&events,
3080 pollfd, ctrl_conn);
3081 if (last_seen_data_fd == pollfd) {
3082 last_seen_data_fd = last_notdel_data_fd;
3083 }
3084 } else {
3085 ERR("Unexpected poll events %u for control sock %d",
3086 revents, pollfd);
3087 connection_put(ctrl_conn);
3088 goto error;
3089 }
3090 put_ctrl_connection:
3091 connection_put(ctrl_conn);
3092 }
3093 }
3094
3095 /*
3096 * The last loop handled a control request, go back to poll to make
3097 * sure we prioritise the control socket.
3098 */
3099 if (seen_control) {
3100 continue;
3101 }
3102
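/*
 * Find the index of the last data fd that was processed so the data
 * loop below resumes right after it, ensuring data connections are
 * serviced fairly.
 */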
3103 if (last_seen_data_fd >= 0) {
3104 for (i = 0; i < nb_fd; i++) {
3105 int pollfd = LTTNG_POLL_GETFD(&events, i);
3106
3107 health_code_update();
3108
3109 if (last_seen_data_fd == pollfd) {
3110 idx = i;
3111 break;
3112 }
3113 }
3114 }
3115
3116 /* Process data connection. */
3117 for (i = idx + 1; i < nb_fd; i++) {
3118 /* Fetch the poll data. */
3119 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3120 int pollfd = LTTNG_POLL_GETFD(&events, i);
3121 struct relay_connection *data_conn;
3122
3123 health_code_update();
3124
3125 if (!revents) {
3126 /* No activity for this FD (poll implementation). */
3127 continue;
3128 }
3129
3130 /* Skip the command pipe. It's handled in the first loop. */
3131 if (pollfd == relay_conn_pipe[0]) {
3132 continue;
3133 }
3134
3135 data_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3136 if (!data_conn) {
3137 /* Skip it. Might be removed before. */
3138 continue;
3139 }
3140 if (data_conn->type == RELAY_CONTROL) {
3141 goto put_data_connection;
3142 }
3143 assert(data_conn->type == RELAY_DATA);
3144
3145 if (revents & LPOLLIN) {
3146 enum relay_connection_status status;
3147
3148 status = relay_process_data(data_conn);
3149 /* Connection closed or error. */
3150 if (status != RELAY_CONNECTION_STATUS_OK) {
3151 /*
3152 * On socket error flag the session as aborted to force
3153 * the cleanup of its stream otherwise it can leak
3154 * during the lifetime of the relayd.
3155 *
3156 * This prevents situations in which streams can be
3157 * left open because an index was received, the
3158 * control connection is closed, and the data
3159 * connection is closed (uncleanly) before the packet's
3160 * data is provided.
3161 *
3162 * Since the data connection encountered an error,
3163 * it is okay to be conservative and close the
3164 * session right now as we can't rely on the protocol
3165 * being respected anymore.
3166 */
3167 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3168 session_abort(data_conn->session);
3169 }
3170 relay_thread_close_connection(&events, pollfd,
3171 data_conn);
3172 /*
3173 * Every goto restart call sets the last seen fd where
3174 * here we don't really care since we gracefully
3175 * continue the loop after the connection is deleted.
3176 */
3177 } else {
3178 /* Keep the last seen data fd. */
3179 last_seen_data_fd = pollfd;
3180 connection_put(data_conn);
3181 goto restart;
3182 }
3183 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3184 relay_thread_close_connection(&events, pollfd,
3185 data_conn);
3186 } else {
3187 ERR("Unknown poll events %u for data sock %d",
3188 revents, pollfd);
3189 }
3190 put_data_connection:
3191 connection_put(data_conn);
3192 }
3193 last_seen_data_fd = -1;
3194 }
3195
3196 /* Normal exit, no error */
3197 ret = 0;
3198
3199 exit:
3200 error:
3201 /* Clean up the remaining connection objects. */
3202 rcu_read_lock();
3203 cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter,
3204 destroy_conn,
3205 sock_n.node) {
3206 health_code_update();
3207
3208 session_abort(destroy_conn->session);
3209
3210 /*
3211 * No need to grab another ref, because we own
3212 * destroy_conn.
3213 */
3214 relay_thread_close_connection(&events, destroy_conn->sock->fd,
3215 destroy_conn);
3216 }
3217 rcu_read_unlock();
3218
3219 (void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
3220 error_poll_create:
3221 lttng_ht_destroy(relay_connections_ht);
3222 relay_connections_ht_error:
3223 /* Close relay conn pipes */
3224 (void) fd_tracker_util_pipe_close(the_fd_tracker,
3225 relay_conn_pipe);
3226 if (err) {
3227 DBG("Thread exited with error");
3228 }
3229 DBG("Worker thread cleanup complete");
3230 error_testpoint:
3231 if (err) {
3232 health_error();
3233 ERR("Health error occurred in %s", __func__);
3234 }
3235 health_unregister(health_relayd);
3236 rcu_unregister_thread();
3237 lttng_relay_stop_threads();
3238 return NULL;
3239 }
3240
3241 /*
3242 * Create the relay connection pipe used to wake up the worker thread.
3243 * Closed by the worker thread on exit.
3244 */
3245 static int create_relay_conn_pipe(void)
3246 {
3247 return fd_tracker_util_pipe_open_cloexec(the_fd_tracker,
3248 "Relayd connection pipe", relay_conn_pipe);
3249 }
3250
3251 /*
3252 * main
3253 */
3254 int main(int argc, char **argv)
3255 {
3256 int ret = 0, retval = 0;
3257 void *status;
3258
3259 /* Parse environment variables */
3260 parse_env_options();
3261
3262 /*
3263 * Parse arguments.
3264 * Command line arguments overwrite environment.
3265 */
3266 progname = argv[0];
3267 if (set_options(argc, argv)) {
3268 retval = -1;
3269 goto exit_options;
3270 }
3271
3272 if (set_signal_handler()) {
3273 retval = -1;
3274 goto exit_options;
3275 }
3276
3277 relayd_config_log();
3278
3279 if (opt_print_version) {
3280 print_version();
3281 retval = 0;
3282 goto exit_options;
3283 }
3284
3285 ret = fclose(stdin);
3286 if (ret) {
3287 PERROR("Failed to close stdin");
3288 goto exit_options;
3289 }
3290 /* Try to create directory if -o, --output is specified. */
3291 if (opt_output_path) {
3292 if (*opt_output_path != '/') {
3293 ERR("Please specify an absolute path for -o, --output PATH");
3294 retval = -1;
3295 goto exit_options;
3296 }
3297
3298 ret = utils_mkdir_recursive(opt_output_path, S_IRWXU | S_IRWXG,
3299 -1, -1);
3300 if (ret < 0) {
3301 ERR("Unable to create %s", opt_output_path);
3302 retval = -1;
3303 goto exit_options;
3304 }
3305 }
3306
3307 /* Daemonize */
3308 if (opt_daemon || opt_background) {
3309 ret = lttng_daemonize(&child_ppid, &recv_child_signal,
3310 !opt_background);
3311 if (ret < 0) {
3312 retval = -1;
3313 goto exit_options;
3314 }
3315 }
3316
3317 if (opt_working_directory) {
3318 ret = utils_change_working_dir(opt_working_directory);
3319 if (ret) {
3320 ERR("Changing working directory");
3321 goto exit_options;
3322 }
3323 }
3324 /*
3325 * The RCU thread registration (and use, through the fd-tracker's
3326 * creation) is done after daemonization so that we do not have to
3327 * deal with liburcu's fork() management, which would otherwise
3328 * require the call_rcu machinery to be restored after the fork.
3329 */
3330 rcu_register_thread();
3331
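/*
 * The fd-tracker limits the number of file descriptors that the relay
 * daemon keeps open simultaneously to lttng_opt_fd_cap.
 */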
3332 the_fd_tracker = fd_tracker_create(lttng_opt_fd_cap);
3333 if (!the_fd_tracker) {
3334 retval = -1;
3335 goto exit_options;
3336 }
3337
3338 /* Initialize thread health monitoring */
3339 health_relayd = health_app_create(NR_HEALTH_RELAYD_TYPES);
3340 if (!health_relayd) {
3341 PERROR("health_app_create error");
3342 retval = -1;
3343 goto exit_health_app_create;
3344 }
3345
3346 /* Create thread quit pipe */
3347 if (init_thread_quit_pipe()) {
3348 retval = -1;
3349 goto exit_init_data;
3350 }
3351
3352 /* Set up the relay connection pipe used to pass new connections to the worker thread. */
3353 if (create_relay_conn_pipe()) {
3354 retval = -1;
3355 goto exit_init_data;
3356 }
3357
3358 /* Init relay command queue. */
3359 cds_wfcq_init(&relay_conn_queue.head, &relay_conn_queue.tail);
3360
3361 /* Initialize communication library */
3362 lttcomm_init();
3363 lttcomm_inet_init();
3364
3365 /* table of sessions indexed by session ID */
3366 sessions_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3367 if (!sessions_ht) {
3368 retval = -1;
3369 goto exit_init_data;
3370 }
3371
3372 /* table of relay streams indexed by stream ID */
3373 relay_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3374 if (!relay_streams_ht) {
3375 retval = -1;
3376 goto exit_init_data;
3377 }
3378
3379 /* table of viewer streams indexed by stream ID */
3380 viewer_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3381 if (!viewer_streams_ht) {
3382 retval = -1;
3383 goto exit_init_data;
3384 }
3385
3386 ret = init_health_quit_pipe();
3387 if (ret) {
3388 retval = -1;
3389 goto exit_health_quit_pipe;
3390 }
3391
3392 /* Create the health management thread. */
3393 ret = pthread_create(&health_thread, default_pthread_attr(),
3394 thread_manage_health, (void *) NULL);
3395 if (ret) {
3396 errno = ret;
3397 PERROR("pthread_create health");
3398 retval = -1;
3399 goto exit_health_thread;
3400 }
3401
3402 /* Setup the dispatcher thread */
3403 ret = pthread_create(&dispatcher_thread, default_pthread_attr(),
3404 relay_thread_dispatcher, (void *) NULL);
3405 if (ret) {
3406 errno = ret;
3407 PERROR("pthread_create dispatcher");
3408 retval = -1;
3409 goto exit_dispatcher_thread;
3410 }
3411
3412 /* Setup the worker thread */
3413 ret = pthread_create(&worker_thread, default_pthread_attr(),
3414 relay_thread_worker, NULL);
3415 if (ret) {
3416 errno = ret;
3417 PERROR("pthread_create worker");
3418 retval = -1;
3419 goto exit_worker_thread;
3420 }
3421
3422 /* Setup the listener thread */
3423 ret = pthread_create(&listener_thread, default_pthread_attr(),
3424 relay_thread_listener, (void *) NULL);
3425 if (ret) {
3426 errno = ret;
3427 PERROR("pthread_create listener");
3428 retval = -1;
3429 goto exit_listener_thread;
3430 }
3431
3432 ret = relayd_live_create(live_uri);
3433 if (ret) {
3434 ERR("Starting live viewer threads");
3435 retval = -1;
3436 goto exit_live;
3437 }
3438
3439 /*
3440 * This is where we start awaiting program completion (e.g. through
3441 * signal that asks threads to teardown).
3442 */
3443
3444 ret = relayd_live_join();
3445 if (ret) {
3446 retval = -1;
3447 }
3448 exit_live:
3449
3450 ret = pthread_join(listener_thread, &status);
3451 if (ret) {
3452 errno = ret;
3453 PERROR("pthread_join listener_thread");
3454 retval = -1;
3455 }
3456
3457 exit_listener_thread:
3458 ret = pthread_join(worker_thread, &status);
3459 if (ret) {
3460 errno = ret;
3461 PERROR("pthread_join worker_thread");
3462 retval = -1;
3463 }
3464
3465 exit_worker_thread:
3466 ret = pthread_join(dispatcher_thread, &status);
3467 if (ret) {
3468 errno = ret;
3469 PERROR("pthread_join dispatcher_thread");
3470 retval = -1;
3471 }
3472 exit_dispatcher_thread:
3473
3474 ret = pthread_join(health_thread, &status);
3475 if (ret) {
3476 errno = ret;
3477 PERROR("pthread_join health_thread");
3478 retval = -1;
3479 }
3480 exit_health_thread:
3481
3482 (void) fd_tracker_util_pipe_close(the_fd_tracker, health_quit_pipe);
3483 exit_health_quit_pipe:
3484
3485 exit_init_data:
3486 health_app_destroy(health_relayd);
3487 exit_health_app_create:
3488 exit_options:
3489 /*
3490 * Wait for all pending call_rcu work to complete before tearing
3491 * down data structures. call_rcu worker may be trying to
3492 * perform lookups in those structures.
3493 */
3494 rcu_barrier();
3495 relayd_cleanup();
3496
3497 /* Ensure all prior call_rcu are done. */
3498 rcu_barrier();
3499
3500 fd_tracker_destroy(the_fd_tracker);
3501 rcu_unregister_thread();
3502
3503 if (!retval) {
3504 exit(EXIT_SUCCESS);
3505 } else {
3506 exit(EXIT_FAILURE);
3507 }
3508 }