[lttng-tools.git] / src / bin / lttng-relayd / main.c
1 /*
2 * Copyright (C) 2012 - Julien Desfossez <jdesfossez@efficios.com>
3 * David Goulet <dgoulet@efficios.com>
4 * 2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 * 2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License, version 2 only,
9 * as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21 #define _LGPL_SOURCE
22 #include <getopt.h>
23 #include <grp.h>
24 #include <limits.h>
25 #include <pthread.h>
26 #include <signal.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/mman.h>
31 #include <sys/mount.h>
32 #include <sys/resource.h>
33 #include <sys/socket.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/wait.h>
37 #include <inttypes.h>
38 #include <urcu/futex.h>
39 #include <urcu/uatomic.h>
40 #include <unistd.h>
41 #include <fcntl.h>
42
43 #include <lttng/lttng.h>
44 #include <common/common.h>
45 #include <common/compat/poll.h>
46 #include <common/compat/socket.h>
47 #include <common/compat/endian.h>
48 #include <common/compat/getenv.h>
49 #include <common/defaults.h>
50 #include <common/daemonize.h>
51 #include <common/futex.h>
52 #include <common/sessiond-comm/sessiond-comm.h>
53 #include <common/sessiond-comm/inet.h>
54 #include <common/sessiond-comm/relayd.h>
55 #include <common/uri.h>
56 #include <common/utils.h>
57 #include <common/config/session-config.h>
58 #include <common/dynamic-buffer.h>
59 #include <common/buffer-view.h>
60 #include <urcu/rculist.h>
61
62 #include "cmd.h"
63 #include "ctf-trace.h"
64 #include "index.h"
65 #include "utils.h"
66 #include "lttng-relayd.h"
67 #include "live.h"
68 #include "health-relayd.h"
69 #include "testpoint.h"
70 #include "viewer-stream.h"
71 #include "session.h"
72 #include "stream.h"
73 #include "connection.h"
74 #include "tracefile-array.h"
75 #include "tcp_keep_alive.h"
76
77 enum relay_connection_status {
78 RELAY_CONNECTION_STATUS_OK,
79 /* An error occurred while processing an event on the connection. */
80 RELAY_CONNECTION_STATUS_ERROR,
81 /* Connection closed/shutdown cleanly. */
82 RELAY_CONNECTION_STATUS_CLOSED,
83 };
84
85 /* command line options */
86 char *opt_output_path, *opt_working_directory;
87 static int opt_daemon, opt_background;
88 int opt_group_output_by_session;
89 int opt_group_output_by_host;
90
91 /*
92 * The listener, live listener and health-check threads must all be
93 * ready before we signal overall readiness.
94 */
95 #define NR_LTTNG_RELAY_READY 3
96 static int lttng_relay_ready = NR_LTTNG_RELAY_READY;
97
98 /* Size of receive buffer. */
99 #define RECV_DATA_BUFFER_SIZE 65536
100
101 static int recv_child_signal; /* Set to 1 when a SIGUSR1 signal is received. */
102 static pid_t child_ppid; /* Internal parent PID used with daemonize. */
103
104 static struct lttng_uri *control_uri;
105 static struct lttng_uri *data_uri;
106 static struct lttng_uri *live_uri;
107
108 const char *progname;
109
110 const char *tracing_group_name = DEFAULT_TRACING_GROUP;
111 static int tracing_group_name_override;
112
113 const char * const config_section_name = "relayd";
114
115 /*
116 * Quit pipe for all threads. This permits a single cancellation point
117 * for all threads when receiving an event on the pipe.
118 */
119 int thread_quit_pipe[2] = { -1, -1 };
120
121 /*
122 * This pipe is used to inform the worker thread that a command is queued and
123 * ready to be processed.
124 */
125 static int relay_conn_pipe[2] = { -1, -1 };
126
127 /* Shared between threads */
128 static int dispatch_thread_exit;
129
130 static pthread_t listener_thread;
131 static pthread_t dispatcher_thread;
132 static pthread_t worker_thread;
133 static pthread_t health_thread;
134
135 /*
136 * last_relay_stream_id_lock protects last_relay_stream_id increment
137 * atomicity on 32-bit architectures.
138 */
139 static pthread_mutex_t last_relay_stream_id_lock = PTHREAD_MUTEX_INITIALIZER;
140 static uint64_t last_relay_stream_id;
141
142 /*
143 * Relay command queue.
144 *
145 * The relay_thread_listener and relay_thread_dispatcher communicate with this
146 * queue.
147 */
148 static struct relay_conn_queue relay_conn_queue;
149
150 /* Global relay stream hash table. */
151 struct lttng_ht *relay_streams_ht;
152
153 /* Global relay viewer stream hash table. */
154 struct lttng_ht *viewer_streams_ht;
155
156 /* Global relay sessions hash table. */
157 struct lttng_ht *sessions_ht;
158
159 /* Relayd health monitoring */
160 struct health_app *health_relayd;
161
162 static struct option long_options[] = {
163 { "control-port", 1, 0, 'C', },
164 { "data-port", 1, 0, 'D', },
165 { "live-port", 1, 0, 'L', },
166 { "daemonize", 0, 0, 'd', },
167 { "background", 0, 0, 'b', },
168 { "group", 1, 0, 'g', },
169 { "help", 0, 0, 'h', },
170 { "output", 1, 0, 'o', },
171 { "verbose", 0, 0, 'v', },
172 { "config", 1, 0, 'f' },
173 { "version", 0, 0, 'V' },
174 { "working-directory", 1, 0, 'w', },
175 { "group-output-by-session", 0, 0, 's', },
176 { "group-output-by-host", 0, 0, 'p', },
177 { NULL, 0, 0, 0, },
178 };
179
180 static const char *config_ignore_options[] = { "help", "config", "version" };
181
182 /*
183 * Take an option from the getopt output and set it in the right variable to be
184 * used later.
185 *
186 * Return 0 on success else a negative value.
187 */
188 static int set_option(int opt, const char *arg, const char *optname)
189 {
190 int ret;
191
192 switch (opt) {
193 case 0:
194 fprintf(stderr, "option %s", optname);
195 if (arg) {
196 fprintf(stderr, " with arg %s\n", arg);
197 }
198 break;
199 case 'C':
200 if (lttng_is_setuid_setgid()) {
201 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
202 "-C, --control-port");
203 } else {
204 ret = uri_parse(arg, &control_uri);
205 if (ret < 0) {
206 ERR("Invalid control URI specified");
207 goto end;
208 }
209 if (control_uri->port == 0) {
210 control_uri->port = DEFAULT_NETWORK_CONTROL_PORT;
211 }
212 }
213 break;
214 case 'D':
215 if (lttng_is_setuid_setgid()) {
216 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
217 "-D, -data-port");
218 } else {
219 ret = uri_parse(arg, &data_uri);
220 if (ret < 0) {
221 ERR("Invalid data URI specified");
222 goto end;
223 }
224 if (data_uri->port == 0) {
225 data_uri->port = DEFAULT_NETWORK_DATA_PORT;
226 }
227 }
228 break;
229 case 'L':
230 if (lttng_is_setuid_setgid()) {
231 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
232 "-L, -live-port");
233 } else {
234 ret = uri_parse(arg, &live_uri);
235 if (ret < 0) {
236 ERR("Invalid live URI specified");
237 goto end;
238 }
239 if (live_uri->port == 0) {
240 live_uri->port = DEFAULT_NETWORK_VIEWER_PORT;
241 }
242 }
243 break;
244 case 'd':
245 opt_daemon = 1;
246 break;
247 case 'b':
248 opt_background = 1;
249 break;
250 case 'g':
251 if (lttng_is_setuid_setgid()) {
252 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
253 "-g, --group");
254 } else {
255 tracing_group_name = strdup(arg);
256 if (tracing_group_name == NULL) {
257 ret = -errno;
258 PERROR("strdup");
259 goto end;
260 }
261 tracing_group_name_override = 1;
262 }
263 break;
264 case 'h':
265 ret = utils_show_man_page(8, "lttng-relayd");
266 if (ret) {
267 ERR("Cannot view man page lttng-relayd(8)");
268 perror("exec");
269 }
270 exit(EXIT_FAILURE);
271 case 'V':
272 fprintf(stdout, "%s\n", VERSION);
273 exit(EXIT_SUCCESS);
274 case 'o':
275 if (lttng_is_setuid_setgid()) {
276 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
277 "-o, --output");
278 } else {
279 ret = asprintf(&opt_output_path, "%s", arg);
280 if (ret < 0) {
281 ret = -errno;
282 PERROR("asprintf opt_output_path");
283 goto end;
284 }
285 }
286 break;
287 case 'w':
288 if (lttng_is_setuid_setgid()) {
289 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
290 "-w, --working-directory");
291 } else {
292 ret = asprintf(&opt_working_directory, "%s", arg);
293 if (ret < 0) {
294 ret = -errno;
295 PERROR("asprintf working_directory");
296 goto end;
297 }
298 }
299 break;
300
301 case 'v':
302 /* The verbosity level can be increased by using multiple -v. */
303 if (arg) {
304 lttng_opt_verbose = config_parse_value(arg);
305 } else {
306 /* Only 3 levels of verbosity (-vvv). */
307 if (lttng_opt_verbose < 3) {
308 lttng_opt_verbose += 1;
309 }
310 }
311 break;
312 case 's':
313 if (opt_group_output_by_host) {
314 ERR("Cannot set --group-output-by-session, --group-output-by-host already defined");
315 exit(EXIT_FAILURE);
316 }
317 opt_group_output_by_session = 1;
318 break;
319 case 'p':
320 if (opt_group_output_by_session) {
321 ERR("Cannot set --group-output-by-host, --group-output-by-session already defined");
322 exit(EXIT_FAILURE);
323 }
324 opt_group_output_by_host = 1;
325 break;
326 default:
327 /* Unknown option or other error.
328 * The error is printed by getopt; just return. */
329 ret = -1;
330 goto end;
331 }
332
333 /* All good. */
334 ret = 0;
335
336 end:
337 return ret;
338 }
339
340 /*
341 * config_entry_handler_cb used to handle options read from a config file.
342 * See config_entry_handler_cb comment in common/config/session-config.h for the
343 * return value conventions.
344 */
345 static int config_entry_handler(const struct config_entry *entry, void *unused)
346 {
347 int ret = 0, i;
348
349 if (!entry || !entry->name || !entry->value) {
350 ret = -EINVAL;
351 goto end;
352 }
353
354 /* Check if the option is to be ignored */
355 for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
356 if (!strcmp(entry->name, config_ignore_options[i])) {
357 goto end;
358 }
359 }
360
361 for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1; i++) {
362 /* Ignore if entry name is not fully matched. */
363 if (strcmp(entry->name, long_options[i].name)) {
364 continue;
365 }
366
367 /*
368 * If the option takes no argument on the command line,
369 * we have to check if the value is "true". We support
370 * non-zero numeric values, true, on and yes.
371 */
372 if (!long_options[i].has_arg) {
373 ret = config_parse_value(entry->value);
374 if (ret <= 0) {
375 if (ret) {
376 WARN("Invalid configuration value \"%s\" for option %s",
377 entry->value, entry->name);
378 }
379 /* False, skip boolean config option. */
380 goto end;
381 }
382 }
383
384 ret = set_option(long_options[i].val, entry->value, entry->name);
385 goto end;
386 }
387
388 WARN("Unrecognized option \"%s\" in daemon configuration file.",
389 entry->name);
390
391 end:
392 return ret;
393 }
394
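/*
 * A minimal sketch of a configuration file consumed through -f/--config,
 * assuming the INI-style format used by the LTTng daemons. Entries of the
 * "relayd" section map to the long option names declared above; the paths
 * and addresses below are purely illustrative:
 *
 *	[relayd]
 *	daemonize=yes
 *	control-port=tcp://0.0.0.0:5342
 *	data-port=tcp://0.0.0.0:5343
 *	live-port=tcp://localhost:5344
 *	output=/var/lib/lttng/relayd-traces
 *
 * Boolean entries go through config_parse_value(), so "yes", "on", "true"
 * and non-zero numeric values all enable the corresponding option.
 */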
395 static void parse_env_options(void)
396 {
397 char *value = NULL;
398
399 value = lttng_secure_getenv(DEFAULT_LTTNG_RELAYD_WORKING_DIRECTORY_ENV);
400 if (value) {
401 opt_working_directory = value;
402 }
403 }
404
405 static int set_options(int argc, char **argv)
406 {
407 int c, ret = 0, option_index = 0, retval = 0;
408 int orig_optopt = optopt, orig_optind = optind;
409 char *default_address, *optstring;
410 const char *config_path = NULL;
411
412 optstring = utils_generate_optstring(long_options,
413 sizeof(long_options) / sizeof(struct option));
414 if (!optstring) {
415 retval = -ENOMEM;
416 goto exit;
417 }
418
419 /* Check for the --config option */
420
421 while ((c = getopt_long(argc, argv, optstring, long_options,
422 &option_index)) != -1) {
423 if (c == '?') {
424 retval = -EINVAL;
425 goto exit;
426 } else if (c != 'f') {
427 continue;
428 }
429
430 if (lttng_is_setuid_setgid()) {
431 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
432 "-f, --config");
433 } else {
434 config_path = utils_expand_path(optarg);
435 if (!config_path) {
436 ERR("Failed to resolve path: %s", optarg);
437 }
438 }
439 }
440
441 ret = config_get_section_entries(config_path, config_section_name,
442 config_entry_handler, NULL);
443 if (ret) {
444 if (ret > 0) {
445 ERR("Invalid configuration option at line %i", ret);
446 }
447 retval = -1;
448 goto exit;
449 }
450
451 /* Reset getopt's global state */
452 optopt = orig_optopt;
453 optind = orig_optind;
454 while (1) {
455 c = getopt_long(argc, argv, optstring, long_options, &option_index);
456 if (c == -1) {
457 break;
458 }
459
460 ret = set_option(c, optarg, long_options[option_index].name);
461 if (ret < 0) {
462 retval = -1;
463 goto exit;
464 }
465 }
466
467 /* assign default values */
468 if (control_uri == NULL) {
469 ret = asprintf(&default_address,
470 "tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d",
471 DEFAULT_NETWORK_CONTROL_PORT);
472 if (ret < 0) {
473 PERROR("asprintf default data address");
474 retval = -1;
475 goto exit;
476 }
477
478 ret = uri_parse(default_address, &control_uri);
479 free(default_address);
480 if (ret < 0) {
481 ERR("Invalid control URI specified");
482 retval = -1;
483 goto exit;
484 }
485 }
486 if (data_uri == NULL) {
487 ret = asprintf(&default_address,
488 "tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d",
489 DEFAULT_NETWORK_DATA_PORT);
490 if (ret < 0) {
491 PERROR("asprintf default data address");
492 retval = -1;
493 goto exit;
494 }
495
496 ret = uri_parse(default_address, &data_uri);
497 free(default_address);
498 if (ret < 0) {
499 ERR("Invalid data URI specified");
500 retval = -1;
501 goto exit;
502 }
503 }
504 if (live_uri == NULL) {
505 ret = asprintf(&default_address,
506 "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d",
507 DEFAULT_NETWORK_VIEWER_PORT);
508 if (ret < 0) {
509 PERROR("asprintf default viewer control address");
510 retval = -1;
511 goto exit;
512 }
513
514 ret = uri_parse(default_address, &live_uri);
515 free(default_address);
516 if (ret < 0) {
517 ERR("Invalid viewer control URI specified");
518 retval = -1;
519 goto exit;
520 }
521 }
522
523 if (!opt_group_output_by_session && !opt_group_output_by_host) {
524 /* Group by host by default */
525 opt_group_output_by_host = 1;
526 }
527
528 exit:
529 free(optstring);
530 return retval;
531 }
532
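/*
 * Example invocation (a sketch; the output path is illustrative). Ports
 * left unspecified fall back to the defaults resolved above
 * (DEFAULT_NETWORK_CONTROL_PORT, DEFAULT_NETWORK_DATA_PORT and
 * DEFAULT_NETWORK_VIEWER_PORT, i.e. 5342, 5343 and 5344 respectively):
 *
 *	lttng-relayd --daemonize \
 *		--control-port tcp://0.0.0.0:5342 \
 *		--data-port tcp://0.0.0.0:5343 \
 *		--live-port tcp://localhost:5344 \
 *		--output /var/lib/lttng/relayd-traces
 */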
533 static void print_global_objects(void)
534 {
535 rcu_register_thread();
536
537 print_viewer_streams();
538 print_relay_streams();
539 print_sessions();
540
541 rcu_unregister_thread();
542 }
543
544 /*
545 * Cleanup the daemon
546 */
547 static void relayd_cleanup(void)
548 {
549 print_global_objects();
550
551 DBG("Cleaning up");
552
553 if (viewer_streams_ht)
554 lttng_ht_destroy(viewer_streams_ht);
555 if (relay_streams_ht)
556 lttng_ht_destroy(relay_streams_ht);
557 if (sessions_ht)
558 lttng_ht_destroy(sessions_ht);
559
560 /* free the dynamically allocated opt_output_path */
561 free(opt_output_path);
562
563 /* Close thread quit pipes */
564 utils_close_pipe(thread_quit_pipe);
565
566 uri_free(control_uri);
567 uri_free(data_uri);
568 /* Live URI is freed in the live thread. */
569
570 if (tracing_group_name_override) {
571 free((void *) tracing_group_name);
572 }
573 }
574
575 /*
576 * Write to writable pipe used to notify a thread.
577 */
578 static int notify_thread_pipe(int wpipe)
579 {
580 ssize_t ret;
581
582 ret = lttng_write(wpipe, "!", 1);
583 if (ret < 1) {
584 PERROR("write poll pipe");
585 goto end;
586 }
587 ret = 0;
588 end:
589 return ret;
590 }
591
592 static int notify_health_quit_pipe(int *pipe)
593 {
594 ssize_t ret;
595
596 ret = lttng_write(pipe[1], "4", 1);
597 if (ret < 1) {
598 PERROR("write relay health quit");
599 goto end;
600 }
601 ret = 0;
602 end:
603 return ret;
604 }
605
606 /*
607 * Stop all relayd and relayd-live threads.
608 */
609 int lttng_relay_stop_threads(void)
610 {
611 int retval = 0;
612
613 /* Stopping all threads */
614 DBG("Terminating all threads");
615 if (notify_thread_pipe(thread_quit_pipe[1])) {
616 ERR("write error on thread quit pipe");
617 retval = -1;
618 }
619
620 if (notify_health_quit_pipe(health_quit_pipe)) {
621 ERR("write error on health quit pipe");
622 }
623
624 /* Dispatch thread */
625 CMM_STORE_SHARED(dispatch_thread_exit, 1);
626 futex_nto1_wake(&relay_conn_queue.futex);
627
628 if (relayd_live_stop()) {
629 ERR("Error stopping live threads");
630 retval = -1;
631 }
632 return retval;
633 }
634
635 /*
636 * Signal handler for the daemon
637 *
638 * Simply stop all worker threads, letting main() return gracefully after
639 * joining all threads and calling cleanup().
640 */
641 static void sighandler(int sig)
642 {
643 switch (sig) {
644 case SIGINT:
645 DBG("SIGINT caught");
646 if (lttng_relay_stop_threads()) {
647 ERR("Error stopping threads");
648 }
649 break;
650 case SIGTERM:
651 DBG("SIGTERM caught");
652 if (lttng_relay_stop_threads()) {
653 ERR("Error stopping threads");
654 }
655 break;
656 case SIGUSR1:
657 CMM_STORE_SHARED(recv_child_signal, 1);
658 break;
659 default:
660 break;
661 }
662 }
663
664 /*
665 * Set up signal handlers for:
666 * SIGINT, SIGTERM, SIGUSR1, SIGPIPE
667 */
668 static int set_signal_handler(void)
669 {
670 int ret = 0;
671 struct sigaction sa;
672 sigset_t sigset;
673
674 if ((ret = sigemptyset(&sigset)) < 0) {
675 PERROR("sigemptyset");
676 return ret;
677 }
678
679 sa.sa_mask = sigset;
680 sa.sa_flags = 0;
681
682 sa.sa_handler = sighandler;
683 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
684 PERROR("sigaction");
685 return ret;
686 }
687
688 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
689 PERROR("sigaction");
690 return ret;
691 }
692
693 if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
694 PERROR("sigaction");
695 return ret;
696 }
697
698 sa.sa_handler = SIG_IGN;
699 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
700 PERROR("sigaction");
701 return ret;
702 }
703
704 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
705
706 return ret;
707 }
708
709 void lttng_relay_notify_ready(void)
710 {
711 /* Notify the parent process of the fork() that we are ready. */
712 if (opt_daemon || opt_background) {
713 if (uatomic_sub_return(&lttng_relay_ready, 1) == 0) {
714 kill(child_ppid, SIGUSR1);
715 }
716 }
717 }
718
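/*
 * Readiness sketch: the three threads accounted for in
 * NR_LTTNG_RELAY_READY (listener, live listener and health check) each
 * call lttng_relay_notify_ready() once their initialization is done, as
 * relay_thread_listener() does below. The call that brings the counter to
 * zero signals the parent of the daemonizing fork() (child_ppid) with
 * SIGUSR1, which is what that parent waits for before exiting.
 */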
719 /*
720 * Init thread quit pipe.
721 *
722 * Return -1 on error or 0 if all pipes are created.
723 */
724 static int init_thread_quit_pipe(void)
725 {
726 int ret;
727
728 ret = utils_create_pipe_cloexec(thread_quit_pipe);
729
730 return ret;
731 }
732
733 /*
734 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
735 */
736 static int create_thread_poll_set(struct lttng_poll_event *events, int size)
737 {
738 int ret;
739
740 if (events == NULL || size == 0) {
741 ret = -1;
742 goto error;
743 }
744
745 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
746 if (ret < 0) {
747 goto error;
748 }
749
750 /* Add quit pipe */
751 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
752 if (ret < 0) {
753 goto error;
754 }
755
756 return 0;
757
758 error:
759 return ret;
760 }
761
762 /*
763 * Check if the thread quit pipe was triggered.
764 *
765 * Return 1 if it was triggered, else 0.
766 */
767 static int check_thread_quit_pipe(int fd, uint32_t events)
768 {
769 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
770 return 1;
771 }
772
773 return 0;
774 }
775
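/*
 * Typical usage of the two helpers above, following the pattern of
 * relay_thread_listener() below (error handling elided, "my_fd" is an
 * illustrative descriptor):
 *
 *	struct lttng_poll_event events;
 *	int i, ret;
 *
 *	ret = create_thread_poll_set(&events, 2);	// quit pipe + my_fd
 *	ret = lttng_poll_add(&events, my_fd, LPOLLIN | LPOLLRDHUP);
 *	while (1) {
 *		ret = lttng_poll_wait(&events, -1);
 *		for (i = 0; i < ret; i++) {
 *			int pollfd = LTTNG_POLL_GETFD(&events, i);
 *			uint32_t revents = LTTNG_POLL_GETEV(&events, i);
 *
 *			if (check_thread_quit_pipe(pollfd, revents)) {
 *				goto exit;	// the daemon is shutting down
 *			}
 *			// ... handle activity on my_fd ...
 *		}
 *	}
 */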
776 /*
777 * Create and initialize a socket from a URI.
778 */
779 static struct lttcomm_sock *relay_socket_create(struct lttng_uri *uri)
780 {
781 int ret;
782 struct lttcomm_sock *sock = NULL;
783
784 sock = lttcomm_alloc_sock_from_uri(uri);
785 if (sock == NULL) {
786 ERR("Allocating socket");
787 goto error;
788 }
789
790 ret = lttcomm_create_sock(sock);
791 if (ret < 0) {
792 goto error;
793 }
794 DBG("Listening on sock %d", sock->fd);
795
796 ret = sock->ops->bind(sock);
797 if (ret < 0) {
798 goto error;
799 }
800
801 ret = sock->ops->listen(sock, -1);
802 if (ret < 0) {
803 goto error;
804
805 }
806
807 return sock;
808
809 error:
810 if (sock) {
811 lttcomm_destroy_sock(sock);
812 }
813 return NULL;
814 }
815
816 /*
817 * This thread listens for new connections on the network.
818 */
819 static void *relay_thread_listener(void *data)
820 {
821 int i, ret, pollfd, err = -1;
822 uint32_t revents, nb_fd;
823 struct lttng_poll_event events;
824 struct lttcomm_sock *control_sock, *data_sock;
825
826 DBG("[thread] Relay listener started");
827
828 health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER);
829
830 health_code_update();
831
832 control_sock = relay_socket_create(control_uri);
833 if (!control_sock) {
834 goto error_sock_control;
835 }
836
837 data_sock = relay_socket_create(data_uri);
838 if (!data_sock) {
839 goto error_sock_relay;
840 }
841
842 /*
843 * Pass 3 as size here for the thread quit pipe, control and
844 * data socket.
845 */
846 ret = create_thread_poll_set(&events, 3);
847 if (ret < 0) {
848 goto error_create_poll;
849 }
850
851 /* Add the control socket */
852 ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP);
853 if (ret < 0) {
854 goto error_poll_add;
855 }
856
857 /* Add the data socket */
858 ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP);
859 if (ret < 0) {
860 goto error_poll_add;
861 }
862
863 lttng_relay_notify_ready();
864
865 if (testpoint(relayd_thread_listener)) {
866 goto error_testpoint;
867 }
868
869 while (1) {
870 health_code_update();
871
872 DBG("Listener accepting connections");
873
874 restart:
875 health_poll_entry();
876 ret = lttng_poll_wait(&events, -1);
877 health_poll_exit();
878 if (ret < 0) {
879 /*
880 * Restart interrupted system call.
881 */
882 if (errno == EINTR) {
883 goto restart;
884 }
885 goto error;
886 }
887
888 nb_fd = ret;
889
890 DBG("Relay new connection received");
891 for (i = 0; i < nb_fd; i++) {
892 health_code_update();
893
894 /* Fetch once the poll data */
895 revents = LTTNG_POLL_GETEV(&events, i);
896 pollfd = LTTNG_POLL_GETFD(&events, i);
897
898 if (!revents) {
899 /*
900 * No activity for this FD (poll
901 * implementation).
902 */
903 continue;
904 }
905
906 /* Thread quit pipe has been closed. Killing thread. */
907 ret = check_thread_quit_pipe(pollfd, revents);
908 if (ret) {
909 err = 0;
910 goto exit;
911 }
912
913 if (revents & LPOLLIN) {
914 /*
915 * A new connection is requested, therefore a
916 * sessiond/consumerd connection is allocated in
917 * this thread, enqueued to a global queue and
918 * dequeued (and freed) in the worker thread.
919 */
920 int val = 1;
921 struct relay_connection *new_conn;
922 struct lttcomm_sock *newsock;
923 enum connection_type type;
924
925 if (pollfd == data_sock->fd) {
926 type = RELAY_DATA;
927 newsock = data_sock->ops->accept(data_sock);
928 } else {
929 assert(pollfd == control_sock->fd);
930 type = RELAY_CONTROL;
931 newsock = control_sock->ops->accept(control_sock);
932 }
933 if (!newsock) {
934 PERROR("accepting sock");
935 goto error;
936 }
937 /* Only log after the NULL check; accept() may have failed. */
938 DBG("Relay %s connection accepted, socket %d",
939 type == RELAY_DATA ? "data" : "control",
940 newsock->fd);
941
942 ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
943 sizeof(val));
944 if (ret < 0) {
945 PERROR("setsockopt inet");
946 lttcomm_destroy_sock(newsock);
947 goto error;
948 }
949
950 ret = socket_apply_keep_alive_config(newsock->fd);
951 if (ret < 0) {
952 ERR("Failed to apply TCP keep-alive configuration on socket (%i)",
953 newsock->fd);
954 lttcomm_destroy_sock(newsock);
955 goto error;
956 }
957
958 new_conn = connection_create(newsock, type);
959 if (!new_conn) {
960 lttcomm_destroy_sock(newsock);
961 goto error;
962 }
963
964 /* Enqueue request for the dispatcher thread. */
965 cds_wfcq_enqueue(&relay_conn_queue.head, &relay_conn_queue.tail,
966 &new_conn->qnode);
967
968 /*
969 * Wake the dispatch queue futex.
970 * Implicit memory barrier with the
971 * exchange in cds_wfcq_enqueue.
972 */
973 futex_nto1_wake(&relay_conn_queue.futex);
974 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
975 ERR("socket poll error");
976 goto error;
977 } else {
978 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
979 goto error;
980 }
981 }
982 }
983
984 exit:
985 error:
986 error_poll_add:
987 error_testpoint:
988 lttng_poll_clean(&events);
989 error_create_poll:
990 if (data_sock->fd >= 0) {
991 ret = data_sock->ops->close(data_sock);
992 if (ret) {
993 PERROR("close");
994 }
995 }
996 lttcomm_destroy_sock(data_sock);
997 error_sock_relay:
998 if (control_sock->fd >= 0) {
999 ret = control_sock->ops->close(control_sock);
1000 if (ret) {
1001 PERROR("close");
1002 }
1003 }
1004 lttcomm_destroy_sock(control_sock);
1005 error_sock_control:
1006 if (err) {
1007 health_error();
1008 ERR("Health error occurred in %s", __func__);
1009 }
1010 health_unregister(health_relayd);
1011 DBG("Relay listener thread cleanup complete");
1012 lttng_relay_stop_threads();
1013 return NULL;
1014 }
1015
1016 /*
1017 * This thread dispatches incoming connection requests to the worker thread.
1018 */
1019 static void *relay_thread_dispatcher(void *data)
1020 {
1021 int err = -1;
1022 ssize_t ret;
1023 struct cds_wfcq_node *node;
1024 struct relay_connection *new_conn = NULL;
1025
1026 DBG("[thread] Relay dispatcher started");
1027
1028 health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER);
1029
1030 if (testpoint(relayd_thread_dispatcher)) {
1031 goto error_testpoint;
1032 }
1033
1034 health_code_update();
1035
1036 for (;;) {
1037 health_code_update();
1038
1039 /* Atomically prepare the queue futex */
1040 futex_nto1_prepare(&relay_conn_queue.futex);
1041
1042 if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
1043 break;
1044 }
1045
1046 do {
1047 health_code_update();
1048
1049 /* Dequeue commands */
1050 node = cds_wfcq_dequeue_blocking(&relay_conn_queue.head,
1051 &relay_conn_queue.tail);
1052 if (node == NULL) {
1053 DBG("Woken up but nothing in the relay command queue");
1054 /* Continue thread execution */
1055 break;
1056 }
1057 new_conn = caa_container_of(node, struct relay_connection, qnode);
1058
1059 DBG("Dispatching request waiting on sock %d", new_conn->sock->fd);
1060
1061 /*
1062 * Inform worker thread of the new request. This
1063 * call is blocking so we can be assured that
1064 * the data will be read at some point in time
1065 * or wait until the end of the world :)
1066 */
1067 ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn));
1068 if (ret < 0) {
1069 PERROR("write connection pipe");
1070 connection_put(new_conn);
1071 goto error;
1072 }
1073 } while (node != NULL);
1074
1075 /* Futex wait on queue. Blocking call on futex() */
1076 health_poll_entry();
1077 futex_nto1_wait(&relay_conn_queue.futex);
1078 health_poll_exit();
1079 }
1080
1081 /* Normal exit, no error */
1082 err = 0;
1083
1084 error:
1085 error_testpoint:
1086 if (err) {
1087 health_error();
1088 ERR("Health error occurred in %s", __func__);
1089 }
1090 health_unregister(health_relayd);
1091 DBG("Dispatch thread dying");
1092 lttng_relay_stop_threads();
1093 return NULL;
1094 }
1095
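/*
 * Hand-off sketch: only the *pointer* to the new relay_connection transits
 * through relay_conn_pipe; the object itself stays in memory and its
 * reference is transferred to the reader. The worker-thread side (further
 * down in this file, not shown in this excerpt) is assumed to mirror the
 * write above roughly as follows:
 *
 *	struct relay_connection *conn;
 *	ssize_t ret;
 *
 *	ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn));
 *	if (ret < (ssize_t) sizeof(conn)) {
 *		// error: the dispatcher end was closed or the read was short
 *	}
 *	// conn now owns the reference enqueued by the dispatcher
 */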
1096 /*
1097 * Set index data from the control port to a given index object.
1098 */
1099 static int set_index_control_data(struct relay_index *index,
1100 struct lttcomm_relayd_index *data,
1101 struct relay_connection *conn)
1102 {
1103 struct ctf_packet_index index_data;
1104
1105 /*
1106 * The index on disk is encoded in big endian.
1107 */
1108 index_data.packet_size = htobe64(data->packet_size);
1109 index_data.content_size = htobe64(data->content_size);
1110 index_data.timestamp_begin = htobe64(data->timestamp_begin);
1111 index_data.timestamp_end = htobe64(data->timestamp_end);
1112 index_data.events_discarded = htobe64(data->events_discarded);
1113 index_data.stream_id = htobe64(data->stream_id);
1114
1115 if (conn->minor >= 8) {
1116 index->index_data.stream_instance_id = htobe64(data->stream_instance_id);
1117 index->index_data.packet_seq_num = htobe64(data->packet_seq_num);
1118 }
1119
1120 return relay_index_set_data(index, &index_data);
1121 }
1122
1123 /*
1124 * Handle the RELAYD_CREATE_SESSION command.
1125 *
1126 * On success, send back the session id or else return a negative value.
1127 */
1128 static int relay_create_session(const struct lttcomm_relayd_hdr *recv_hdr,
1129 struct relay_connection *conn,
1130 const struct lttng_buffer_view *payload)
1131 {
1132 int ret = 0;
1133 ssize_t send_ret;
1134 struct relay_session *session;
1135 struct lttcomm_relayd_status_session reply;
1136 char session_name[LTTNG_NAME_MAX];
1137 char hostname[LTTNG_HOST_NAME_MAX];
1138 uint32_t live_timer = 0;
1139 bool snapshot = false;
1140
1141 memset(session_name, 0, LTTNG_NAME_MAX);
1142 memset(hostname, 0, LTTNG_HOST_NAME_MAX);
1143
1144 memset(&reply, 0, sizeof(reply));
1145
1146 switch (conn->minor) {
1147 case 1:
1148 case 2:
1149 case 3:
1150 break;
1151 case 4: /* LTTng sessiond 2.4 */
1152 default:
1153 ret = cmd_create_session_2_4(payload, session_name,
1154 hostname, &live_timer, &snapshot);
1155 }
1156 if (ret < 0) {
1157 goto send_reply;
1158 }
1159
1160 session = session_create(session_name, hostname, live_timer,
1161 snapshot, conn->major, conn->minor);
1162 if (!session) {
1163 ret = -1;
1164 goto send_reply;
1165 }
1166 assert(!conn->session);
1167 conn->session = session;
1168 DBG("Created session %" PRIu64, session->id);
1169
1170 reply.session_id = htobe64(session->id);
1171
1172 send_reply:
1173 if (ret < 0) {
1174 reply.ret_code = htobe32(LTTNG_ERR_FATAL);
1175 } else {
1176 reply.ret_code = htobe32(LTTNG_OK);
1177 }
1178
1179 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1180 if (send_ret < (ssize_t) sizeof(reply)) {
1181 ERR("Failed to send \"create session\" command reply (ret = %zd)",
1182 send_ret);
1183 ret = -1;
1184 }
1185
1186 return ret;
1187 }
1188
1189 /*
1190 * When we have received all the streams and the metadata for a channel,
1191 * we make them visible to the viewer threads.
1192 */
1193 static void publish_connection_local_streams(struct relay_connection *conn)
1194 {
1195 struct relay_stream *stream;
1196 struct relay_session *session = conn->session;
1197
1198 /*
1199 * We publish all streams belonging to a session atomically wrt
1200 * session lock.
1201 */
1202 pthread_mutex_lock(&session->lock);
1203 rcu_read_lock();
1204 cds_list_for_each_entry_rcu(stream, &session->recv_list,
1205 recv_node) {
1206 stream_publish(stream);
1207 }
1208 rcu_read_unlock();
1209
1210 /*
1211 * Inform the viewer that there are new streams in the session.
1212 */
1213 if (session->viewer_attached) {
1214 uatomic_set(&session->new_streams, 1);
1215 }
1216 pthread_mutex_unlock(&session->lock);
1217 }
1218
1219 /*
1220 * relay_add_stream: allocate a new stream for a session
1221 */
1222 static int relay_add_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1223 struct relay_connection *conn,
1224 const struct lttng_buffer_view *payload)
1225 {
1226 int ret;
1227 ssize_t send_ret;
1228 struct relay_session *session = conn->session;
1229 struct relay_stream *stream = NULL;
1230 struct lttcomm_relayd_status_stream reply;
1231 struct ctf_trace *trace = NULL;
1232 uint64_t stream_handle = -1ULL;
1233 char *path_name = NULL, *channel_name = NULL;
1234 uint64_t tracefile_size = 0, tracefile_count = 0;
1235
1236 if (!session || !conn->version_check_done) {
1237 ERR("Trying to add a stream before version check");
1238 ret = -1;
1239 goto end_no_session;
1240 }
1241
1242 switch (session->minor) {
1243 case 1: /* LTTng sessiond 2.1. Allocates path_name and channel_name. */
1244 ret = cmd_recv_stream_2_1(payload, &path_name,
1245 &channel_name, session);
1246 break;
1247 case 2: /* LTTng sessiond 2.2. Allocates path_name and channel_name. */
1248 default:
1249 ret = cmd_recv_stream_2_2(payload, &path_name,
1250 &channel_name, &tracefile_size, &tracefile_count,
1251 session);
1252 break;
1253 }
1254 if (ret < 0) {
1255 goto send_reply;
1256 }
1257
1258 trace = ctf_trace_get_by_path_or_create(session, path_name);
1259 if (!trace) {
1260 goto send_reply;
1261 }
1262 /* This stream here has one reference on the trace. */
1263
1264 pthread_mutex_lock(&last_relay_stream_id_lock);
1265 stream_handle = ++last_relay_stream_id;
1266 pthread_mutex_unlock(&last_relay_stream_id_lock);
1267
1268 /* We pass ownership of path_name and channel_name. */
1269 stream = stream_create(trace, stream_handle, path_name,
1270 channel_name, tracefile_size, tracefile_count);
1271 path_name = NULL;
1272 channel_name = NULL;
1273
1274 /*
1275 * Streams are the owners of their trace. Reference to trace is
1276 * kept within stream_create().
1277 */
1278 ctf_trace_put(trace);
1279
1280 send_reply:
1281 memset(&reply, 0, sizeof(reply));
1282 reply.handle = htobe64(stream_handle);
1283 if (!stream) {
1284 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1285 } else {
1286 reply.ret_code = htobe32(LTTNG_OK);
1287 }
1288
1289 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1290 sizeof(struct lttcomm_relayd_status_stream), 0);
1291 if (send_ret < (ssize_t) sizeof(reply)) {
1292 ERR("Failed to send \"add stream\" command reply (ret = %zd)",
1293 send_ret);
1294 ret = -1;
1295 }
1296
1297 end_no_session:
1298 free(path_name);
1299 free(channel_name);
1300 return ret;
1301 }
1302
1303 /*
1304 * relay_close_stream: close a specific stream
1305 */
1306 static int relay_close_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1307 struct relay_connection *conn,
1308 const struct lttng_buffer_view *payload)
1309 {
1310 int ret;
1311 ssize_t send_ret;
1312 struct relay_session *session = conn->session;
1313 struct lttcomm_relayd_close_stream stream_info;
1314 struct lttcomm_relayd_generic_reply reply;
1315 struct relay_stream *stream;
1316
1317 DBG("Close stream received");
1318
1319 if (!session || !conn->version_check_done) {
1320 ERR("Trying to close a stream before version check");
1321 ret = -1;
1322 goto end_no_session;
1323 }
1324
1325 if (payload->size < sizeof(stream_info)) {
1326 ERR("Unexpected payload size in \"relay_close_stream\": expected >= %zu bytes, got %zu bytes",
1327 sizeof(stream_info), payload->size);
1328 ret = -1;
1329 goto end_no_session;
1330 }
1331 memcpy(&stream_info, payload->data, sizeof(stream_info));
1332 stream_info.stream_id = be64toh(stream_info.stream_id);
1333 stream_info.last_net_seq_num = be64toh(stream_info.last_net_seq_num);
1334
1335 stream = stream_get_by_id(stream_info.stream_id);
1336 if (!stream) {
1337 ret = -1;
1338 goto end;
1339 }
1340
1341 /*
1342 * Set last_net_seq_num before the close flag. Required by data
1343 * pending check.
1344 */
1345 pthread_mutex_lock(&stream->lock);
1346 stream->last_net_seq_num = stream_info.last_net_seq_num;
1347 pthread_mutex_unlock(&stream->lock);
1348
1349 /*
1350 * This is one of the conditions which may trigger a stream close
1351 * with the others being:
1352 * 1) A close command is received for a stream
1353 * 2) The control connection owning the stream is closed
1354 * 3) We have received all of the stream's data _after_ a close
1355 * request.
1356 */
1357 try_stream_close(stream);
1358 if (stream->is_metadata) {
1359 struct relay_viewer_stream *vstream;
1360
1361 vstream = viewer_stream_get_by_id(stream->stream_handle);
1362 if (vstream) {
1363 if (vstream->metadata_sent == stream->metadata_received) {
1364 /*
1365 * Since all the metadata has been sent to the
1366 * viewer and that we have a request to close
1367 * its stream, we can safely teardown the
1368 * corresponding metadata viewer stream.
1369 */
1370 viewer_stream_put(vstream);
1371 }
1372 /* Put local reference. */
1373 viewer_stream_put(vstream);
1374 }
1375 }
1376 stream_put(stream);
1377 ret = 0;
1378
1379 end:
1380 memset(&reply, 0, sizeof(reply));
1381 if (ret < 0) {
1382 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1383 } else {
1384 reply.ret_code = htobe32(LTTNG_OK);
1385 }
1386 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1387 sizeof(struct lttcomm_relayd_generic_reply), 0);
1388 if (send_ret < (ssize_t) sizeof(reply)) {
1389 ERR("Failed to send \"close stream\" command reply (ret = %zd)",
1390 send_ret);
1391 ret = -1;
1392 }
1393
1394 end_no_session:
1395 return ret;
1396 }
1397
1398 /*
1399 * relay_reset_metadata: reset a metadata stream
1400 */
1401 static
1402 int relay_reset_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1403 struct relay_connection *conn,
1404 const struct lttng_buffer_view *payload)
1405 {
1406 int ret;
1407 ssize_t send_ret;
1408 struct relay_session *session = conn->session;
1409 struct lttcomm_relayd_reset_metadata stream_info;
1410 struct lttcomm_relayd_generic_reply reply;
1411 struct relay_stream *stream;
1412
1413 DBG("Reset metadata received");
1414
1415 if (!session || !conn->version_check_done) {
1416 ERR("Trying to reset a metadata stream before version check");
1417 ret = -1;
1418 goto end_no_session;
1419 }
1420
1421 if (payload->size < sizeof(stream_info)) {
1422 ERR("Unexpected payload size in \"relay_reset_metadata\": expected >= %zu bytes, got %zu bytes",
1423 sizeof(stream_info), payload->size);
1424 ret = -1;
1425 goto end_no_session;
1426 }
1427 memcpy(&stream_info, payload->data, sizeof(stream_info));
1428 stream_info.stream_id = be64toh(stream_info.stream_id);
1429 stream_info.version = be64toh(stream_info.version);
1430
1431 DBG("Update metadata to version %" PRIu64, stream_info.version);
1432
1433 /* Unsupported for live sessions for now. */
1434 if (session->live_timer != 0) {
1435 ret = -1;
1436 goto end;
1437 }
1438
1439 stream = stream_get_by_id(stream_info.stream_id);
1440 if (!stream) {
1441 ret = -1;
1442 goto end;
1443 }
1444 pthread_mutex_lock(&stream->lock);
1445 if (!stream->is_metadata) {
1446 ret = -1;
1447 goto end_unlock;
1448 }
1449
1450 ret = utils_rotate_stream_file(stream->path_name, stream->channel_name,
1451 0, 0, -1, -1, stream->stream_fd->fd, NULL,
1452 &stream->stream_fd->fd);
1453 if (ret < 0) {
1454 ERR("Failed to rotate metadata file %s of channel %s",
1455 stream->path_name, stream->channel_name);
1456 goto end_unlock;
1457 }
1458
1459 end_unlock:
1460 pthread_mutex_unlock(&stream->lock);
1461 stream_put(stream);
1462
1463 end:
1464 memset(&reply, 0, sizeof(reply));
1465 if (ret < 0) {
1466 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1467 } else {
1468 reply.ret_code = htobe32(LTTNG_OK);
1469 }
1470 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1471 sizeof(struct lttcomm_relayd_generic_reply), 0);
1472 if (send_ret < (ssize_t) sizeof(reply)) {
1473 ERR("Failed to send \"reset metadata\" command reply (ret = %zd)",
1474 send_ret);
1475 ret = -1;
1476 }
1477
1478 end_no_session:
1479 return ret;
1480 }
1481
1482 /*
1483 * relay_unknown_command: reply with an error code when an unknown command is received
1484 */
1485 static void relay_unknown_command(struct relay_connection *conn)
1486 {
1487 struct lttcomm_relayd_generic_reply reply;
1488 ssize_t send_ret;
1489
1490 memset(&reply, 0, sizeof(reply));
1491 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1492 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1493 if (send_ret < (ssize_t) sizeof(reply)) {
1494 ERR("Failed to send \"unknown command\" command reply (ret = %zd)", send_ret);
1495 }
1496 }
1497
1498 /*
1499 * relay_start: send an acknowledgment to the client to tell if we are
1500 * ready to receive data. We are ready if a session is established.
1501 */
1502 static int relay_start(const struct lttcomm_relayd_hdr *recv_hdr,
1503 struct relay_connection *conn,
1504 const struct lttng_buffer_view *payload)
1505 {
1506 int ret = 0;
1507 ssize_t send_ret;
1508 struct lttcomm_relayd_generic_reply reply;
1509 struct relay_session *session = conn->session;
1510
1511 if (!session) {
1512 DBG("Trying to start the streaming without a session established");
1513 ret = htobe32(LTTNG_ERR_UNK);
1514 }
1515
1516 memset(&reply, 0, sizeof(reply));
1517 reply.ret_code = htobe32(LTTNG_OK);
1518 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1519 sizeof(reply), 0);
1520 if (send_ret < (ssize_t) sizeof(reply)) {
1521 ERR("Failed to send \"relay_start\" command reply (ret = %zd)",
1522 send_ret);
1523 ret = -1;
1524 }
1525
1526 return ret;
1527 }
1528
1529 /*
1530 * Append padding to the file pointed to by the file descriptor fd.
1531 */
1532 static int write_padding_to_file(int fd, uint32_t size)
1533 {
1534 ssize_t ret = 0;
1535 char *zeros;
1536
1537 if (size == 0) {
1538 goto end;
1539 }
1540
1541 zeros = zmalloc(size);
1542 if (zeros == NULL) {
1543 PERROR("zmalloc zeros for padding");
1544 ret = -1;
1545 goto end;
1546 }
1547
1548 ret = lttng_write(fd, zeros, size);
1549 if (ret < size) {
1550 PERROR("write padding to file");
1551 }
1552
1553 free(zeros);
1554
1555 end:
1556 return ret;
1557 }
1558
1559 /*
1560 * relay_recv_metadata: receive the metadata for the session.
1561 */
1562 static int relay_recv_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1563 struct relay_connection *conn,
1564 const struct lttng_buffer_view *payload)
1565 {
1566 int ret = 0;
1567 ssize_t size_ret;
1568 struct relay_session *session = conn->session;
1569 struct lttcomm_relayd_metadata_payload metadata_payload_header;
1570 struct relay_stream *metadata_stream;
1571 uint64_t metadata_payload_size;
1572
1573 if (!session) {
1574 ERR("Metadata sent before version check");
1575 ret = -1;
1576 goto end;
1577 }
1578
1579 if (recv_hdr->data_size < sizeof(struct lttcomm_relayd_metadata_payload)) {
1580 ERR("Incorrect data size");
1581 ret = -1;
1582 goto end;
1583 }
1584 metadata_payload_size = recv_hdr->data_size -
1585 sizeof(struct lttcomm_relayd_metadata_payload);
1586
1587 memcpy(&metadata_payload_header, payload->data,
1588 sizeof(metadata_payload_header));
1589 metadata_payload_header.stream_id = be64toh(
1590 metadata_payload_header.stream_id);
1591 metadata_payload_header.padding_size = be32toh(
1592 metadata_payload_header.padding_size);
1593
1594 metadata_stream = stream_get_by_id(metadata_payload_header.stream_id);
1595 if (!metadata_stream) {
1596 ret = -1;
1597 goto end;
1598 }
1599
1600 pthread_mutex_lock(&metadata_stream->lock);
1601
1602 size_ret = lttng_write(metadata_stream->stream_fd->fd,
1603 payload->data + sizeof(metadata_payload_header),
1604 metadata_payload_size);
1605 if (size_ret < metadata_payload_size) {
1606 ERR("Relay error writing metadata on file");
1607 ret = -1;
1608 goto end_put;
1609 }
1610
1611 size_ret = write_padding_to_file(metadata_stream->stream_fd->fd,
1612 metadata_payload_header.padding_size);
1613 if (size_ret < (int64_t) metadata_payload_header.padding_size) {
1614 ret = -1;
1615 goto end_put;
1616 }
1617
1618 metadata_stream->metadata_received +=
1619 metadata_payload_size + metadata_payload_header.padding_size;
1620 DBG2("Relay metadata written. Updated metadata_received %" PRIu64,
1621 metadata_stream->metadata_received);
1622
1623 end_put:
1624 pthread_mutex_unlock(&metadata_stream->lock);
1625 stream_put(metadata_stream);
1626 end:
1627 return ret;
1628 }
1629
1630 /*
1631 * relay_send_version: send relayd version number
1632 */
1633 static int relay_send_version(const struct lttcomm_relayd_hdr *recv_hdr,
1634 struct relay_connection *conn,
1635 const struct lttng_buffer_view *payload)
1636 {
1637 int ret;
1638 ssize_t send_ret;
1639 struct lttcomm_relayd_version reply, msg;
1640 bool compatible = true;
1641
1642 conn->version_check_done = true;
1643
1644 /* Get version from the other side. */
1645 if (payload->size < sizeof(msg)) {
1646 ERR("Unexpected payload size in \"relay_send_version\": expected >= %zu bytes, got %zu bytes",
1647 sizeof(msg), payload->size);
1648 ret = -1;
1649 goto end;
1650 }
1651
1652 memcpy(&msg, payload->data, sizeof(msg));
1653 msg.major = be32toh(msg.major);
1654 msg.minor = be32toh(msg.minor);
1655
1656 memset(&reply, 0, sizeof(reply));
1657 reply.major = RELAYD_VERSION_COMM_MAJOR;
1658 reply.minor = RELAYD_VERSION_COMM_MINOR;
1659
1660 /* Major versions must be the same */
1661 if (reply.major != msg.major) {
1662 DBG("Incompatible major versions (%u vs %u), deleting session",
1663 reply.major, msg.major);
1664 compatible = false;
1665 }
1666
1667 conn->major = reply.major;
1668 /* We adapt to the lowest compatible version */
1669 if (reply.minor <= msg.minor) {
1670 conn->minor = reply.minor;
1671 } else {
1672 conn->minor = msg.minor;
1673 }
1674
1675 reply.major = htobe32(reply.major);
1676 reply.minor = htobe32(reply.minor);
1677 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1678 sizeof(reply), 0);
1679 if (send_ret < (ssize_t) sizeof(reply)) {
1680 ERR("Failed to send \"send version\" command reply (ret = %zd)",
1681 send_ret);
1682 ret = -1;
1683 goto end;
1684 } else {
1685 ret = 0;
1686 }
1687
1688 if (!compatible) {
1689 ret = -1;
1690 goto end;
1691 }
1692
1693 DBG("Version check done using protocol %u.%u", conn->major,
1694 conn->minor);
1695
1696 end:
1697 return ret;
1698 }
1699
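/*
 * Negotiation example (version numbers are illustrative): if this relayd
 * speaks protocol 2.11 and the peer announces 2.8, the majors match and
 * the connection settles on minor 8, the lowest of the two. If the peer
 * announced 3.x instead, the majors would differ: the reply is still
 * sent, but the connection is flagged incompatible and this function
 * returns -1.
 */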
1700 /*
1701 * Check for data pending for a given stream id from the session daemon.
1702 */
1703 static int relay_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1704 struct relay_connection *conn,
1705 const struct lttng_buffer_view *payload)
1706 {
1707 struct relay_session *session = conn->session;
1708 struct lttcomm_relayd_data_pending msg;
1709 struct lttcomm_relayd_generic_reply reply;
1710 struct relay_stream *stream;
1711 ssize_t send_ret;
1712 int ret;
1713
1714 DBG("Data pending command received");
1715
1716 if (!session || !conn->version_check_done) {
1717 ERR("Trying to check for data before version check");
1718 ret = -1;
1719 goto end_no_session;
1720 }
1721
1722 if (payload->size < sizeof(msg)) {
1723 ERR("Unexpected payload size in \"relay_data_pending\": expected >= %zu bytes, got %zu bytes",
1724 sizeof(msg), payload->size);
1725 ret = -1;
1726 goto end_no_session;
1727 }
1728 memcpy(&msg, payload->data, sizeof(msg));
1729 msg.stream_id = be64toh(msg.stream_id);
1730 msg.last_net_seq_num = be64toh(msg.last_net_seq_num);
1731
1732 stream = stream_get_by_id(msg.stream_id);
1733 if (stream == NULL) {
1734 ret = -1;
1735 goto end;
1736 }
1737
1738 pthread_mutex_lock(&stream->lock);
1739
1740 DBG("Data pending for stream id %" PRIu64 " prev_seq %" PRIu64
1741 " and last_seq %" PRIu64, msg.stream_id,
1742 stream->prev_seq, msg.last_net_seq_num);
1743
1744 /* Avoid wrapping issue */
1745 if (((int64_t) (stream->prev_seq - msg.last_net_seq_num)) >= 0) {
1746 /* Data has in fact been written and is NOT pending */
1747 ret = 0;
1748 } else {
1749 /* Data still being streamed thus pending */
1750 ret = 1;
1751 }
1752
1753 stream->data_pending_check_done = true;
1754 pthread_mutex_unlock(&stream->lock);
1755
1756 stream_put(stream);
1757 end:
1758
1759 memset(&reply, 0, sizeof(reply));
1760 reply.ret_code = htobe32(ret);
1761 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1762 if (send_ret < (ssize_t) sizeof(reply)) {
1763 ERR("Failed to send \"data pending\" command reply (ret = %zd)",
1764 send_ret);
1765 ret = -1;
1766 }
1767
1768 end_no_session:
1769 return ret;
1770 }
1771
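/*
 * The signed-difference test above is the usual wrap-safe sequence number
 * comparison. Worked example in 8-bit arithmetic for brevity (the code
 * uses 64-bit counters): with prev_seq = 5 and last_net_seq_num = 250,
 * the unsigned difference 5 - 250 wraps to 11, which is positive once
 * reinterpreted as signed, so prev_seq is correctly treated as being
 * ahead of a counter that recently wrapped; a plain
 * prev_seq >= last_net_seq_num test would get this case wrong.
 */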
1772 /*
1773 * Wait for the control socket to reach a quiescent state.
1774 *
1775 * Note that, for now, receiving this command from the session daemon
1776 * means that every command or data received on the control socket
1777 * before this command has already been handled. This is why we simply
1778 * return OK here.
1779 */
1780 static int relay_quiescent_control(const struct lttcomm_relayd_hdr *recv_hdr,
1781 struct relay_connection *conn,
1782 const struct lttng_buffer_view *payload)
1783 {
1784 int ret;
1785 ssize_t send_ret;
1786 struct relay_stream *stream;
1787 struct lttcomm_relayd_quiescent_control msg;
1788 struct lttcomm_relayd_generic_reply reply;
1789
1790 DBG("Checking quiescent state on control socket");
1791
1792 if (!conn->session || !conn->version_check_done) {
1793 ERR("Trying to check for data before version check");
1794 ret = -1;
1795 goto end_no_session;
1796 }
1797
1798 if (payload->size < sizeof(msg)) {
1799 ERR("Unexpected payload size in \"relay_quiescent_control\": expected >= %zu bytes, got %zu bytes",
1800 sizeof(msg), payload->size);
1801 ret = -1;
1802 goto end_no_session;
1803 }
1804 memcpy(&msg, payload->data, sizeof(msg));
1805 msg.stream_id = be64toh(msg.stream_id);
1806
1807 stream = stream_get_by_id(msg.stream_id);
1808 if (!stream) {
1809 goto reply;
1810 }
1811 pthread_mutex_lock(&stream->lock);
1812 stream->data_pending_check_done = true;
1813 pthread_mutex_unlock(&stream->lock);
1814
1815 DBG("Relay quiescent control pending flag set to %" PRIu64, msg.stream_id);
1816 stream_put(stream);
1817 reply:
1818 memset(&reply, 0, sizeof(reply));
1819 reply.ret_code = htobe32(LTTNG_OK);
1820 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1821 if (send_ret < (ssize_t) sizeof(reply)) {
1822 ERR("Failed to send \"quiescent control\" command reply (ret = %zd)",
1823 send_ret);
1824 ret = -1;
1825 } else {
1826 ret = 0;
1827 }
1828
1829 end_no_session:
1830 return ret;
1831 }
1832
1833 /*
1834 * Initialize a data pending command. This means that a consumer is about
1835 * to ask for data pending for each stream it holds. Simply iterate over
1836 * all streams of a session and clear the data_pending_check_done flag.
1837 *
1838 * This command returns to the client a LTTNG_OK code.
1839 */
1840 static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1841 struct relay_connection *conn,
1842 const struct lttng_buffer_view *payload)
1843 {
1844 int ret;
1845 ssize_t send_ret;
1846 struct lttng_ht_iter iter;
1847 struct lttcomm_relayd_begin_data_pending msg;
1848 struct lttcomm_relayd_generic_reply reply;
1849 struct relay_stream *stream;
1850
1851 assert(recv_hdr);
1852 assert(conn);
1853
1854 DBG("Init streams for data pending");
1855
1856 if (!conn->session || !conn->version_check_done) {
1857 ERR("Trying to check for data before version check");
1858 ret = -1;
1859 goto end_no_session;
1860 }
1861
1862 if (payload->size < sizeof(msg)) {
1863 ERR("Unexpected payload size in \"relay_begin_data_pending\": expected >= %zu bytes, got %zu bytes",
1864 sizeof(msg), payload->size);
1865 ret = -1;
1866 goto end_no_session;
1867 }
1868 memcpy(&msg, payload->data, sizeof(msg));
1869 msg.session_id = be64toh(msg.session_id);
1870
1871 /*
1872 * Iterate over all streams to set the begin data pending flag.
1873 * For now, the streams are indexed by stream handle so we have
1874 * to iterate over all streams to find the one associated with
1875 * the right session_id.
1876 */
1877 rcu_read_lock();
1878 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
1879 node.node) {
1880 if (!stream_get(stream)) {
1881 continue;
1882 }
1883 if (stream->trace->session->id == msg.session_id) {
1884 pthread_mutex_lock(&stream->lock);
1885 stream->data_pending_check_done = false;
1886 pthread_mutex_unlock(&stream->lock);
1887 DBG("Set begin data pending flag to stream %" PRIu64,
1888 stream->stream_handle);
1889 }
1890 stream_put(stream);
1891 }
1892 rcu_read_unlock();
1893
1894 memset(&reply, 0, sizeof(reply));
1895 /* All good, send back reply. */
1896 reply.ret_code = htobe32(LTTNG_OK);
1897
1898 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1899 if (send_ret < (ssize_t) sizeof(reply)) {
1900 ERR("Failed to send \"begin data pending\" command reply (ret = %zd)",
1901 send_ret);
1902 ret = -1;
1903 } else {
1904 ret = 0;
1905 }
1906
1907 end_no_session:
1908 return ret;
1909 }
1910
1911 /*
1912 * End data pending command. This will check, for a given session id, if
1913 * each stream associated with it has its data_pending_check_done flag
1914 * set. If not, this means that the client lost track of the stream but
1915 * the data is still being streamed on our side. In this case, we inform
1916 * the client that data is in flight.
1917 *
1918 * Return to the client if there is data in flight or not with a ret_code.
1919 */
1920 static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1921 struct relay_connection *conn,
1922 const struct lttng_buffer_view *payload)
1923 {
1924 int ret;
1925 ssize_t send_ret;
1926 struct lttng_ht_iter iter;
1927 struct lttcomm_relayd_end_data_pending msg;
1928 struct lttcomm_relayd_generic_reply reply;
1929 struct relay_stream *stream;
1930 uint32_t is_data_inflight = 0;
1931
1932 DBG("End data pending command");
1933
1934 if (!conn->session || !conn->version_check_done) {
1935 ERR("Trying to check for data before version check");
1936 ret = -1;
1937 goto end_no_session;
1938 }
1939
1940 if (payload->size < sizeof(msg)) {
1941 ERR("Unexpected payload size in \"relay_end_data_pending\": expected >= %zu bytes, got %zu bytes",
1942 sizeof(msg), payload->size);
1943 ret = -1;
1944 goto end_no_session;
1945 }
1946 memcpy(&msg, payload->data, sizeof(msg));
1947 msg.session_id = be64toh(msg.session_id);
1948
1949 /*
1950 * Iterate over all streams to see if the begin data pending
1951 * flag is set.
1952 */
1953 rcu_read_lock();
1954 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
1955 node.node) {
1956 if (!stream_get(stream)) {
1957 continue;
1958 }
1959 if (stream->trace->session->id != msg.session_id) {
1960 stream_put(stream);
1961 continue;
1962 }
1963 pthread_mutex_lock(&stream->lock);
1964 if (!stream->data_pending_check_done) {
1965 if (!stream->closed || !(((int64_t) (stream->prev_seq - stream->last_net_seq_num)) >= 0)) {
1966 is_data_inflight = 1;
1967 DBG("Data is still in flight for stream %" PRIu64,
1968 stream->stream_handle);
1969 pthread_mutex_unlock(&stream->lock);
1970 stream_put(stream);
1971 break;
1972 }
1973 }
1974 pthread_mutex_unlock(&stream->lock);
1975 stream_put(stream);
1976 }
1977 rcu_read_unlock();
1978
1979 memset(&reply, 0, sizeof(reply));
1980 /* All good, send back reply. */
1981 reply.ret_code = htobe32(is_data_inflight);
1982
1983 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1984 if (send_ret < (ssize_t) sizeof(reply)) {
1985 ERR("Failed to send \"end data pending\" command reply (ret = %zd)",
1986 send_ret);
1987 ret = -1;
1988 } else {
1989 ret = 0;
1990 }
1991
1992 end_no_session:
1993 return ret;
1994 }
1995
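/*
 * Protocol sketch for the three data-pending commands handled above, as
 * driven by the session daemon when checking whether a stopped trace is
 * complete. The command names match the handlers in this file; the exact
 * ordering on the sessiond side is an assumption of this sketch:
 *
 *	RELAYD_BEGIN_DATA_PENDING(session_id)
 *		-> clears data_pending_check_done on every stream of the session
 *	RELAYD_DATA_PENDING(stream_id, last_net_seq_num)    // one per stream
 *		-> replies 0 (data written) or 1 (still pending), marks the
 *		   stream as checked
 *	RELAYD_END_DATA_PENDING(session_id)
 *		-> replies 1 if any unchecked stream still has data in flight,
 *		   0 otherwise
 */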
1996 /*
1997 * Receive an index for a specific stream.
1998 *
1999 * Return 0 on success else a negative value.
2000 */
2001 static int relay_recv_index(const struct lttcomm_relayd_hdr *recv_hdr,
2002 struct relay_connection *conn,
2003 const struct lttng_buffer_view *payload)
2004 {
2005 int ret;
2006 ssize_t send_ret;
2007 struct relay_session *session = conn->session;
2008 struct lttcomm_relayd_index index_info;
2009 struct relay_index *index;
2010 struct lttcomm_relayd_generic_reply reply;
2011 struct relay_stream *stream;
2012 size_t msg_len;
2013
2014 assert(conn);
2015
2016 DBG("Relay receiving index");
2017
2018 if (!session || !conn->version_check_done) {
2019 ERR("Trying to close a stream before version check");
2020 ret = -1;
2021 goto end_no_session;
2022 }
2023
2024 msg_len = lttcomm_relayd_index_len(
2025 lttng_to_index_major(conn->major, conn->minor),
2026 lttng_to_index_minor(conn->major, conn->minor));
2027 if (payload->size < msg_len) {
2028 ERR("Unexpected payload size in \"relay_recv_index\": expected >= %zu bytes, got %zu bytes",
2029 msg_len, payload->size);
2030 ret = -1;
2031 goto end_no_session;
2032 }
2033 memcpy(&index_info, payload->data, msg_len);
2034 index_info.relay_stream_id = be64toh(index_info.relay_stream_id);
2035 index_info.net_seq_num = be64toh(index_info.net_seq_num);
2036 index_info.packet_size = be64toh(index_info.packet_size);
2037 index_info.content_size = be64toh(index_info.content_size);
2038 index_info.timestamp_begin = be64toh(index_info.timestamp_begin);
2039 index_info.timestamp_end = be64toh(index_info.timestamp_end);
2040 index_info.events_discarded = be64toh(index_info.events_discarded);
2041 index_info.stream_id = be64toh(index_info.stream_id);
2042
2043 if (conn->minor >= 8) {
2044 index_info.stream_instance_id =
2045 be64toh(index_info.stream_instance_id);
2046 index_info.packet_seq_num = be64toh(index_info.packet_seq_num);
2047 }
2048
2049 stream = stream_get_by_id(index_info.relay_stream_id);
2050 if (!stream) {
2051 ERR("stream_get_by_id not found");
2052 ret = -1;
2053 goto end;
2054 }
2055 pthread_mutex_lock(&stream->lock);
2056
2057 /* Live beacon handling */
2058 if (index_info.packet_size == 0) {
2059 DBG("Received live beacon for stream %" PRIu64,
2060 stream->stream_handle);
2061
2062 /*
2063 * Only flag a stream inactive when it has already
2064 * received data and no indexes are in flight.
2065 */
2066 if (stream->index_received_seqcount > 0
2067 && stream->indexes_in_flight == 0) {
2068 stream->beacon_ts_end = index_info.timestamp_end;
2069 }
2070 ret = 0;
2071 goto end_stream_put;
2072 } else {
2073 stream->beacon_ts_end = -1ULL;
2074 }
2075
2076 if (stream->ctf_stream_id == -1ULL) {
2077 stream->ctf_stream_id = index_info.stream_id;
2078 }
2079 index = relay_index_get_by_id_or_create(stream, index_info.net_seq_num);
2080 if (!index) {
2081 ret = -1;
2082 ERR("relay_index_get_by_id_or_create index NULL");
2083 goto end_stream_put;
2084 }
2085 if (set_index_control_data(index, &index_info, conn)) {
2086 ERR("set_index_control_data error");
2087 relay_index_put(index);
2088 ret = -1;
2089 goto end_stream_put;
2090 }
2091 ret = relay_index_try_flush(index);
2092 if (ret == 0) {
2093 tracefile_array_commit_seq(stream->tfa);
2094 stream->index_received_seqcount++;
2095 } else if (ret > 0) {
2096 /* No flush. */
2097 ret = 0;
2098 } else {
2099 ERR("relay_index_try_flush error %d", ret);
2100 relay_index_put(index);
2101 ret = -1;
2102 }
2103
2104 end_stream_put:
2105 pthread_mutex_unlock(&stream->lock);
2106 stream_put(stream);
2107
2108 end:
2109
2110 memset(&reply, 0, sizeof(reply));
2111 if (ret < 0) {
2112 reply.ret_code = htobe32(LTTNG_ERR_UNK);
2113 } else {
2114 reply.ret_code = htobe32(LTTNG_OK);
2115 }
2116 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2117 if (send_ret < (ssize_t) sizeof(reply)) {
2118 ERR("Failed to send \"recv index\" command reply (ret = %zd)", send_ret);
2119 ret = -1;
2120 }
2121
2122 end_no_session:
2123 return ret;
2124 }
2125
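/*
 * Illustrative sketch (not compiled, not part of the relayd sources):
 * the sender of a RELAYD_SEND_INDEX command fills every
 * lttcomm_relayd_index field in network byte order, mirroring the
 * be64toh() conversions performed in relay_recv_index() above, and only
 * sends the length matching the negotiated version (the 2.8+ fields are
 * gated by the conn->minor >= 8 check). All parameter values are
 * placeholders; a packet_size of 0 would be interpreted as a live
 * beacon by the code above.
 */
#if 0
static void example_fill_index(struct lttcomm_relayd_index *msg,
		uint64_t relay_stream_id, uint64_t net_seq_num,
		uint64_t packet_size, uint64_t content_size,
		uint64_t timestamp_begin, uint64_t timestamp_end,
		uint64_t events_discarded, uint64_t ctf_stream_id,
		uint64_t stream_instance_id, uint64_t packet_seq_num)
{
	msg->relay_stream_id = htobe64(relay_stream_id);
	msg->net_seq_num = htobe64(net_seq_num);
	msg->packet_size = htobe64(packet_size);
	msg->content_size = htobe64(content_size);
	msg->timestamp_begin = htobe64(timestamp_begin);
	msg->timestamp_end = htobe64(timestamp_end);
	msg->events_discarded = htobe64(events_discarded);
	msg->stream_id = htobe64(ctf_stream_id);
	/* Only sent to/understood by 2.8+ peers. */
	msg->stream_instance_id = htobe64(stream_instance_id);
	msg->packet_seq_num = htobe64(packet_seq_num);
}
#endif
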
2126 /*
2127 * Receive the streams_sent message.
2128 *
2129 * Return 0 on success else a negative value.
2130 */
2131 static int relay_streams_sent(const struct lttcomm_relayd_hdr *recv_hdr,
2132 struct relay_connection *conn,
2133 const struct lttng_buffer_view *payload)
2134 {
2135 int ret;
2136 ssize_t send_ret;
2137 struct lttcomm_relayd_generic_reply reply;
2138
2139 assert(conn);
2140
2141 DBG("Relay receiving streams_sent");
2142
2143 if (!conn->session || !conn->version_check_done) {
2144 ERR("Trying to close a stream before version check");
2145 ret = -1;
2146 goto end_no_session;
2147 }
2148
2149 /*
2150 * Publish every pending stream in the connection's recv list; they are
2151 * now ready to be used by the viewer.
2152 */
2153 publish_connection_local_streams(conn);
2154
2155 memset(&reply, 0, sizeof(reply));
2156 reply.ret_code = htobe32(LTTNG_OK);
2157 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2158 if (send_ret < (ssize_t) sizeof(reply)) {
2159 ERR("Failed to send \"streams sent\" command reply (ret = %zd)",
2160 send_ret);
2161 ret = -1;
2162 } else {
2163 /* Success. */
2164 ret = 0;
2165 }
2166
2167 end_no_session:
2168 return ret;
2169 }
2170
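/*
 * Illustrative sketch (not compiled, not part of the relayd sources):
 * RELAYD_STREAMS_SENT is a payload-less command. On the sending side it
 * boils down to a bare lttcomm_relayd_hdr in network byte order with
 * data_size set to 0, followed by a read of the generic reply, roughly
 * as below. The `sock` parameter and the cmd_version value are
 * assumptions; only the header fields, the command constant and the
 * reply layout are taken from the code above.
 */
#if 0
static int example_send_streams_sent(struct lttcomm_sock *sock)
{
	struct lttcomm_relayd_hdr hdr;
	struct lttcomm_relayd_generic_reply reply;

	memset(&hdr, 0, sizeof(hdr));
	hdr.cmd = htobe32(RELAYD_STREAMS_SENT);
	hdr.cmd_version = htobe32(0);	/* Placeholder. */
	hdr.data_size = htobe64(0);	/* No payload follows the header. */

	if (sock->ops->sendmsg(sock, &hdr, sizeof(hdr), 0)
			< (ssize_t) sizeof(hdr)) {
		return -1;
	}
	if (sock->ops->recvmsg(sock, &reply, sizeof(reply), 0)
			< (ssize_t) sizeof(reply)) {
		return -1;
	}
	return be32toh(reply.ret_code) == LTTNG_OK ? 0 : -1;
}
#endif
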
2171 #define DBG_CMD(cmd_name, conn) \
2172 DBG3("Processing \"%s\" command for socket %i", cmd_name, conn->sock->fd);
2173
2174 static int relay_process_control_command(struct relay_connection *conn,
2175 const struct lttcomm_relayd_hdr *header,
2176 const struct lttng_buffer_view *payload)
2177 {
2178 int ret = 0;
2179
2180 switch (header->cmd) {
2181 case RELAYD_CREATE_SESSION:
2182 DBG_CMD("RELAYD_CREATE_SESSION", conn);
2183 ret = relay_create_session(header, conn, payload);
2184 break;
2185 case RELAYD_ADD_STREAM:
2186 DBG_CMD("RELAYD_ADD_STREAM", conn);
2187 ret = relay_add_stream(header, conn, payload);
2188 break;
2189 case RELAYD_START_DATA:
2190 DBG_CMD("RELAYD_START_DATA", conn);
2191 ret = relay_start(header, conn, payload);
2192 break;
2193 case RELAYD_SEND_METADATA:
2194 DBG_CMD("RELAYD_SEND_METADATA", conn);
2195 ret = relay_recv_metadata(header, conn, payload);
2196 break;
2197 case RELAYD_VERSION:
2198 DBG_CMD("RELAYD_VERSION", conn);
2199 ret = relay_send_version(header, conn, payload);
2200 break;
2201 case RELAYD_CLOSE_STREAM:
2202 DBG_CMD("RELAYD_CLOSE_STREAM", conn);
2203 ret = relay_close_stream(header, conn, payload);
2204 break;
2205 case RELAYD_DATA_PENDING:
2206 DBG_CMD("RELAYD_DATA_PENDING", conn);
2207 ret = relay_data_pending(header, conn, payload);
2208 break;
2209 case RELAYD_QUIESCENT_CONTROL:
2210 DBG_CMD("RELAYD_QUIESCENT_CONTROL", conn);
2211 ret = relay_quiescent_control(header, conn, payload);
2212 break;
2213 case RELAYD_BEGIN_DATA_PENDING:
2214 DBG_CMD("RELAYD_BEGIN_DATA_PENDING", conn);
2215 ret = relay_begin_data_pending(header, conn, payload);
2216 break;
2217 case RELAYD_END_DATA_PENDING:
2218 DBG_CMD("RELAYD_END_DATA_PENDING", conn);
2219 ret = relay_end_data_pending(header, conn, payload);
2220 break;
2221 case RELAYD_SEND_INDEX:
2222 DBG_CMD("RELAYD_SEND_INDEX", conn);
2223 ret = relay_recv_index(header, conn, payload);
2224 break;
2225 case RELAYD_STREAMS_SENT:
2226 DBG_CMD("RELAYD_STREAMS_SENT", conn);
2227 ret = relay_streams_sent(header, conn, payload);
2228 break;
2229 case RELAYD_RESET_METADATA:
2230 DBG_CMD("RELAYD_RESET_METADATA", conn);
2231 ret = relay_reset_metadata(header, conn, payload);
2232 break;
2233 case RELAYD_UPDATE_SYNC_INFO:
2234 default:
2235 ERR("Received unknown command (%u)", header->cmd);
2236 relay_unknown_command(conn);
2237 ret = -1;
2238 goto end;
2239 }
2240
2241 end:
2242 return ret;
2243 }
2244
2245 static enum relay_connection_status relay_process_control_receive_payload(
2246 struct relay_connection *conn)
2247 {
2248 int ret = 0;
2249 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2250 struct lttng_dynamic_buffer *reception_buffer =
2251 &conn->protocol.ctrl.reception_buffer;
2252 struct ctrl_connection_state_receive_payload *state =
2253 &conn->protocol.ctrl.state.receive_payload;
2254 struct lttng_buffer_view payload_view;
2255
2256 if (state->left_to_receive == 0) {
2257 /* Short-circuit for payload-less commands. */
2258 goto reception_complete;
2259 }
2260 ret = conn->sock->ops->recvmsg(conn->sock,
2261 reception_buffer->data + state->received,
2262 state->left_to_receive, MSG_DONTWAIT);
2263 if (ret < 0) {
2264 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2265 PERROR("Unable to receive command payload on sock %d",
2266 conn->sock->fd);
2267 status = RELAY_CONNECTION_STATUS_ERROR;
2268 }
2269 goto end;
2270 } else if (ret == 0) {
2271 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2272 status = RELAY_CONNECTION_STATUS_CLOSED;
2273 goto end;
2274 }
2275
2276 assert(ret > 0);
2277 assert(ret <= state->left_to_receive);
2278
2279 state->left_to_receive -= ret;
2280 state->received += ret;
2281
2282 if (state->left_to_receive > 0) {
2283 /*
2284 * Can't transition to the protocol's next state, wait to
2285 * receive the rest of the payload.
2286 */
2287 DBG3("Partial reception of control connection protocol payload (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2288 state->received, state->left_to_receive,
2289 conn->sock->fd);
2290 goto end;
2291 }
2292
2293 reception_complete:
2294 DBG("Done receiving control command payload: fd = %i, payload size = %" PRIu64 " bytes",
2295 conn->sock->fd, state->received);
2296 /*
2297 * The payload required to process the command has been received.
2298 * A view to the reception buffer is forwarded to the various
2299 * commands and the state of the control is reset on success.
2300 *
2301 * Commands are responsible for sending their reply to the peer.
2302 */
2303 payload_view = lttng_buffer_view_from_dynamic_buffer(reception_buffer,
2304 0, -1);
2305 ret = relay_process_control_command(conn,
2306 &state->header, &payload_view);
2307 if (ret < 0) {
2308 status = RELAY_CONNECTION_STATUS_ERROR;
2309 goto end;
2310 }
2311
2312 ret = connection_reset_protocol_state(conn);
2313 if (ret) {
2314 status = RELAY_CONNECTION_STATUS_ERROR;
2315 }
2316 end:
2317 return status;
2318 }
2319
2320 static enum relay_connection_status relay_process_control_receive_header(
2321 struct relay_connection *conn)
2322 {
2323 int ret = 0;
2324 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2325 struct lttcomm_relayd_hdr header;
2326 struct lttng_dynamic_buffer *reception_buffer =
2327 &conn->protocol.ctrl.reception_buffer;
2328 struct ctrl_connection_state_receive_header *state =
2329 &conn->protocol.ctrl.state.receive_header;
2330
2331 assert(state->left_to_receive != 0);
2332
2333 ret = conn->sock->ops->recvmsg(conn->sock,
2334 reception_buffer->data + state->received,
2335 state->left_to_receive, MSG_DONTWAIT);
2336 if (ret < 0) {
2337 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2338 PERROR("Unable to receive control command header on sock %d",
2339 conn->sock->fd);
2340 status = RELAY_CONNECTION_STATUS_ERROR;
2341 }
2342 goto end;
2343 } else if (ret == 0) {
2344 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2345 status = RELAY_CONNECTION_STATUS_CLOSED;
2346 goto end;
2347 }
2348
2349 assert(ret > 0);
2350 assert(ret <= state->left_to_receive);
2351
2352 state->left_to_receive -= ret;
2353 state->received += ret;
2354
2355 if (state->left_to_receive > 0) {
2356 /*
2357 * Can't transition to the protocol's next state, wait to
2358 * receive the rest of the header.
2359 */
2360 DBG3("Partial reception of control connection protocol header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2361 state->received, state->left_to_receive,
2362 conn->sock->fd);
2363 goto end;
2364 }
2365
2366 /* Transition to next state: receiving the command's payload. */
2367 conn->protocol.ctrl.state_id =
2368 CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD;
2369 memcpy(&header, reception_buffer->data, sizeof(header));
2370 header.circuit_id = be64toh(header.circuit_id);
2371 header.data_size = be64toh(header.data_size);
2372 header.cmd = be32toh(header.cmd);
2373 header.cmd_version = be32toh(header.cmd_version);
2374 memcpy(&conn->protocol.ctrl.state.receive_payload.header,
2375 &header, sizeof(header));
2376
2377 DBG("Done receiving control command header: fd = %i, cmd = %" PRIu32 ", cmd_version = %" PRIu32 ", payload size = %" PRIu64 " bytes",
2378 conn->sock->fd, header.cmd, header.cmd_version,
2379 header.data_size);
2380
2381 if (header.data_size > DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE) {
2382 ERR("Command header indicates a payload (%" PRIu64 " bytes) that exceeds the maximal payload size allowed on a control connection.",
2383 header.data_size);
2384 status = RELAY_CONNECTION_STATUS_ERROR;
2385 goto end;
2386 }
2387
2388 conn->protocol.ctrl.state.receive_payload.left_to_receive =
2389 header.data_size;
2390 conn->protocol.ctrl.state.receive_payload.received = 0;
2391 ret = lttng_dynamic_buffer_set_size(reception_buffer,
2392 header.data_size);
2393 if (ret) {
2394 status = RELAY_CONNECTION_STATUS_ERROR;
2395 goto end;
2396 }
2397
2398 if (header.data_size == 0) {
2399 /*
2400 * Manually invoke the next state as the poll loop
2401 * will not wake up to allow us to proceed further.
2402 */
2403 status = relay_process_control_receive_payload(conn);
2404 }
2405 end:
2406 return status;
2407 }
2408
2409 /*
2410 * Process the commands received on the control socket
2411 */
2412 static enum relay_connection_status relay_process_control(
2413 struct relay_connection *conn)
2414 {
2415 enum relay_connection_status status;
2416
2417 switch (conn->protocol.ctrl.state_id) {
2418 case CTRL_CONNECTION_STATE_RECEIVE_HEADER:
2419 status = relay_process_control_receive_header(conn);
2420 break;
2421 case CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD:
2422 status = relay_process_control_receive_payload(conn);
2423 break;
2424 default:
2425 ERR("Unknown control connection protocol state encountered.");
2426 abort();
2427 }
2428
2429 return status;
2430 }
2431
2432 /*
2433 * Handle index for a data stream.
2434 *
2435 * Called with the stream lock held.
2436 *
2437 * Return 0 on success else a negative value.
2438 */
2439 static int handle_index_data(struct relay_stream *stream, uint64_t net_seq_num,
2440 bool rotate_index)
2441 {
2442 int ret = 0;
2443 uint64_t data_offset;
2444 struct relay_index *index;
2445
2446 /* Get data offset because we are about to update the index. */
2447 data_offset = htobe64(stream->tracefile_size_current);
2448
2449 DBG("handle_index_data: stream %" PRIu64 " net_seq_num %" PRIu64 " data offset %" PRIu64,
2450 stream->stream_handle, net_seq_num, stream->tracefile_size_current);
2451
2452 /*
2453 * Look up an existing index for that stream id/sequence
2454 * number. If it exists, the control thread has already received the
2455 * index for it; once the data offset is set, it can be written to disk.
2456 */
2457 index = relay_index_get_by_id_or_create(stream, net_seq_num);
2458 if (!index) {
2459 ret = -1;
2460 goto end;
2461 }
2462
2463 if (rotate_index || !stream->index_file) {
2464 uint32_t major, minor;
2465
2466 /* Put ref on previous index_file. */
2467 if (stream->index_file) {
2468 lttng_index_file_put(stream->index_file);
2469 stream->index_file = NULL;
2470 }
2471 major = stream->trace->session->major;
2472 minor = stream->trace->session->minor;
2473 stream->index_file = lttng_index_file_create(stream->path_name,
2474 stream->channel_name,
2475 -1, -1, stream->tracefile_size,
2476 tracefile_array_get_file_index_head(stream->tfa),
2477 lttng_to_index_major(major, minor),
2478 lttng_to_index_minor(major, minor));
2479 if (!stream->index_file) {
2480 ret = -1;
2481 /* Put self-ref for this index due to error. */
2482 relay_index_put(index);
2483 index = NULL;
2484 goto end;
2485 }
2486 }
2487
2488 if (relay_index_set_file(index, stream->index_file, data_offset)) {
2489 ret = -1;
2490 /* Put self-ref for this index due to error. */
2491 relay_index_put(index);
2492 index = NULL;
2493 goto end;
2494 }
2495
2496 ret = relay_index_try_flush(index);
2497 if (ret == 0) {
2498 tracefile_array_commit_seq(stream->tfa);
2499 stream->index_received_seqcount++;
2500 } else if (ret > 0) {
2501 /* No flush. */
2502 ret = 0;
2503 } else {
2504 /* Put self-ref for this index due to error. */
2505 relay_index_put(index);
2506 index = NULL;
2507 ret = -1;
2508 }
2509 end:
2510 return ret;
2511 }
2512
2513 static enum relay_connection_status relay_process_data_receive_header(
2514 struct relay_connection *conn)
2515 {
2516 int ret;
2517 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2518 struct data_connection_state_receive_header *state =
2519 &conn->protocol.data.state.receive_header;
2520 struct lttcomm_relayd_data_hdr header;
2521 struct relay_stream *stream;
2522
2523 assert(state->left_to_receive != 0);
2524
2525 ret = conn->sock->ops->recvmsg(conn->sock,
2526 state->header_reception_buffer + state->received,
2527 state->left_to_receive, MSG_DONTWAIT);
2528 if (ret < 0) {
2529 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2530 PERROR("Unable to receive data header on sock %d", conn->sock->fd);
2531 status = RELAY_CONNECTION_STATUS_ERROR;
2532 }
2533 goto end;
2534 } else if (ret == 0) {
2535 /* Orderly shutdown. Not necessary to print an error. */
2536 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2537 status = RELAY_CONNECTION_STATUS_CLOSED;
2538 goto end;
2539 }
2540
2541 assert(ret > 0);
2542 assert(ret <= state->left_to_receive);
2543
2544 state->left_to_receive -= ret;
2545 state->received += ret;
2546
2547 if (state->left_to_receive > 0) {
2548 /*
2549 * Can't transition to the protocol's next state, wait to
2550 * receive the rest of the header.
2551 */
2552 DBG3("Partial reception of data connection header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2553 state->received, state->left_to_receive,
2554 conn->sock->fd);
2555 ret = 0;
2556 goto end;
2557 }
2558
2559 /* Transition to next state: receiving the payload. */
2560 conn->protocol.data.state_id = DATA_CONNECTION_STATE_RECEIVE_PAYLOAD;
2561
2562 memcpy(&header, state->header_reception_buffer, sizeof(header));
2563 header.circuit_id = be64toh(header.circuit_id);
2564 header.stream_id = be64toh(header.stream_id);
2565 header.data_size = be32toh(header.data_size);
2566 header.net_seq_num = be64toh(header.net_seq_num);
2567 header.padding_size = be32toh(header.padding_size);
2568 memcpy(&conn->protocol.data.state.receive_payload.header, &header, sizeof(header));
2569
2570 conn->protocol.data.state.receive_payload.left_to_receive =
2571 header.data_size;
2572 conn->protocol.data.state.receive_payload.received = 0;
2573 conn->protocol.data.state.receive_payload.rotate_index = false;
2574
2575 DBG("Received data connection header on fd %i: circuit_id = %" PRIu64 ", stream_id = %" PRIu64 ", data_size = %" PRIu32 ", net_seq_num = %" PRIu64 ", padding_size = %" PRIu32,
2576 conn->sock->fd, header.circuit_id,
2577 header.stream_id, header.data_size,
2578 header.net_seq_num, header.padding_size);
2579
2580 stream = stream_get_by_id(header.stream_id);
2581 if (!stream) {
2582 DBG("relay_process_data_receive_payload: Cannot find stream %" PRIu64,
2583 header.stream_id);
2584 /* Protocol error. */
2585 status = RELAY_CONNECTION_STATUS_ERROR;
2586 goto end;
2587 }
2588
2589 pthread_mutex_lock(&stream->lock);
2590
2591 /* Check if a rotation is needed. */
2592 if (stream->tracefile_size > 0 &&
2593 (stream->tracefile_size_current + header.data_size) >
2594 stream->tracefile_size) {
2595 uint64_t old_id, new_id;
2596
2597 old_id = tracefile_array_get_file_index_head(stream->tfa);
2598 tracefile_array_file_rotate(stream->tfa);
2599
2600 /* new_id is updated by utils_rotate_stream_file. */
2601 new_id = old_id;
2602
2603 ret = utils_rotate_stream_file(stream->path_name,
2604 stream->channel_name, stream->tracefile_size,
2605 stream->tracefile_count, -1,
2606 -1, stream->stream_fd->fd,
2607 &new_id, &stream->stream_fd->fd);
2608 if (ret < 0) {
2609 ERR("Failed to rotate stream output file");
2610 status = RELAY_CONNECTION_STATUS_ERROR;
2611 goto end_stream_unlock;
2612 }
2613
2614 /*
2615 * Reset current size because we just performed a stream
2616 * rotation.
2617 */
2618 stream->tracefile_size_current = 0;
2619 conn->protocol.data.state.receive_payload.rotate_index = true;
2620 }
2621
2622 ret = 0;
2623 end_stream_unlock:
2624 pthread_mutex_unlock(&stream->lock);
2625 stream_put(stream);
2626 end:
2627 return status;
2628 }
2629
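/*
 * Illustrative sketch (not compiled, not part of the relayd sources):
 * the data header decoded by relay_process_data_receive_header() above
 * mixes 64-bit and 32-bit fields, so the sender must apply the matching
 * htobe64()/htobe32() conversions. Parameter values are placeholders;
 * only the field names, widths and byte order come from the decoding
 * code above.
 */
#if 0
static void example_fill_data_header(struct lttcomm_relayd_data_hdr *hdr,
		uint64_t stream_id, uint64_t net_seq_num,
		uint32_t data_size, uint32_t padding_size)
{
	hdr->circuit_id = htobe64(0);	/* Decoded above, not otherwise used here. */
	hdr->stream_id = htobe64(stream_id);
	hdr->data_size = htobe32(data_size);
	hdr->net_seq_num = htobe64(net_seq_num);
	hdr->padding_size = htobe32(padding_size);
}
#endif
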
2630 static enum relay_connection_status relay_process_data_receive_payload(
2631 struct relay_connection *conn)
2632 {
2633 int ret;
2634 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2635 struct relay_stream *stream;
2636 struct data_connection_state_receive_payload *state =
2637 &conn->protocol.data.state.receive_payload;
2638 const size_t chunk_size = RECV_DATA_BUFFER_SIZE;
2639 char data_buffer[chunk_size];
2640 bool partial_recv = false;
2641 bool new_stream = false, close_requested = false;
2642 uint64_t left_to_receive = state->left_to_receive;
2643 struct relay_session *session;
2644
2645 DBG3("Receiving data for stream id %" PRIu64 " seqnum %" PRIu64 ", %" PRIu64" bytes received, %" PRIu64 " bytes left to receive",
2646 state->header.stream_id, state->header.net_seq_num,
2647 state->received, left_to_receive);
2648
2649 stream = stream_get_by_id(state->header.stream_id);
2650 if (!stream) {
2651 /* Protocol error. */
2652 ERR("relay_process_data_receive_payload: cannot find stream %" PRIu64,
2653 state->header.stream_id);
2654 status = RELAY_CONNECTION_STATUS_ERROR;
2655 goto end;
2656 }
2657
2658 pthread_mutex_lock(&stream->lock);
2659 session = stream->trace->session;
2660 if (!conn->session) {
2661 ret = connection_set_session(conn, session);
2662 if (ret) {
2663 status = RELAY_CONNECTION_STATUS_ERROR;
2664 goto end_stream_unlock;
2665 }
2666 }
2667
2668 /*
2669 * The size of the "chunk" received on any iteration is bounded by:
2670 * - the data left to receive,
2671 * - the data immediately available on the socket,
2672 * - the on-stack data buffer
2673 */
2674 while (left_to_receive > 0 && !partial_recv) {
2675 ssize_t write_ret;
2676 size_t recv_size = min(left_to_receive, chunk_size);
2677
2678 ret = conn->sock->ops->recvmsg(conn->sock, data_buffer,
2679 recv_size, MSG_DONTWAIT);
2680 if (ret < 0) {
2681 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2682 PERROR("Socket %d error", conn->sock->fd);
2683 status = RELAY_CONNECTION_STATUS_ERROR;
2684 }
2685 goto end_stream_unlock;
2686 } else if (ret == 0) {
2687 /* Orderly shutdown (EOF); the peer closed the data socket. */
2688 DBG3("Data socket of stream id %" PRIu64 " performed an orderly shutdown (received EOF)",
2689 state->header.stream_id);
2690 status = RELAY_CONNECTION_STATUS_CLOSED;
2691 break;
2692 } else if (ret < (int) recv_size) {
2693 /*
2694 * All the data available on the socket has been
2695 * consumed.
2696 */
2697 partial_recv = true;
2698 }
2699
2700 recv_size = ret;
2701
2702 /* Write data to stream output fd. */
2703 write_ret = lttng_write(stream->stream_fd->fd, data_buffer,
2704 recv_size);
2705 if (write_ret < (ssize_t) recv_size) {
2706 ERR("Relay error writing data to file");
2707 status = RELAY_CONNECTION_STATUS_ERROR;
2708 goto end_stream_unlock;
2709 }
2710
2711 left_to_receive -= recv_size;
2712 state->received += recv_size;
2713 state->left_to_receive = left_to_receive;
2714
2715 DBG2("Relay wrote %zd bytes to tracefile for stream id %" PRIu64,
2716 write_ret, stream->stream_handle);
2717 }
2718
2719 if (state->left_to_receive > 0) {
2720 /*
2721 * Did not receive all the data expected, wait for more data to
2722 * become available on the socket.
2723 */
2724 DBG3("Partial receive on data connection of stream id %" PRIu64 ", %" PRIu64 " bytes received, %" PRIu64 " bytes left to receive",
2725 state->header.stream_id, state->received,
2726 state->left_to_receive);
2727 goto end_stream_unlock;
2728 }
2729
2730 ret = write_padding_to_file(stream->stream_fd->fd,
2731 state->header.padding_size);
2732 if ((int64_t) ret < (int64_t) state->header.padding_size) {
2733 ERR("write_padding_to_file: fail stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d",
2734 stream->stream_handle,
2735 state->header.net_seq_num, ret);
2736 status = RELAY_CONNECTION_STATUS_ERROR;
2737 goto end_stream_unlock;
2738 }
2739
2740
2741 if (session->minor >= 4 && !session->snapshot) {
2742 ret = handle_index_data(stream, state->header.net_seq_num,
2743 state->rotate_index);
2744 if (ret < 0) {
2745 ERR("handle_index_data: fail stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d",
2746 stream->stream_handle,
2747 state->header.net_seq_num, ret);
2748 status = RELAY_CONNECTION_STATUS_ERROR;
2749 goto end_stream_unlock;
2750 }
2751 }
2752
2753 stream->tracefile_size_current += state->header.data_size +
2754 state->header.padding_size;
2755
2756 if (stream->prev_seq == -1ULL) {
2757 new_stream = true;
2758 }
2759
2760 stream->prev_seq = state->header.net_seq_num;
2761
2762 /*
2763 * Resetting the protocol state (to RECEIVE_HEADER) will trash the
2764 * contents of *state which are aliased (union) to the same location as
2765 * the new state. Don't use it beyond this point.
2766 */
2767 connection_reset_protocol_state(conn);
2768 state = NULL;
2769
2770 end_stream_unlock:
2771 close_requested = stream->close_requested;
2772 pthread_mutex_unlock(&stream->lock);
2773 if (close_requested && left_to_receive == 0) {
2774 try_stream_close(stream);
2775 }
2776
2777 if (new_stream) {
2778 pthread_mutex_lock(&session->lock);
2779 uatomic_set(&session->new_streams, 1);
2780 pthread_mutex_unlock(&session->lock);
2781 }
2782
2783 stream_put(stream);
2784 end:
2785 return status;
2786 }
2787
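/*
 * Illustrative sketch (not compiled, not part of the relayd sources):
 * the per-connection protocol state is a tagged union, so the
 * receive-header and receive-payload states share the same storage.
 * That is why relay_process_data_receive_payload() above NULLs its
 * local `state` pointer after connection_reset_protocol_state(): the
 * reset rewrites the very bytes that pointer refers to. A minimal
 * analogue of the hazard (types and field names below are invented):
 */
#if 0
struct example_proto_state {
	int state_id;
	union {
		struct { uint64_t left_to_receive, received; } receive_header;
		struct { uint64_t left_to_receive, received; } receive_payload;
	} u;
};

static void example_reset(struct example_proto_state *proto)
{
	/* Overwrites whichever union member was active before the call. */
	proto->state_id = 0;	/* Back to "receive header". */
	proto->u.receive_header.left_to_receive =
			sizeof(struct lttcomm_relayd_data_hdr);
	proto->u.receive_header.received = 0;
}
#endif
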
2788 /*
2789 * relay_process_data: Process the data received on the data socket
2790 */
2791 static enum relay_connection_status relay_process_data(
2792 struct relay_connection *conn)
2793 {
2794 enum relay_connection_status status;
2795
2796 switch (conn->protocol.data.state_id) {
2797 case DATA_CONNECTION_STATE_RECEIVE_HEADER:
2798 status = relay_process_data_receive_header(conn);
2799 break;
2800 case DATA_CONNECTION_STATE_RECEIVE_PAYLOAD:
2801 status = relay_process_data_receive_payload(conn);
2802 break;
2803 default:
2804 ERR("Unexpected data connection communication state.");
2805 abort();
2806 }
2807
2808 return status;
2809 }
2810
2811 static void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
2812 {
2813 int ret;
2814
2815 (void) lttng_poll_del(events, pollfd);
2816
2817 ret = close(pollfd);
2818 if (ret < 0) {
2819 ERR("Closing pollfd %d", pollfd);
2820 }
2821 }
2822
2823 static void relay_thread_close_connection(struct lttng_poll_event *events,
2824 int pollfd, struct relay_connection *conn)
2825 {
2826 const char *type_str;
2827
2828 switch (conn->type) {
2829 case RELAY_DATA:
2830 type_str = "Data";
2831 break;
2832 case RELAY_CONTROL:
2833 type_str = "Control";
2834 break;
2835 case RELAY_VIEWER_COMMAND:
2836 type_str = "Viewer Command";
2837 break;
2838 case RELAY_VIEWER_NOTIFICATION:
2839 type_str = "Viewer Notification";
2840 break;
2841 default:
2842 type_str = "Unknown";
2843 }
2844 cleanup_connection_pollfd(events, pollfd);
2845 connection_put(conn);
2846 DBG("%s connection closed with %d", type_str, pollfd);
2847 }
2848
2849 /*
2850 * This thread does the actual relaying work: it services the control and data connections.
2851 */
2852 static void *relay_thread_worker(void *data)
2853 {
2854 int ret, err = -1, last_seen_data_fd = -1;
2855 uint32_t nb_fd;
2856 struct lttng_poll_event events;
2857 struct lttng_ht *relay_connections_ht;
2858 struct lttng_ht_iter iter;
2859 struct relay_connection *destroy_conn = NULL;
2860
2861 DBG("[thread] Relay worker started");
2862
2863 rcu_register_thread();
2864
2865 health_register(health_relayd, HEALTH_RELAYD_TYPE_WORKER);
2866
2867 if (testpoint(relayd_thread_worker)) {
2868 goto error_testpoint;
2869 }
2870
2871 health_code_update();
2872
2873 /* table of connections indexed on socket */
2874 relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2875 if (!relay_connections_ht) {
2876 goto relay_connections_ht_error;
2877 }
2878
2879 ret = create_thread_poll_set(&events, 2);
2880 if (ret < 0) {
2881 goto error_poll_create;
2882 }
2883
2884 ret = lttng_poll_add(&events, relay_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
2885 if (ret < 0) {
2886 goto error;
2887 }
2888
2889 restart:
2890 while (1) {
2891 int idx = -1, i, seen_control = 0, last_notdel_data_fd = -1;
2892
2893 health_code_update();
2894
2895 /* Infinite blocking call, waiting for transmission */
2896 DBG3("Relayd worker thread polling...");
2897 health_poll_entry();
2898 ret = lttng_poll_wait(&events, -1);
2899 health_poll_exit();
2900 if (ret < 0) {
2901 /*
2902 * Restart interrupted system call.
2903 */
2904 if (errno == EINTR) {
2905 goto restart;
2906 }
2907 goto error;
2908 }
2909
2910 nb_fd = ret;
2911
2912 /*
2913 * Process control. The control connection is
2914 * prioritized so we don't starve it with high
2915 * throughput tracing data on the data connection.
2916 */
2917 for (i = 0; i < nb_fd; i++) {
2918 /* Fetch once the poll data */
2919 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
2920 int pollfd = LTTNG_POLL_GETFD(&events, i);
2921
2922 health_code_update();
2923
2924 if (!revents) {
2925 /*
2926 * No activity for this FD (poll
2927 * implementation).
2928 */
2929 continue;
2930 }
2931
2932 /* Thread quit pipe has been closed. Killing thread. */
2933 ret = check_thread_quit_pipe(pollfd, revents);
2934 if (ret) {
2935 err = 0;
2936 goto exit;
2937 }
2938
2939 /* Inspect the relay conn pipe for new connection */
2940 if (pollfd == relay_conn_pipe[0]) {
2941 if (revents & LPOLLIN) {
2942 struct relay_connection *conn;
2943
2944 ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn));
2945 if (ret < 0) {
2946 goto error;
2947 }
2948 lttng_poll_add(&events, conn->sock->fd,
2949 LPOLLIN | LPOLLRDHUP);
2950 connection_ht_add(relay_connections_ht, conn);
2951 DBG("Connection socket %d added", conn->sock->fd);
2952 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
2953 ERR("Relay connection pipe error");
2954 goto error;
2955 } else {
2956 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
2957 goto error;
2958 }
2959 } else {
2960 struct relay_connection *ctrl_conn;
2961
2962 ctrl_conn = connection_get_by_sock(relay_connections_ht, pollfd);
2963 /* If not found, there is a synchronization issue. */
2964 assert(ctrl_conn);
2965
2966 if (ctrl_conn->type == RELAY_DATA) {
2967 if (revents & LPOLLIN) {
2968 /*
2969 * Flag the last seen data fd not deleted. It will be
2970 * used as the last seen fd if any fd gets deleted in
2971 * this first loop.
2972 */
2973 last_notdel_data_fd = pollfd;
2974 }
2975 goto put_ctrl_connection;
2976 }
2977 assert(ctrl_conn->type == RELAY_CONTROL);
2978
2979 if (revents & LPOLLIN) {
2980 enum relay_connection_status status;
2981
2982 status = relay_process_control(ctrl_conn);
2983 if (status != RELAY_CONNECTION_STATUS_OK) {
2984 /*
2985 * On socket error flag the session as aborted to force
2986 * the cleanup of its stream otherwise it can leak
2987 * during the lifetime of the relayd.
2988 *
2989 * This prevents situations in which streams can be
2990 * left opened because an index was received, the
2991 * control connection is closed, and the data
2992 * connection is closed (uncleanly) before the packet's
2993 * data is received.
2994 *
2995 * Since the control connection encountered an error,
2996 * it is okay to be conservative and close the
2997 * session right now as we can't rely on the protocol
2998 * being respected anymore.
2999 */
3000 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3001 session_abort(ctrl_conn->session);
3002 }
3003
3004 /* Clear the connection on error or close. */
3005 relay_thread_close_connection(&events,
3006 pollfd,
3007 ctrl_conn);
3008 }
3009 seen_control = 1;
3010 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3011 relay_thread_close_connection(&events,
3012 pollfd, ctrl_conn);
3013 if (last_seen_data_fd == pollfd) {
3014 last_seen_data_fd = last_notdel_data_fd;
3015 }
3016 } else {
3017 ERR("Unexpected poll events %u for control sock %d",
3018 revents, pollfd);
3019 connection_put(ctrl_conn);
3020 goto error;
3021 }
3022 put_ctrl_connection:
3023 connection_put(ctrl_conn);
3024 }
3025 }
3026
3027 /*
3028 * The last loop handled a control request, go back to poll to make
3029 * sure we prioritise the control socket.
3030 */
3031 if (seen_control) {
3032 continue;
3033 }
3034
3035 if (last_seen_data_fd >= 0) {
3036 for (i = 0; i < nb_fd; i++) {
3037 int pollfd = LTTNG_POLL_GETFD(&events, i);
3038
3039 health_code_update();
3040
3041 if (last_seen_data_fd == pollfd) {
3042 idx = i;
3043 break;
3044 }
3045 }
3046 }
3047
3048 /* Process data connection. */
3049 for (i = idx + 1; i < nb_fd; i++) {
3050 /* Fetch the poll data. */
3051 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3052 int pollfd = LTTNG_POLL_GETFD(&events, i);
3053 struct relay_connection *data_conn;
3054
3055 health_code_update();
3056
3057 if (!revents) {
3058 /* No activity for this FD (poll implementation). */
3059 continue;
3060 }
3061
3062 /* Skip the command pipe. It's handled in the first loop. */
3063 if (pollfd == relay_conn_pipe[0]) {
3064 continue;
3065 }
3066
3067 data_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3068 if (!data_conn) {
3069 /* Skip it. It might have been removed already. */
3070 continue;
3071 }
3072 if (data_conn->type == RELAY_CONTROL) {
3073 goto put_data_connection;
3074 }
3075 assert(data_conn->type == RELAY_DATA);
3076
3077 if (revents & LPOLLIN) {
3078 enum relay_connection_status status;
3079
3080 status = relay_process_data(data_conn);
3081 /* Connection closed or error. */
3082 if (status != RELAY_CONNECTION_STATUS_OK) {
3083 /*
3084 * On socket error flag the session as aborted to force
3085 * the cleanup of its stream otherwise it can leak
3086 * during the lifetime of the relayd.
3087 *
3088 * This prevents situations in which streams can be
3089 * left opened because an index was received, the
3090 * control connection is closed, and the data
3091 * connection is closed (uncleanly) before the packet's
3092 * data is received.
3093 *
3094 * Since the data connection encountered an error,
3095 * it is okay to be conservative and close the
3096 * session right now as we can't rely on the protocol
3097 * being respected anymore.
3098 */
3099 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3100 session_abort(data_conn->session);
3101 }
3102 relay_thread_close_connection(&events, pollfd,
3103 data_conn);
3104 /*
3105 * Every goto restart call sets the last seen fd;
3106 * here we don't really care since we gracefully
3107 * continue the loop after the connection is deleted.
3108 */
3109 } else {
3110 /* Keep last seen data fd. */
3111 last_seen_data_fd = pollfd;
3112 connection_put(data_conn);
3113 goto restart;
3114 }
3115 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3116 relay_thread_close_connection(&events, pollfd,
3117 data_conn);
3118 } else {
3119 ERR("Unknown poll events %u for data sock %d",
3120 revents, pollfd);
3121 }
3122 put_data_connection:
3123 connection_put(data_conn);
3124 }
3125 last_seen_data_fd = -1;
3126 }
3127
3128 /* Normal exit, no error */
3129 ret = 0;
3130
3131 exit:
3132 error:
3133 /* Clean up remaining connection objects. */
3134 rcu_read_lock();
3135 cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter,
3136 destroy_conn,
3137 sock_n.node) {
3138 health_code_update();
3139
3140 session_abort(destroy_conn->session);
3141
3142 /*
3143 * No need to grab another ref, because we own
3144 * destroy_conn.
3145 */
3146 relay_thread_close_connection(&events, destroy_conn->sock->fd,
3147 destroy_conn);
3148 }
3149 rcu_read_unlock();
3150
3151 lttng_poll_clean(&events);
3152 error_poll_create:
3153 lttng_ht_destroy(relay_connections_ht);
3154 relay_connections_ht_error:
3155 /* Close relay conn pipes */
3156 utils_close_pipe(relay_conn_pipe);
3157 if (err) {
3158 DBG("Thread exited with error");
3159 }
3160 DBG("Worker thread cleanup complete");
3161 error_testpoint:
3162 if (err) {
3163 health_error();
3164 ERR("Health error occurred in %s", __func__);
3165 }
3166 health_unregister(health_relayd);
3167 rcu_unregister_thread();
3168 lttng_relay_stop_threads();
3169 return NULL;
3170 }
3171
3172 /*
3173 * Create the relay connection pipe used to wake up the worker thread.
3174 * Closed in cleanup().
3175 */
3176 static int create_relay_conn_pipe(void)
3177 {
3178 int ret;
3179
3180 ret = utils_create_pipe_cloexec(relay_conn_pipe);
3181
3182 return ret;
3183 }
3184
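/*
 * Illustrative sketch (not compiled, not part of the relayd sources):
 * relay_thread_worker() above receives new connections as raw
 * `struct relay_connection *` pointers read from relay_conn_pipe[0].
 * The producer side (the dispatcher thread, which is outside this
 * excerpt) presumably mirrors that with a pointer-sized lttng_write()
 * on relay_conn_pipe[1], roughly like this:
 */
#if 0
static int example_hand_off_connection(struct relay_connection *conn)
{
	ssize_t ret;

	/* The pointer itself is the message; the worker adds it to its hash table. */
	ret = lttng_write(relay_conn_pipe[1], &conn, sizeof(conn));
	return ret == (ssize_t) sizeof(conn) ? 0 : -1;
}
#endif
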
3185 /*
3186 * main
3187 */
3188 int main(int argc, char **argv)
3189 {
3190 int ret = 0, retval = 0;
3191 void *status;
3192
3193 /* Parse environment variables */
3194 parse_env_options();
3195
3196 /*
3197 * Parse arguments.
3198 * Command line arguments overwrite environment.
3199 */
3200 progname = argv[0];
3201 if (set_options(argc, argv)) {
3202 retval = -1;
3203 goto exit_options;
3204 }
3205
3206 if (set_signal_handler()) {
3207 retval = -1;
3208 goto exit_options;
3209 }
3210
3211 /* Try to create directory if -o, --output is specified. */
3212 if (opt_output_path) {
3213 if (*opt_output_path != '/') {
3214 ERR("Please specify an absolute path for -o, --output PATH");
3215 retval = -1;
3216 goto exit_options;
3217 }
3218
3219 ret = utils_mkdir_recursive(opt_output_path, S_IRWXU | S_IRWXG,
3220 -1, -1);
3221 if (ret < 0) {
3222 ERR("Unable to create %s", opt_output_path);
3223 retval = -1;
3224 goto exit_options;
3225 }
3226 }
3227
3228 /* Daemonize */
3229 if (opt_daemon || opt_background) {
3230 int i;
3231
3232 ret = lttng_daemonize(&child_ppid, &recv_child_signal,
3233 !opt_background);
3234 if (ret < 0) {
3235 retval = -1;
3236 goto exit_options;
3237 }
3238
3239 /*
3240 * We are in the child. Make sure all other file
3241 * descriptors are closed, in case we are called with
3242 * more opened file descriptors than the standard ones.
3243 */
3244 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
3245 (void) close(i);
3246 }
3247 }
3248
3249
3250 if (opt_working_directory) {
3251 ret = utils_change_working_dir(opt_working_directory);
3252 if (ret) {
3253 ERR("Changing working directory");
3254 goto exit_options;
3255 }
3256 }
3257
3258 /* Initialize thread health monitoring */
3259 health_relayd = health_app_create(NR_HEALTH_RELAYD_TYPES);
3260 if (!health_relayd) {
3261 PERROR("health_app_create error");
3262 retval = -1;
3263 goto exit_health_app_create;
3264 }
3265
3266 /* Create thread quit pipe */
3267 if (init_thread_quit_pipe()) {
3268 retval = -1;
3269 goto exit_init_data;
3270 }
3271
3272 /* Setup the thread apps communication pipe. */
3273 if (create_relay_conn_pipe()) {
3274 retval = -1;
3275 goto exit_init_data;
3276 }
3277
3278 /* Init relay command queue. */
3279 cds_wfcq_init(&relay_conn_queue.head, &relay_conn_queue.tail);
3280
3281 /* Initialize communication library */
3282 lttcomm_init();
3283 lttcomm_inet_init();
3284
3285 /* Table of sessions indexed by session ID. */
3286 sessions_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3287 if (!sessions_ht) {
3288 retval = -1;
3289 goto exit_init_data;
3290 }
3291
3292 /* Table of relay streams indexed by stream ID. */
3293 relay_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3294 if (!relay_streams_ht) {
3295 retval = -1;
3296 goto exit_init_data;
3297 }
3298
3299 /* Table of viewer streams indexed by stream ID. */
3300 viewer_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3301 if (!viewer_streams_ht) {
3302 retval = -1;
3303 goto exit_init_data;
3304 }
3305
3306 ret = utils_create_pipe(health_quit_pipe);
3307 if (ret) {
3308 retval = -1;
3309 goto exit_health_quit_pipe;
3310 }
3311
3312 /* Create the health management thread. */
3313 ret = pthread_create(&health_thread, default_pthread_attr(),
3314 thread_manage_health, (void *) NULL);
3315 if (ret) {
3316 errno = ret;
3317 PERROR("pthread_create health");
3318 retval = -1;
3319 goto exit_health_thread;
3320 }
3321
3322 /* Setup the dispatcher thread */
3323 ret = pthread_create(&dispatcher_thread, default_pthread_attr(),
3324 relay_thread_dispatcher, (void *) NULL);
3325 if (ret) {
3326 errno = ret;
3327 PERROR("pthread_create dispatcher");
3328 retval = -1;
3329 goto exit_dispatcher_thread;
3330 }
3331
3332 /* Setup the worker thread */
3333 ret = pthread_create(&worker_thread, default_pthread_attr(),
3334 relay_thread_worker, NULL);
3335 if (ret) {
3336 errno = ret;
3337 PERROR("pthread_create worker");
3338 retval = -1;
3339 goto exit_worker_thread;
3340 }
3341
3342 /* Setup the listener thread */
3343 ret = pthread_create(&listener_thread, default_pthread_attr(),
3344 relay_thread_listener, (void *) NULL);
3345 if (ret) {
3346 errno = ret;
3347 PERROR("pthread_create listener");
3348 retval = -1;
3349 goto exit_listener_thread;
3350 }
3351
3352 ret = relayd_live_create(live_uri);
3353 if (ret) {
3354 ERR("Starting live viewer threads");
3355 retval = -1;
3356 goto exit_live;
3357 }
3358
3359 /*
3360 * This is where we start awaiting program completion (e.g. through a
3361 * signal that asks the threads to tear down).
3362 */
3363
3364 ret = relayd_live_join();
3365 if (ret) {
3366 retval = -1;
3367 }
3368 exit_live:
3369
3370 ret = pthread_join(listener_thread, &status);
3371 if (ret) {
3372 errno = ret;
3373 PERROR("pthread_join listener_thread");
3374 retval = -1;
3375 }
3376
3377 exit_listener_thread:
3378 ret = pthread_join(worker_thread, &status);
3379 if (ret) {
3380 errno = ret;
3381 PERROR("pthread_join worker_thread");
3382 retval = -1;
3383 }
3384
3385 exit_worker_thread:
3386 ret = pthread_join(dispatcher_thread, &status);
3387 if (ret) {
3388 errno = ret;
3389 PERROR("pthread_join dispatcher_thread");
3390 retval = -1;
3391 }
3392 exit_dispatcher_thread:
3393
3394 ret = pthread_join(health_thread, &status);
3395 if (ret) {
3396 errno = ret;
3397 PERROR("pthread_join health_thread");
3398 retval = -1;
3399 }
3400 exit_health_thread:
3401
3402 utils_close_pipe(health_quit_pipe);
3403 exit_health_quit_pipe:
3404
3405 exit_init_data:
3406 health_app_destroy(health_relayd);
3407 exit_health_app_create:
3408 exit_options:
3409 /*
3410 * Wait for all pending call_rcu work to complete before tearing
3411 * down data structures. call_rcu worker may be trying to
3412 * perform lookups in those structures.
3413 */
3414 rcu_barrier();
3415 relayd_cleanup();
3416
3417 /* Ensure all prior call_rcu are done. */
3418 rcu_barrier();
3419
3420 if (!retval) {
3421 exit(EXIT_SUCCESS);
3422 } else {
3423 exit(EXIT_FAILURE);
3424 }
3425 }