relayd: register fd tracker instance to all created trace chunks
[lttng-tools.git] / src / bin / lttng-relayd / main.c
1 /*
2 * Copyright (C) 2012 - Julien Desfossez <jdesfossez@efficios.com>
3 * David Goulet <dgoulet@efficios.com>
4 * 2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 * 2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License, version 2 only,
9 * as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21 #define _LGPL_SOURCE
22 #include <getopt.h>
23 #include <grp.h>
24 #include <limits.h>
25 #include <pthread.h>
26 #include <signal.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/mman.h>
31 #include <sys/mount.h>
32 #include <sys/resource.h>
33 #include <sys/socket.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/wait.h>
37 #include <sys/resource.h>
38 #include <inttypes.h>
39 #include <urcu/futex.h>
40 #include <urcu/uatomic.h>
41 #include <urcu/rculist.h>
42 #include <unistd.h>
43 #include <fcntl.h>
44 #include <strings.h>
45 #include <ctype.h>
46
47 #include <lttng/lttng.h>
48 #include <common/common.h>
49 #include <common/compat/poll.h>
50 #include <common/compat/socket.h>
51 #include <common/compat/endian.h>
52 #include <common/compat/getenv.h>
53 #include <common/defaults.h>
54 #include <common/daemonize.h>
55 #include <common/futex.h>
56 #include <common/sessiond-comm/sessiond-comm.h>
57 #include <common/sessiond-comm/inet.h>
58 #include <common/sessiond-comm/relayd.h>
59 #include <common/uri.h>
60 #include <common/utils.h>
61 #include <common/align.h>
62 #include <common/config/session-config.h>
63 #include <common/dynamic-buffer.h>
64 #include <common/buffer-view.h>
65 #include <common/string-utils/format.h>
66 #include <common/fd-tracker/fd-tracker.h>
67 #include <common/fd-tracker/utils.h>
68
69 #include "backward-compatibility-group-by.h"
70 #include "cmd.h"
71 #include "connection.h"
72 #include "ctf-trace.h"
73 #include "health-relayd.h"
74 #include "index.h"
75 #include "live.h"
76 #include "lttng-relayd.h"
77 #include "session.h"
78 #include "sessiond-trace-chunks.h"
79 #include "stream.h"
80 #include "tcp_keep_alive.h"
81 #include "testpoint.h"
82 #include "tracefile-array.h"
83 #include "utils.h"
84 #include "version.h"
85 #include "viewer-stream.h"
86
87 static const char *help_msg =
88 #ifdef LTTNG_EMBED_HELP
89 #include <lttng-relayd.8.h>
90 #else
91 NULL
92 #endif
93 ;
94
95 enum relay_connection_status {
96 RELAY_CONNECTION_STATUS_OK,
97 /* An error occurred while processing an event on the connection. */
98 RELAY_CONNECTION_STATUS_ERROR,
99 /* Connection closed/shutdown cleanly. */
100 RELAY_CONNECTION_STATUS_CLOSED,
101 };
102
103 /* command line options */
104 char *opt_output_path, *opt_working_directory;
105 static int opt_daemon, opt_background, opt_print_version, opt_allow_clear = 1;
106 enum relay_group_output_by opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_UNKNOWN;
107
108 /*
109 * The listener, live listener and health-check threads must all be
110 * ready before the daemon signals its readiness.
111 */
112 #define NR_LTTNG_RELAY_READY 3
113 static int lttng_relay_ready = NR_LTTNG_RELAY_READY;
114
115 /* Size of receive buffer. */
116 #define RECV_DATA_BUFFER_SIZE 65536
117
118 static int recv_child_signal; /* Set to 1 when a SIGUSR1 signal is received. */
119 static pid_t child_ppid; /* Internal parent PID used with daemonize. */
120
121 static struct lttng_uri *control_uri;
122 static struct lttng_uri *data_uri;
123 static struct lttng_uri *live_uri;
124
125 const char *progname;
126
127 const char *tracing_group_name = DEFAULT_TRACING_GROUP;
128 static int tracing_group_name_override;
129
130 const char * const config_section_name = "relayd";
131
132 /*
133 * Quit pipe for all threads. This permits a single cancellation point
134 * for all threads when receiving an event on the pipe.
135 */
136 int thread_quit_pipe[2] = { -1, -1 };
137
138 /*
139 * This pipe is used to inform the worker thread that a command is queued and
140 * ready to be processed.
141 */
142 static int relay_conn_pipe[2] = { -1, -1 };
143
144 /* Shared between threads */
145 static int dispatch_thread_exit;
146
147 static pthread_t listener_thread;
148 static pthread_t dispatcher_thread;
149 static pthread_t worker_thread;
150 static pthread_t health_thread;
151
152 /*
153 * last_relay_stream_id_lock protects last_relay_stream_id increment
154 * atomicity on 32-bit architectures.
155 */
156 static pthread_mutex_t last_relay_stream_id_lock = PTHREAD_MUTEX_INITIALIZER;
157 static uint64_t last_relay_stream_id;
158
159 /*
160 * Relay command queue.
161 *
162 * The relay_thread_listener and relay_thread_dispatcher communicate with this
163 * queue.
164 */
165 static struct relay_conn_queue relay_conn_queue;
166
167 /* Cap on the number of file descriptors in simultaneous use by the relay daemon. */
168 static unsigned int lttng_opt_fd_pool_size = -1;
169
170 /* Global relay stream hash table. */
171 struct lttng_ht *relay_streams_ht;
172
173 /* Global relay viewer stream hash table. */
174 struct lttng_ht *viewer_streams_ht;
175
176 /* Global relay sessions hash table. */
177 struct lttng_ht *sessions_ht;
178
179 /* Relayd health monitoring */
180 struct health_app *health_relayd;
181
182 struct sessiond_trace_chunk_registry *sessiond_trace_chunk_registry;
183
184 /* Global fd tracker. */
185 struct fd_tracker *the_fd_tracker;
186
187 static struct option long_options[] = {
188 { "control-port", 1, 0, 'C', },
189 { "data-port", 1, 0, 'D', },
190 { "live-port", 1, 0, 'L', },
191 { "daemonize", 0, 0, 'd', },
192 { "background", 0, 0, 'b', },
193 { "group", 1, 0, 'g', },
194 { "fd-pool-size", 1, 0, '\0', },
195 { "help", 0, 0, 'h', },
196 { "output", 1, 0, 'o', },
197 { "verbose", 0, 0, 'v', },
198 { "config", 1, 0, 'f' },
199 { "version", 0, 0, 'V' },
200 { "working-directory", 1, 0, 'w', },
201 { "group-output-by-session", 0, 0, 's', },
202 { "group-output-by-host", 0, 0, 'p', },
203 { "disallow-clear", 0, 0, 'x' },
204 { NULL, 0, 0, 0, },
205 };
206
207 static const char *config_ignore_options[] = { "help", "config", "version" };
208
209 static void print_version(void) {
210 fprintf(stdout, "%s\n", VERSION);
211 }
212
213 static void relayd_config_log(void)
214 {
215 DBG("LTTng-relayd " VERSION " - " VERSION_NAME "%s%s",
216 GIT_VERSION[0] == '\0' ? "" : " - " GIT_VERSION,
217 EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " EXTRA_VERSION_NAME);
218 if (EXTRA_VERSION_DESCRIPTION[0] != '\0') {
219 DBG("LTTng-relayd extra version description:\n\t" EXTRA_VERSION_DESCRIPTION "\n");
220 }
221 if (EXTRA_VERSION_PATCHES[0] != '\0') {
222 DBG("LTTng-relayd extra patches:\n\t" EXTRA_VERSION_PATCHES "\n");
223 }
224 }
225
226 /*
227 * Take an option from the getopt output and set it in the right variable to be
228 * used later.
229 *
230 * Return 0 on success else a negative value.
231 */
232 static int set_option(int opt, const char *arg, const char *optname)
233 {
234 int ret;
235
236 switch (opt) {
237 case 0:
238 if (!strcmp(optname, "fd-pool-size")) {
239 unsigned long v;
240
241 errno = 0;
242 v = strtoul(arg, NULL, 0);
243 if (errno != 0 || !isdigit(arg[0])) {
244 ERR("Wrong value in --fd-pool-size parameter: %s", arg);
245 ret = -1;
246 goto end;
247 }
248 if (v >= UINT_MAX) {
249 ERR("File descriptor cap overflow in --fd-pool-size parameter: %s", arg);
250 ret = -1;
251 goto end;
252 }
253 lttng_opt_fd_pool_size = (unsigned int) v;
254 } else {
255 fprintf(stderr, "unknown option %s", optname);
256 if (arg) {
257 fprintf(stderr, " with arg %s\n", arg);
258 }
259 }
260 break;
261 case 'C':
262 if (lttng_is_setuid_setgid()) {
263 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
264 "-C, --control-port");
265 } else {
266 ret = uri_parse(arg, &control_uri);
267 if (ret < 0) {
268 ERR("Invalid control URI specified");
269 goto end;
270 }
271 if (control_uri->port == 0) {
272 control_uri->port = DEFAULT_NETWORK_CONTROL_PORT;
273 }
274 }
275 break;
276 case 'D':
277 if (lttng_is_setuid_setgid()) {
278 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
279 "-D, -data-port");
280 } else {
281 ret = uri_parse(arg, &data_uri);
282 if (ret < 0) {
283 ERR("Invalid data URI specified");
284 goto end;
285 }
286 if (data_uri->port == 0) {
287 data_uri->port = DEFAULT_NETWORK_DATA_PORT;
288 }
289 }
290 break;
291 case 'L':
292 if (lttng_is_setuid_setgid()) {
293 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
294 "-L, -live-port");
295 } else {
296 ret = uri_parse(arg, &live_uri);
297 if (ret < 0) {
298 ERR("Invalid live URI specified");
299 goto end;
300 }
301 if (live_uri->port == 0) {
302 live_uri->port = DEFAULT_NETWORK_VIEWER_PORT;
303 }
304 }
305 break;
306 case 'd':
307 opt_daemon = 1;
308 break;
309 case 'b':
310 opt_background = 1;
311 break;
312 case 'g':
313 if (lttng_is_setuid_setgid()) {
314 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
315 "-g, --group");
316 } else {
317 tracing_group_name = strdup(arg);
318 if (tracing_group_name == NULL) {
319 ret = -errno;
320 PERROR("strdup");
321 goto end;
322 }
323 tracing_group_name_override = 1;
324 }
325 break;
326 case 'h':
327 ret = utils_show_help(8, "lttng-relayd", help_msg);
328 if (ret) {
329 ERR("Cannot show --help for `lttng-relayd`");
330 perror("exec");
331 }
332 exit(EXIT_FAILURE);
333 case 'V':
334 opt_print_version = 1;
335 break;
336 case 'o':
337 if (lttng_is_setuid_setgid()) {
338 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
339 "-o, --output");
340 } else {
341 ret = asprintf(&opt_output_path, "%s", arg);
342 if (ret < 0) {
343 ret = -errno;
344 PERROR("asprintf opt_output_path");
345 goto end;
346 }
347 }
348 break;
349 case 'w':
350 if (lttng_is_setuid_setgid()) {
351 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
352 "-w, --working-directory");
353 } else {
354 ret = asprintf(&opt_working_directory, "%s", arg);
355 if (ret < 0) {
356 ret = -errno;
357 PERROR("asprintf opt_working_directory");
358 goto end;
359 }
360 }
361 break;
362
363 case 'v':
364 /* Verbosity level can be increased by passing -v multiple times. */
365 if (arg) {
366 lttng_opt_verbose = config_parse_value(arg);
367 } else {
368 /* Only 3 levels of verbosity (-vvv). */
369 if (lttng_opt_verbose < 3) {
370 lttng_opt_verbose += 1;
371 }
372 }
373 break;
374 case 's':
375 if (opt_group_output_by != RELAYD_GROUP_OUTPUT_BY_UNKNOWN) {
376 ERR("Cannot set --group-output-by-session, another --group-output-by argument is present");
377 exit(EXIT_FAILURE);
378 }
379 opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_SESSION;
380 break;
381 case 'p':
382 if (opt_group_output_by != RELAYD_GROUP_OUTPUT_BY_UNKNOWN) {
383 ERR("Cannot set --group-output-by-host, another --group-output-by argument is present");
384 exit(EXIT_FAILURE);
385 }
386 opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_HOST;
387 break;
388 case 'x':
389 /* Disallow clear */
390 opt_allow_clear = 0;
391 break;
392 default:
393 /* Unknown option or other error. An error message has already
394 * been printed by getopt; simply return. */
395 ret = -1;
396 goto end;
397 }
398
399 /* All good. */
400 ret = 0;
401
402 end:
403 return ret;
404 }
405
406 /*
407 * config_entry_handler_cb used to handle options read from a config file.
408 * See config_entry_handler_cb comment in common/config/session-config.h for the
409 * return value conventions.
410 */
411 static int config_entry_handler(const struct config_entry *entry, void *unused)
412 {
413 int ret = 0, i;
414
415 if (!entry || !entry->name || !entry->value) {
416 ret = -EINVAL;
417 goto end;
418 }
419
420 /* Check if the option is to be ignored */
421 for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
422 if (!strcmp(entry->name, config_ignore_options[i])) {
423 goto end;
424 }
425 }
426
427 for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1; i++) {
428 /* Ignore if entry name is not fully matched. */
429 if (strcmp(entry->name, long_options[i].name)) {
430 continue;
431 }
432
433 /*
434 * If the option takes no argument on the command line,
435 * we have to check if the value is "true". We support
436 * non-zero numeric values, true, on and yes.
437 */
438 if (!long_options[i].has_arg) {
439 ret = config_parse_value(entry->value);
440 if (ret <= 0) {
441 if (ret) {
442 WARN("Invalid configuration value \"%s\" for option %s",
443 entry->value, entry->name);
444 }
445 /* False, skip boolean config option. */
446 goto end;
447 }
448 }
449
450 ret = set_option(long_options[i].val, entry->value, entry->name);
451 goto end;
452 }
453
454 WARN("Unrecognized option \"%s\" in daemon configuration file.",
455 entry->name);
456
457 end:
458 return ret;
459 }
460
461 static int parse_env_options(void)
462 {
463 int ret = 0;
464 char *value = NULL;
465
466 value = lttng_secure_getenv(DEFAULT_LTTNG_RELAYD_WORKING_DIRECTORY_ENV);
467 if (value) {
468 opt_working_directory = strdup(value);
469 if (!opt_working_directory) {
470 ERR("Failed to allocate working directory string (\"%s\")",
471 value);
472 ret = -1;
473 }
474 }
475 return ret;
476 }
477
478 static int set_fd_pool_size(void)
479 {
480 int ret = 0;
481 struct rlimit rlimit;
482
483 ret = getrlimit(RLIMIT_NOFILE, &rlimit);
484 if (ret) {
485 PERROR("Failed to get file descriptor limit");
486 ret = -1;
487 goto end;
488 }
489
490 DBG("File descriptor count limits are %" PRIu64 " (soft) and %" PRIu64 " (hard)",
491 (uint64_t) rlimit.rlim_cur,
492 (uint64_t) rlimit.rlim_max);
493 if (lttng_opt_fd_pool_size == -1) {
494 /* Use default value (soft limit - reserve). */
495 if (rlimit.rlim_cur < DEFAULT_RELAYD_MIN_FD_POOL_SIZE) {
496 ERR("The process' file number limit is too low (%" PRIu64 "). The process' file number limit must be set to at least %i.",
497 (uint64_t) rlimit.rlim_cur, DEFAULT_RELAYD_MIN_FD_POOL_SIZE);
498 ret = -1;
499 goto end;
500 }
501 lttng_opt_fd_pool_size = rlimit.rlim_cur -
502 DEFAULT_RELAYD_FD_POOL_SIZE_RESERVE;
503 goto end;
504 }
505
506 if (lttng_opt_fd_pool_size < DEFAULT_RELAYD_MIN_FD_POOL_SIZE) {
507 ERR("File descriptor pool size must be set to at least %d",
508 DEFAULT_RELAYD_MIN_FD_POOL_SIZE);
509 ret = -1;
510 goto end;
511 }
512
513 if (lttng_opt_fd_pool_size > rlimit.rlim_cur) {
514 ERR("File descriptor pool size argument (%u) exceeds the process' soft limit (%" PRIu64 ").",
515 lttng_opt_fd_pool_size, (uint64_t) rlimit.rlim_cur);
516 ret = -1;
517 goto end;
518 }
519
520 DBG("File descriptor pool size argument (%u) adjusted to %u to accomodate transient fd uses",
521 lttng_opt_fd_pool_size,
522 lttng_opt_fd_pool_size - DEFAULT_RELAYD_FD_POOL_SIZE_RESERVE);
523 lttng_opt_fd_pool_size -= DEFAULT_RELAYD_FD_POOL_SIZE_RESERVE;
524 end:
525 return ret;
526 }
527
528 static int set_options(int argc, char **argv)
529 {
530 int c, ret = 0, option_index = 0, retval = 0;
531 int orig_optopt = optopt, orig_optind = optind;
532 char *default_address, *optstring;
533 const char *config_path = NULL;
534
535 optstring = utils_generate_optstring(long_options,
536 sizeof(long_options) / sizeof(struct option));
537 if (!optstring) {
538 retval = -ENOMEM;
539 goto exit;
540 }
541
542 /* Check for the --config option */
543
544 while ((c = getopt_long(argc, argv, optstring, long_options,
545 &option_index)) != -1) {
546 if (c == '?') {
547 retval = -EINVAL;
548 goto exit;
549 } else if (c != 'f') {
550 continue;
551 }
552
553 if (lttng_is_setuid_setgid()) {
554 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
555 "-f, --config");
556 } else {
557 config_path = utils_expand_path(optarg);
558 if (!config_path) {
559 ERR("Failed to resolve path: %s", optarg);
560 }
561 }
562 }
563
564 ret = config_get_section_entries(config_path, config_section_name,
565 config_entry_handler, NULL);
566 if (ret) {
567 if (ret > 0) {
568 ERR("Invalid configuration option at line %i", ret);
569 }
570 retval = -1;
571 goto exit;
572 }
573
574 /* Reset getopt's global state */
575 optopt = orig_optopt;
576 optind = orig_optind;
577 while (1) {
578 c = getopt_long(argc, argv, optstring, long_options, &option_index);
579 if (c == -1) {
580 break;
581 }
582
583 ret = set_option(c, optarg, long_options[option_index].name);
584 if (ret < 0) {
585 retval = -1;
586 goto exit;
587 }
588 }
589
590 /* assign default values */
591 if (control_uri == NULL) {
592 ret = asprintf(&default_address,
593 "tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d",
594 DEFAULT_NETWORK_CONTROL_PORT);
595 if (ret < 0) {
596 PERROR("asprintf default data address");
597 retval = -1;
598 goto exit;
599 }
600
601 ret = uri_parse(default_address, &control_uri);
602 free(default_address);
603 if (ret < 0) {
604 ERR("Invalid control URI specified");
605 retval = -1;
606 goto exit;
607 }
608 }
609 if (data_uri == NULL) {
610 ret = asprintf(&default_address,
611 "tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d",
612 DEFAULT_NETWORK_DATA_PORT);
613 if (ret < 0) {
614 PERROR("asprintf default data address");
615 retval = -1;
616 goto exit;
617 }
618
619 ret = uri_parse(default_address, &data_uri);
620 free(default_address);
621 if (ret < 0) {
622 ERR("Invalid data URI specified");
623 retval = -1;
624 goto exit;
625 }
626 }
627 if (live_uri == NULL) {
628 ret = asprintf(&default_address,
629 "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d",
630 DEFAULT_NETWORK_VIEWER_PORT);
631 if (ret < 0) {
632 PERROR("asprintf default viewer control address");
633 retval = -1;
634 goto exit;
635 }
636
637 ret = uri_parse(default_address, &live_uri);
638 free(default_address);
639 if (ret < 0) {
640 ERR("Invalid viewer control URI specified");
641 retval = -1;
642 goto exit;
643 }
644 }
645 ret = set_fd_pool_size();
646 if (ret) {
647 retval = -1;
648 goto exit;
649 }
650
651 if (opt_group_output_by == RELAYD_GROUP_OUTPUT_BY_UNKNOWN) {
652 opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_HOST;
653 }
654 if (opt_allow_clear) {
655 /* Check if env variable exists. */
656 const char *value = lttng_secure_getenv(DEFAULT_LTTNG_RELAYD_DISALLOW_CLEAR_ENV);
657 if (value) {
658 ret = config_parse_value(value);
659 if (ret < 0) {
660 ERR("Invalid value for %s specified", DEFAULT_LTTNG_RELAYD_DISALLOW_CLEAR_ENV);
661 retval = -1;
662 goto exit;
663 }
664 opt_allow_clear = !ret;
665 }
666 }
667
668 exit:
669 free(optstring);
670 return retval;
671 }
672
673 static void print_global_objects(void)
674 {
675 print_viewer_streams();
676 print_relay_streams();
677 print_sessions();
678 }
679
680 static int noop_close(void *data, int *fds)
681 {
682 return 0;
683 }
684
685 static void untrack_stdio(void)
686 {
687 int fds[] = { fileno(stdout), fileno(stderr) };
688
689 /*
690 * noop_close is used since we don't really want to close
691 * the stdio output fds; we merely want to stop tracking them.
692 */
693 (void) fd_tracker_close_unsuspendable_fd(the_fd_tracker,
694 fds, 2, noop_close, NULL);
695 }
696
697 /*
698 * Cleanup the daemon
699 */
700 static void relayd_cleanup(void)
701 {
702 print_global_objects();
703
704 DBG("Cleaning up");
705
706 if (viewer_streams_ht)
707 lttng_ht_destroy(viewer_streams_ht);
708 if (relay_streams_ht)
709 lttng_ht_destroy(relay_streams_ht);
710 if (sessions_ht)
711 lttng_ht_destroy(sessions_ht);
712
713 free(opt_output_path);
714 free(opt_working_directory);
715
716 if (health_relayd) {
717 health_app_destroy(health_relayd);
718 }
719 /* Close thread quit pipes */
720 if (health_quit_pipe[0] != -1) {
721 (void) fd_tracker_util_pipe_close(
722 the_fd_tracker, health_quit_pipe);
723 }
724 if (thread_quit_pipe[0] != -1) {
725 (void) fd_tracker_util_pipe_close(
726 the_fd_tracker, thread_quit_pipe);
727 }
728 if (sessiond_trace_chunk_registry) {
729 sessiond_trace_chunk_registry_destroy(
730 sessiond_trace_chunk_registry);
731 }
732 if (the_fd_tracker) {
733 untrack_stdio();
734 /*
735 * fd_tracker_destroy() will log the contents of the fd-tracker
736 * if a leak is detected.
737 */
738 fd_tracker_destroy(the_fd_tracker);
739 }
740
741 uri_free(control_uri);
742 uri_free(data_uri);
743 /* Live URI is freed in the live thread. */
744
745 if (tracing_group_name_override) {
746 free((void *) tracing_group_name);
747 }
748 }
749
750 /*
751 * Write to writable pipe used to notify a thread.
752 */
753 static int notify_thread_pipe(int wpipe)
754 {
755 ssize_t ret;
756
757 ret = lttng_write(wpipe, "!", 1);
758 if (ret < 1) {
759 PERROR("write poll pipe");
760 goto end;
761 }
762 ret = 0;
763 end:
764 return ret;
765 }
766
767 static int notify_health_quit_pipe(int *pipe)
768 {
769 ssize_t ret;
770
771 ret = lttng_write(pipe[1], "4", 1);
772 if (ret < 1) {
773 PERROR("write relay health quit");
774 goto end;
775 }
776 ret = 0;
777 end:
778 return ret;
779 }
780
781 /*
782 * Stop all relayd and relayd-live threads.
783 */
784 int lttng_relay_stop_threads(void)
785 {
786 int retval = 0;
787
788 /* Stopping all threads */
789 DBG("Terminating all threads");
790 if (notify_thread_pipe(thread_quit_pipe[1])) {
791 ERR("write error on thread quit pipe");
792 retval = -1;
793 }
794
795 if (notify_health_quit_pipe(health_quit_pipe)) {
796 ERR("write error on health quit pipe");
797 }
798
799 /* Dispatch thread */
800 CMM_STORE_SHARED(dispatch_thread_exit, 1);
801 futex_nto1_wake(&relay_conn_queue.futex);
802
803 if (relayd_live_stop()) {
804 ERR("Error stopping live threads");
805 retval = -1;
806 }
807 return retval;
808 }
809
810 /*
811 * Signal handler for the daemon
812 *
813 * Simply stop all worker threads, leaving main() return gracefully after
814 * joining all threads and calling cleanup().
815 */
816 static void sighandler(int sig)
817 {
818 switch (sig) {
819 case SIGINT:
820 DBG("SIGINT caught");
821 if (lttng_relay_stop_threads()) {
822 ERR("Error stopping threads");
823 }
824 break;
825 case SIGTERM:
826 DBG("SIGTERM caught");
827 if (lttng_relay_stop_threads()) {
828 ERR("Error stopping threads");
829 }
830 break;
831 case SIGUSR1:
832 CMM_STORE_SHARED(recv_child_signal, 1);
833 break;
834 default:
835 break;
836 }
837 }
838
839 /*
840 * Set up the signal handlers for:
841 * SIGINT, SIGTERM, SIGUSR1 and SIGPIPE
842 */
843 static int set_signal_handler(void)
844 {
845 int ret = 0;
846 struct sigaction sa;
847 sigset_t sigset;
848
849 if ((ret = sigemptyset(&sigset)) < 0) {
850 PERROR("sigemptyset");
851 return ret;
852 }
853
854 sa.sa_mask = sigset;
855 sa.sa_flags = 0;
856
857 sa.sa_handler = sighandler;
858 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
859 PERROR("sigaction");
860 return ret;
861 }
862
863 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
864 PERROR("sigaction");
865 return ret;
866 }
867
868 if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
869 PERROR("sigaction");
870 return ret;
871 }
872
873 sa.sa_handler = SIG_IGN;
874 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
875 PERROR("sigaction");
876 return ret;
877 }
878
879 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
880
881 return ret;
882 }
883
884 void lttng_relay_notify_ready(void)
885 {
886 /* Notify the parent process (which fork()'d us) that we are ready. */
887 if (opt_daemon || opt_background) {
888 if (uatomic_sub_return(&lttng_relay_ready, 1) == 0) {
889 kill(child_ppid, SIGUSR1);
890 }
891 }
892 }
893
894 /*
895 * Init thread quit pipe.
896 *
897 * Return -1 on error or 0 if all pipes are created.
898 */
899 static int init_thread_quit_pipe(void)
900 {
901 return fd_tracker_util_pipe_open_cloexec(
902 the_fd_tracker, "Quit pipe", thread_quit_pipe);
903 }
904
905 /*
906 * Init health quit pipe.
907 *
908 * Return -1 on error or 0 if all pipes are created.
909 */
910 static int init_health_quit_pipe(void)
911 {
912 return fd_tracker_util_pipe_open_cloexec(the_fd_tracker,
913 "Health quit pipe", health_quit_pipe);
914 }
915
916 /*
917 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
918 */
919 static int create_named_thread_poll_set(struct lttng_poll_event *events,
920 int size, const char *name)
921 {
922 int ret;
923
924 if (events == NULL || size == 0) {
925 ret = -1;
926 goto error;
927 }
928
929 ret = fd_tracker_util_poll_create(the_fd_tracker,
930 name, events, 1, LTTNG_CLOEXEC);
931
932 /* Add quit pipe */
933 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
934 if (ret < 0) {
935 goto error;
936 }
937
938 return 0;
939
940 error:
941 return ret;
942 }
943
944 /*
945 * Check if the thread quit pipe was triggered.
946 *
947 * Return 1 if it was triggered, else 0.
948 */
949 static int check_thread_quit_pipe(int fd, uint32_t events)
950 {
951 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
952 return 1;
953 }
954
955 return 0;
956 }
957
958 static int create_sock(void *data, int *out_fd)
959 {
960 int ret;
961 struct lttcomm_sock *sock = data;
962
963 ret = lttcomm_create_sock(sock);
964 if (ret < 0) {
965 goto end;
966 }
967
968 *out_fd = sock->fd;
969 end:
970 return ret;
971 }
972
973 static int close_sock(void *data, int *in_fd)
974 {
975 struct lttcomm_sock *sock = data;
976
977 return sock->ops->close(sock);
978 }
979
980 static int accept_sock(void *data, int *out_fd)
981 {
982 int ret = 0;
983 /* Socks is an array of in_sock, out_sock. */
984 struct lttcomm_sock **socks = data;
985 struct lttcomm_sock *in_sock = socks[0];
986
987 socks[1] = in_sock->ops->accept(in_sock);
988 if (!socks[1]) {
989 ret = -1;
990 goto end;
991 }
992 *out_fd = socks[1]->fd;
993 end:
994 return ret;
995 }
996
997 /*
998 * Create and init socket from uri.
999 */
1000 static struct lttcomm_sock *relay_socket_create(struct lttng_uri *uri,
1001 const char *name)
1002 {
1003 int ret, sock_fd;
1004 struct lttcomm_sock *sock = NULL;
1005 char uri_str[PATH_MAX];
1006 char *formated_name = NULL;
1007
1008 sock = lttcomm_alloc_sock_from_uri(uri);
1009 if (sock == NULL) {
1010 ERR("Allocating socket");
1011 goto error;
1012 }
1013
1014 /*
1015 * Don't fail to create the socket if the name can't be built as it is
1016 * only used for debugging purposes.
1017 */
1018 ret = uri_to_str_url(uri, uri_str, sizeof(uri_str));
1019 uri_str[sizeof(uri_str) - 1] = '\0';
1020 if (ret >= 0) {
1021 ret = asprintf(&formated_name, "%s socket @ %s", name,
1022 uri_str);
1023 if (ret < 0) {
1024 formated_name = NULL;
1025 }
1026 }
1027
1028 ret = fd_tracker_open_unsuspendable_fd(the_fd_tracker, &sock_fd,
1029 (const char **) (formated_name ? &formated_name : NULL),
1030 1, create_sock, sock);
1031 free(formated_name);
1032 DBG("Listening on %s socket %d", name, sock->fd);
1033
1034 ret = sock->ops->bind(sock);
1035 if (ret < 0) {
1036 PERROR("Failed to bind socket");
1037 goto error;
1038 }
1039
1040 ret = sock->ops->listen(sock, -1);
1041 if (ret < 0) {
1042 goto error;
1043
1044 }
1045
1046 return sock;
1047
1048 error:
1049 if (sock) {
1050 lttcomm_destroy_sock(sock);
1051 }
1052 return NULL;
1053 }
1054
1055 static
1056 struct lttcomm_sock *accept_relayd_sock(struct lttcomm_sock *listening_sock,
1057 const char *name)
1058 {
1059 int out_fd, ret;
1060 struct lttcomm_sock *socks[2] = { listening_sock, NULL };
1061 struct lttcomm_sock *new_sock = NULL;
1062
1063 ret = fd_tracker_open_unsuspendable_fd(
1064 the_fd_tracker, &out_fd,
1065 (const char **) &name,
1066 1, accept_sock, &socks);
1067 if (ret) {
1068 goto end;
1069 }
1070 new_sock = socks[1];
1071 DBG("%s accepted, socket %d", name, new_sock->fd);
1072 end:
1073 return new_sock;
1074 }
1075
1076 /*
1077 * This thread manages the listening for new connections on the network
1078 */
1079 static void *relay_thread_listener(void *data)
1080 {
1081 int i, ret, pollfd, err = -1;
1082 uint32_t revents, nb_fd;
1083 struct lttng_poll_event events;
1084 struct lttcomm_sock *control_sock, *data_sock;
1085
1086 DBG("[thread] Relay listener started");
1087
1088 health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER);
1089
1090 health_code_update();
1091
1092 control_sock = relay_socket_create(control_uri, "Control listener");
1093 if (!control_sock) {
1094 goto error_sock_control;
1095 }
1096
1097 data_sock = relay_socket_create(data_uri, "Data listener");
1098 if (!data_sock) {
1099 goto error_sock_relay;
1100 }
1101
1102 /*
1103 * Pass 3 as size here for the thread quit pipe, control and
1104 * data socket.
1105 */
1106 ret = create_named_thread_poll_set(&events, 3, "Listener thread epoll");
1107 if (ret < 0) {
1108 goto error_create_poll;
1109 }
1110
1111 /* Add the control socket */
1112 ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP);
1113 if (ret < 0) {
1114 goto error_poll_add;
1115 }
1116
1117 /* Add the data socket */
1118 ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP);
1119 if (ret < 0) {
1120 goto error_poll_add;
1121 }
1122
1123 lttng_relay_notify_ready();
1124
1125 if (testpoint(relayd_thread_listener)) {
1126 goto error_testpoint;
1127 }
1128
1129 while (1) {
1130 health_code_update();
1131
1132 DBG("Listener accepting connections");
1133
1134 restart:
1135 health_poll_entry();
1136 ret = lttng_poll_wait(&events, -1);
1137 health_poll_exit();
1138 if (ret < 0) {
1139 /*
1140 * Restart interrupted system call.
1141 */
1142 if (errno == EINTR) {
1143 goto restart;
1144 }
1145 goto error;
1146 }
1147
1148 nb_fd = ret;
1149
1150 DBG("Relay new connection received");
1151 for (i = 0; i < nb_fd; i++) {
1152 health_code_update();
1153
1154 /* Fetch once the poll data */
1155 revents = LTTNG_POLL_GETEV(&events, i);
1156 pollfd = LTTNG_POLL_GETFD(&events, i);
1157
1158 /* Thread quit pipe has been closed. Killing thread. */
1159 ret = check_thread_quit_pipe(pollfd, revents);
1160 if (ret) {
1161 err = 0;
1162 goto exit;
1163 }
1164
1165 if (revents & LPOLLIN) {
1166 /*
1167 * A new connection is requested, therefore a
1168 * sessiond/consumerd connection is allocated in
1169 * this thread, enqueued to a global queue and
1170 * dequeued (and freed) in the worker thread.
1171 */
1172 int val = 1;
1173 struct relay_connection *new_conn;
1174 struct lttcomm_sock *newsock = NULL;
1175 enum connection_type type;
1176
1177 if (pollfd == data_sock->fd) {
1178 type = RELAY_DATA;
1179 newsock = accept_relayd_sock(data_sock,
1180 "Data socket to relayd");
1181 } else {
1182 assert(pollfd == control_sock->fd);
1183 type = RELAY_CONTROL;
1184 newsock = accept_relayd_sock(control_sock,
1185 "Control socket to relayd");
1186 }
1187 if (!newsock) {
1188 PERROR("accepting sock");
1189 goto error;
1190 }
1191
1192 ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
1193 sizeof(val));
1194 if (ret < 0) {
1195 PERROR("setsockopt inet");
1196 lttcomm_destroy_sock(newsock);
1197 goto error;
1198 }
1199
1200 ret = socket_apply_keep_alive_config(newsock->fd);
1201 if (ret < 0) {
1202 ERR("Failed to apply TCP keep-alive configuration on socket (%i)",
1203 newsock->fd);
1204 lttcomm_destroy_sock(newsock);
1205 goto error;
1206 }
1207
1208 new_conn = connection_create(newsock, type);
1209 if (!new_conn) {
1210 lttcomm_destroy_sock(newsock);
1211 goto error;
1212 }
1213
1214 /* Enqueue request for the dispatcher thread. */
1215 cds_wfcq_enqueue(&relay_conn_queue.head, &relay_conn_queue.tail,
1216 &new_conn->qnode);
1217
1218 /*
1219 * Wake the dispatch queue futex.
1220 * Implicit memory barrier with the
1221 * exchange in cds_wfcq_enqueue.
1222 */
1223 futex_nto1_wake(&relay_conn_queue.futex);
1224 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1225 ERR("socket poll error");
1226 goto error;
1227 } else {
1228 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
1229 goto error;
1230 }
1231 }
1232 }
1233
1234 exit:
1235 error:
1236 error_poll_add:
1237 error_testpoint:
1238 (void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
1239 error_create_poll:
1240 if (data_sock->fd >= 0) {
1241 int data_sock_fd = data_sock->fd;
1242
1243 ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker,
1244 &data_sock_fd, 1, close_sock,
1245 data_sock);
1246 if (ret) {
1247 PERROR("Failed to close the data listener socket file descriptor");
1248 }
1249 data_sock->fd = -1;
1250 }
1251 lttcomm_destroy_sock(data_sock);
1252 error_sock_relay:
1253 if (control_sock->fd >= 0) {
1254 int control_sock_fd = control_sock->fd;
1255
1256 ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker,
1257 &control_sock_fd, 1, close_sock,
1258 control_sock);
1259 if (ret) {
1260 PERROR("Failed to close the control listener socket file descriptor");
1261 }
1262 control_sock->fd = -1;
1263 }
1264 lttcomm_destroy_sock(control_sock);
1265 error_sock_control:
1266 if (err) {
1267 health_error();
1268 ERR("Health error occurred in %s", __func__);
1269 }
1270 health_unregister(health_relayd);
1271 DBG("Relay listener thread cleanup complete");
1272 lttng_relay_stop_threads();
1273 return NULL;
1274 }
1275
1276 /*
1277 * This thread manages the dispatching of the requests to worker threads
1278 */
1279 static void *relay_thread_dispatcher(void *data)
1280 {
1281 int err = -1;
1282 ssize_t ret;
1283 struct cds_wfcq_node *node;
1284 struct relay_connection *new_conn = NULL;
1285
1286 DBG("[thread] Relay dispatcher started");
1287
1288 health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER);
1289
1290 if (testpoint(relayd_thread_dispatcher)) {
1291 goto error_testpoint;
1292 }
1293
1294 health_code_update();
1295
1296 for (;;) {
1297 health_code_update();
1298
1299 /* Atomically prepare the queue futex */
1300 futex_nto1_prepare(&relay_conn_queue.futex);
1301
1302 if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
1303 break;
1304 }
1305
1306 do {
1307 health_code_update();
1308
1309 /* Dequeue commands */
1310 node = cds_wfcq_dequeue_blocking(&relay_conn_queue.head,
1311 &relay_conn_queue.tail);
1312 if (node == NULL) {
1313 DBG("Woken up but nothing in the relay command queue");
1314 /* Continue thread execution */
1315 break;
1316 }
1317 new_conn = caa_container_of(node, struct relay_connection, qnode);
1318
1319 DBG("Dispatching request waiting on sock %d", new_conn->sock->fd);
1320
1321 /*
1322 * Inform worker thread of the new request. This
1323 * call is blocking so we can be assured that
1324 * the data will be read at some point in time
1325 * or wait to the end of the world :)
1326 */
1327 ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn));
1328 if (ret < 0) {
1329 PERROR("write connection pipe");
1330 connection_put(new_conn);
1331 goto error;
1332 }
1333 } while (node != NULL);
1334
1335 /* Futex wait on queue. Blocking call on futex() */
1336 health_poll_entry();
1337 futex_nto1_wait(&relay_conn_queue.futex);
1338 health_poll_exit();
1339 }
1340
1341 /* Normal exit, no error */
1342 err = 0;
1343
1344 error:
1345 error_testpoint:
1346 if (err) {
1347 health_error();
1348 ERR("Health error occurred in %s", __func__);
1349 }
1350 health_unregister(health_relayd);
1351 DBG("Dispatch thread dying");
1352 lttng_relay_stop_threads();
1353 return NULL;
1354 }
1355
1356 static bool session_streams_have_index(const struct relay_session *session)
1357 {
1358 return session->minor >= 4 && !session->snapshot;
1359 }
1360
1361 /*
1362 * Handle the RELAYD_CREATE_SESSION command.
1363 *
1364 * On success, send back the session id or else return a negative value.
1365 */
1366 static int relay_create_session(const struct lttcomm_relayd_hdr *recv_hdr,
1367 struct relay_connection *conn,
1368 const struct lttng_buffer_view *payload)
1369 {
1370 int ret = 0;
1371 ssize_t send_ret;
1372 struct relay_session *session = NULL;
1373 struct lttcomm_relayd_create_session_reply_2_11 reply = {};
1374 char session_name[LTTNG_NAME_MAX] = {};
1375 char hostname[LTTNG_HOST_NAME_MAX] = {};
1376 uint32_t live_timer = 0;
1377 bool snapshot = false;
1378 bool session_name_contains_creation_timestamp = false;
1379 /* Left nil for peers < 2.11. */
1380 char base_path[LTTNG_PATH_MAX] = {};
1381 lttng_uuid sessiond_uuid = {};
1382 LTTNG_OPTIONAL(uint64_t) id_sessiond = {};
1383 LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
1384 LTTNG_OPTIONAL(time_t) creation_time = {};
1385 struct lttng_dynamic_buffer reply_payload;
1386
1387 lttng_dynamic_buffer_init(&reply_payload);
1388
1389 if (conn->minor < 4) {
1390 /* From 2.1 to 2.3 */
1391 ret = 0;
1392 } else if (conn->minor >= 4 && conn->minor < 11) {
1393 /* From 2.4 to 2.10 */
1394 ret = cmd_create_session_2_4(payload, session_name,
1395 hostname, &live_timer, &snapshot);
1396 } else {
1397 bool has_current_chunk;
1398 uint64_t current_chunk_id_value;
1399 time_t creation_time_value;
1400 uint64_t id_sessiond_value;
1401
1402 /* From 2.11 to ... */
1403 ret = cmd_create_session_2_11(payload, session_name, hostname,
1404 base_path, &live_timer, &snapshot, &id_sessiond_value,
1405 sessiond_uuid, &has_current_chunk,
1406 &current_chunk_id_value, &creation_time_value,
1407 &session_name_contains_creation_timestamp);
1408 if (lttng_uuid_is_nil(sessiond_uuid)) {
1409 /* The nil UUID is reserved for pre-2.11 clients. */
1410 ERR("Illegal nil UUID announced by peer in create session command");
1411 ret = -1;
1412 goto send_reply;
1413 }
1414 LTTNG_OPTIONAL_SET(&id_sessiond, id_sessiond_value);
1415 LTTNG_OPTIONAL_SET(&creation_time, creation_time_value);
1416 if (has_current_chunk) {
1417 LTTNG_OPTIONAL_SET(&current_chunk_id,
1418 current_chunk_id_value);
1419 }
1420 }
1421
1422 if (ret < 0) {
1423 goto send_reply;
1424 }
1425
1426 session = session_create(session_name, hostname, base_path, live_timer,
1427 snapshot, sessiond_uuid,
1428 id_sessiond.is_set ? &id_sessiond.value : NULL,
1429 current_chunk_id.is_set ? &current_chunk_id.value : NULL,
1430 creation_time.is_set ? &creation_time.value : NULL,
1431 conn->major, conn->minor,
1432 session_name_contains_creation_timestamp);
1433 if (!session) {
1434 ret = -1;
1435 goto send_reply;
1436 }
1437 assert(!conn->session);
1438 conn->session = session;
1439 DBG("Created session %" PRIu64, session->id);
1440
1441 reply.generic.session_id = htobe64(session->id);
1442
1443 send_reply:
1444 if (ret < 0) {
1445 reply.generic.ret_code = htobe32(LTTNG_ERR_FATAL);
1446 } else {
1447 reply.generic.ret_code = htobe32(LTTNG_OK);
1448 }
1449
1450 if (conn->minor < 11) {
1451 /* From 2.1 to 2.10 */
1452 ret = lttng_dynamic_buffer_append(&reply_payload,
1453 &reply.generic, sizeof(reply.generic));
1454 if (ret) {
1455 ERR("Failed to append \"create session\" command reply header to payload buffer");
1456 ret = -1;
1457 goto end;
1458 }
1459 } else {
1460 const uint32_t output_path_length =
1461 session ? strlen(session->output_path) + 1 : 0;
1462
1463 reply.output_path_length = htobe32(output_path_length);
1464 ret = lttng_dynamic_buffer_append(
1465 &reply_payload, &reply, sizeof(reply));
1466 if (ret) {
1467 ERR("Failed to append \"create session\" command reply header to payload buffer");
1468 goto end;
1469 }
1470
1471 if (output_path_length) {
1472 ret = lttng_dynamic_buffer_append(&reply_payload,
1473 session->output_path,
1474 output_path_length);
1475 if (ret) {
1476 ERR("Failed to append \"create session\" command reply path to payload buffer");
1477 goto end;
1478 }
1479 }
1480 }
1481
1482 send_ret = conn->sock->ops->sendmsg(conn->sock, reply_payload.data,
1483 reply_payload.size, 0);
1484 if (send_ret < (ssize_t) reply_payload.size) {
1485 ERR("Failed to send \"create session\" command reply of %zu bytes (ret = %zd)",
1486 reply_payload.size, send_ret);
1487 ret = -1;
1488 }
1489 end:
1490 if (ret < 0 && session) {
1491 session_put(session);
1492 }
1493 lttng_dynamic_buffer_reset(&reply_payload);
1494 return ret;
1495 }
1496
1497 /*
1498 * When we have received all the streams and the metadata for a channel,
1499 * we make them visible to the viewer threads.
1500 */
1501 static void publish_connection_local_streams(struct relay_connection *conn)
1502 {
1503 struct relay_stream *stream;
1504 struct relay_session *session = conn->session;
1505
1506 /*
1507 * We publish all streams belonging to a session atomically wrt
1508 * session lock.
1509 */
1510 pthread_mutex_lock(&session->lock);
1511 rcu_read_lock();
1512 cds_list_for_each_entry_rcu(stream, &session->recv_list,
1513 recv_node) {
1514 stream_publish(stream);
1515 }
1516 rcu_read_unlock();
1517
1518 /*
1519 * Inform the viewer that there are new streams in the session.
1520 */
1521 if (session->viewer_attached) {
1522 uatomic_set(&session->new_streams, 1);
1523 }
1524 pthread_mutex_unlock(&session->lock);
1525 }
1526
1527 static int conform_channel_path(char *channel_path)
1528 {
1529 int ret = 0;
1530
1531 if (strstr("../", channel_path)) {
1532 ERR("Refusing channel path as it walks up the path hierarchy: \"%s\"",
1533 channel_path);
1534 ret = -1;
1535 goto end;
1536 }
1537
1538 if (*channel_path == '/') {
1539 const size_t len = strlen(channel_path);
1540
1541 /*
1542 * Channel paths from peers prior to 2.11 are expressed as an
1543 * absolute path that is, in reality, relative to the relay
1544 * daemon's output directory. Remove the leading slash so it
1545 * is correctly interpreted as a relative path later on.
1546 *
1547 * len (and not len - 1) is used to copy the trailing NUL terminator.
1548 */
1549 bcopy(channel_path + 1, channel_path, len);
1550 }
1551 end:
1552 return ret;
1553 }
1554
1555 /*
1556 * relay_add_stream: allocate a new stream for a session
1557 */
1558 static int relay_add_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1559 struct relay_connection *conn,
1560 const struct lttng_buffer_view *payload)
1561 {
1562 int ret;
1563 ssize_t send_ret;
1564 struct relay_session *session = conn->session;
1565 struct relay_stream *stream = NULL;
1566 struct lttcomm_relayd_status_stream reply;
1567 struct ctf_trace *trace = NULL;
1568 uint64_t stream_handle = -1ULL;
1569 char *path_name = NULL, *channel_name = NULL;
1570 uint64_t tracefile_size = 0, tracefile_count = 0;
1571 LTTNG_OPTIONAL(uint64_t) stream_chunk_id = {};
1572
1573 if (!session || !conn->version_check_done) {
1574 ERR("Trying to add a stream before version check");
1575 ret = -1;
1576 goto end_no_session;
1577 }
1578
1579 if (session->minor == 1) {
1580 /* For 2.1 */
1581 ret = cmd_recv_stream_2_1(payload, &path_name,
1582 &channel_name);
1583 } else if (session->minor > 1 && session->minor < 11) {
1584 /* From 2.2 to 2.10 */
1585 ret = cmd_recv_stream_2_2(payload, &path_name,
1586 &channel_name, &tracefile_size, &tracefile_count);
1587 } else {
1588 /* From 2.11 to ... */
1589 ret = cmd_recv_stream_2_11(payload, &path_name,
1590 &channel_name, &tracefile_size, &tracefile_count,
1591 &stream_chunk_id.value);
1592 stream_chunk_id.is_set = true;
1593 }
1594
1595 if (ret < 0) {
1596 goto send_reply;
1597 }
1598
1599 if (conform_channel_path(path_name)) {
1600 goto send_reply;
1601 }
1602
1603 /*
1604 * Backward compatibility for --group-output-by-session.
1605 * Prior to LTTng 2.11, the complete stream path is provided by the
1606 * producer. Starting with 2.11, lttng-relayd relies on trace chunks:
1607 * for producers >= 2.11, the chunk is responsible for the output path,
1608 * while for producers < 2.11 the chunk output_path is only the root
1609 * output path and the stream carries the complete path (path_name).
1610 * To support --group-output-by-session with older producers (< 2.11),
1611 * the path must be crafted from the stream path, as illustrated below.
1612 */
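/*
 * Illustration only (the directory layout below is assumed, not taken
 * from this file): with the default per-host grouping, a pre-2.11
 * producer announces a stream path such as
 *     myhostname/my_session-20190919-144651/ust/uid/1000/64-bit
 * and backward_compat_group_by_session() is expected to rewrite it to a
 * session-first layout along the lines of
 *     my_session/myhostname-20190919-144651/ust/uid/1000/64-bit
 */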
1613 if (opt_group_output_by == RELAYD_GROUP_OUTPUT_BY_SESSION) {
1614 if (conn->minor < 4) {
1615 /*
1616 * From 2.1 to 2.3, the session_name is not passed on
1617 * the RELAYD_CREATE_SESSION command. The session name
1618 * is necessary to detect the presence of a base_path
1619 * inside the stream path. Without it we cannot perform
1620 * a valid group-output-by-session transformation.
1621 */
1622 WARN("Unable to perform a --group-by-session transformation for session %" PRIu64
1623 " for stream with path \"%s\" as it is produced by a peer using a protocol older than v2.4",
1624 session->id, path_name);
1625 } else if (conn->minor >= 4 && conn->minor < 11) {
1626 char *group_by_session_path_name;
1627
1628 assert(session->session_name[0] != '\0');
1629
1630 group_by_session_path_name =
1631 backward_compat_group_by_session(
1632 path_name,
1633 session->session_name);
1634 if (!group_by_session_path_name) {
1635 ERR("Failed to apply group by session to stream of session %" PRIu64,
1636 session->id);
1637 goto send_reply;
1638 }
1639
1640 DBG("Transformed session path from \"%s\" to \"%s\" to honor per-session name grouping",
1641 path_name, group_by_session_path_name);
1642
1643 free(path_name);
1644 path_name = group_by_session_path_name;
1645 }
1646 }
1647
1648 trace = ctf_trace_get_by_path_or_create(session, path_name);
1649 if (!trace) {
1650 goto send_reply;
1651 }
1652
1653 /* This stream here has one reference on the trace. */
1654 pthread_mutex_lock(&last_relay_stream_id_lock);
1655 stream_handle = ++last_relay_stream_id;
1656 pthread_mutex_unlock(&last_relay_stream_id_lock);
1657
1658 /* We pass ownership of path_name and channel_name. */
1659 stream = stream_create(trace, stream_handle, path_name,
1660 channel_name, tracefile_size, tracefile_count);
1661 path_name = NULL;
1662 channel_name = NULL;
1663
1664 /*
1665 * Streams are the owners of their trace. Reference to trace is
1666 * kept within stream_create().
1667 */
1668 ctf_trace_put(trace);
1669
1670 send_reply:
1671 memset(&reply, 0, sizeof(reply));
1672 reply.handle = htobe64(stream_handle);
1673 if (!stream) {
1674 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1675 } else {
1676 reply.ret_code = htobe32(LTTNG_OK);
1677 }
1678
1679 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1680 sizeof(struct lttcomm_relayd_status_stream), 0);
1681 if (send_ret < (ssize_t) sizeof(reply)) {
1682 ERR("Failed to send \"add stream\" command reply (ret = %zd)",
1683 send_ret);
1684 ret = -1;
1685 }
1686
1687 end_no_session:
1688 free(path_name);
1689 free(channel_name);
1690 return ret;
1691 }
1692
1693 /*
1694 * relay_close_stream: close a specific stream
1695 */
1696 static int relay_close_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1697 struct relay_connection *conn,
1698 const struct lttng_buffer_view *payload)
1699 {
1700 int ret;
1701 ssize_t send_ret;
1702 struct relay_session *session = conn->session;
1703 struct lttcomm_relayd_close_stream stream_info;
1704 struct lttcomm_relayd_generic_reply reply;
1705 struct relay_stream *stream;
1706
1707 DBG("Close stream received");
1708
1709 if (!session || !conn->version_check_done) {
1710 ERR("Trying to close a stream before version check");
1711 ret = -1;
1712 goto end_no_session;
1713 }
1714
1715 if (payload->size < sizeof(stream_info)) {
1716 ERR("Unexpected payload size in \"relay_close_stream\": expected >= %zu bytes, got %zu bytes",
1717 sizeof(stream_info), payload->size);
1718 ret = -1;
1719 goto end_no_session;
1720 }
1721 memcpy(&stream_info, payload->data, sizeof(stream_info));
1722 stream_info.stream_id = be64toh(stream_info.stream_id);
1723 stream_info.last_net_seq_num = be64toh(stream_info.last_net_seq_num);
1724
1725 stream = stream_get_by_id(stream_info.stream_id);
1726 if (!stream) {
1727 ret = -1;
1728 goto end;
1729 }
1730
1731 /*
1732 * Set last_net_seq_num before the close flag. Required by data
1733 * pending check.
1734 */
1735 pthread_mutex_lock(&stream->lock);
1736 stream->last_net_seq_num = stream_info.last_net_seq_num;
1737 pthread_mutex_unlock(&stream->lock);
1738
1739 /*
1740 * This is one of the conditions which may trigger a stream close
1741 * with the others being:
1742 * 1) A close command is received for a stream
1743 * 2) The control connection owning the stream is closed
1744 * 3) We have received all of the stream's data _after_ a close
1745 * request.
1746 */
1747 try_stream_close(stream);
1748 if (stream->is_metadata) {
1749 struct relay_viewer_stream *vstream;
1750
1751 vstream = viewer_stream_get_by_id(stream->stream_handle);
1752 if (vstream) {
1753 if (stream->no_new_metadata_notified) {
1754 /*
1755 * Since all the metadata has been sent to the
1756 * viewer and that we have a request to close
1757 * its stream, we can safely teardown the
1758 * corresponding metadata viewer stream.
1759 */
1760 viewer_stream_put(vstream);
1761 }
1762 /* Put local reference. */
1763 viewer_stream_put(vstream);
1764 }
1765 }
1766 stream_put(stream);
1767 ret = 0;
1768
1769 end:
1770 memset(&reply, 0, sizeof(reply));
1771 if (ret < 0) {
1772 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1773 } else {
1774 reply.ret_code = htobe32(LTTNG_OK);
1775 }
1776 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1777 sizeof(struct lttcomm_relayd_generic_reply), 0);
1778 if (send_ret < (ssize_t) sizeof(reply)) {
1779 ERR("Failed to send \"close stream\" command reply (ret = %zd)",
1780 send_ret);
1781 ret = -1;
1782 }
1783
1784 end_no_session:
1785 return ret;
1786 }
1787
1788 /*
1789 * relay_reset_metadata: reset a metadata stream
1790 */
1791 static
1792 int relay_reset_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1793 struct relay_connection *conn,
1794 const struct lttng_buffer_view *payload)
1795 {
1796 int ret;
1797 ssize_t send_ret;
1798 struct relay_session *session = conn->session;
1799 struct lttcomm_relayd_reset_metadata stream_info;
1800 struct lttcomm_relayd_generic_reply reply;
1801 struct relay_stream *stream;
1802
1803 DBG("Reset metadata received");
1804
1805 if (!session || !conn->version_check_done) {
1806 ERR("Trying to reset a metadata stream before version check");
1807 ret = -1;
1808 goto end_no_session;
1809 }
1810
1811 if (payload->size < sizeof(stream_info)) {
1812 ERR("Unexpected payload size in \"relay_reset_metadata\": expected >= %zu bytes, got %zu bytes",
1813 sizeof(stream_info), payload->size);
1814 ret = -1;
1815 goto end_no_session;
1816 }
1817 memcpy(&stream_info, payload->data, sizeof(stream_info));
1818 stream_info.stream_id = be64toh(stream_info.stream_id);
1819 stream_info.version = be64toh(stream_info.version);
1820
1821 DBG("Update metadata to version %" PRIu64, stream_info.version);
1822
1823 /* Unsupported for live sessions for now. */
1824 if (session->live_timer != 0) {
1825 ret = -1;
1826 goto end;
1827 }
1828
1829 stream = stream_get_by_id(stream_info.stream_id);
1830 if (!stream) {
1831 ret = -1;
1832 goto end;
1833 }
1834 pthread_mutex_lock(&stream->lock);
1835 if (!stream->is_metadata) {
1836 ret = -1;
1837 goto end_unlock;
1838 }
1839
1840 ret = stream_reset_file(stream);
1841 if (ret < 0) {
1842 ERR("Failed to reset metadata stream %" PRIu64
1843 ": stream_path = %s, channel = %s",
1844 stream->stream_handle, stream->path_name,
1845 stream->channel_name);
1846 goto end_unlock;
1847 }
1848 end_unlock:
1849 pthread_mutex_unlock(&stream->lock);
1850 stream_put(stream);
1851
1852 end:
1853 memset(&reply, 0, sizeof(reply));
1854 if (ret < 0) {
1855 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1856 } else {
1857 reply.ret_code = htobe32(LTTNG_OK);
1858 }
1859 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1860 sizeof(struct lttcomm_relayd_generic_reply), 0);
1861 if (send_ret < (ssize_t) sizeof(reply)) {
1862 ERR("Failed to send \"reset metadata\" command reply (ret = %zd)",
1863 send_ret);
1864 ret = -1;
1865 }
1866
1867 end_no_session:
1868 return ret;
1869 }
1870
1871 /*
1872 * relay_unknown_command: send -1 if received unknown command
1873 */
1874 static void relay_unknown_command(struct relay_connection *conn)
1875 {
1876 struct lttcomm_relayd_generic_reply reply;
1877 ssize_t send_ret;
1878
1879 memset(&reply, 0, sizeof(reply));
1880 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1881 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1882 if (send_ret < (ssize_t) sizeof(reply)) {
1883 ERR("Failed to send \"unknown command\" command reply (ret = %zd)", send_ret);
1884 }
1885 }
1886
1887 /*
1888 * relay_start: send an acknowledgment to the client to tell if we are
1889 * ready to receive data. We are ready if a session is established.
1890 */
1891 static int relay_start(const struct lttcomm_relayd_hdr *recv_hdr,
1892 struct relay_connection *conn,
1893 const struct lttng_buffer_view *payload)
1894 {
1895 int ret = 0;
1896 ssize_t send_ret;
1897 struct lttcomm_relayd_generic_reply reply;
1898 struct relay_session *session = conn->session;
1899
1900 if (!session) {
1901 DBG("Trying to start the streaming without a session established");
1902 ret = htobe32(LTTNG_ERR_UNK);
1903 }
1904
1905 memset(&reply, 0, sizeof(reply));
1906 reply.ret_code = htobe32(LTTNG_OK);
1907 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1908 sizeof(reply), 0);
1909 if (send_ret < (ssize_t) sizeof(reply)) {
1910 ERR("Failed to send \"relay_start\" command reply (ret = %zd)",
1911 send_ret);
1912 ret = -1;
1913 }
1914
1915 return ret;
1916 }
1917
1918 /*
1919 * relay_recv_metadata: receive the metadata for the session.
1920 */
1921 static int relay_recv_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1922 struct relay_connection *conn,
1923 const struct lttng_buffer_view *payload)
1924 {
1925 int ret = 0;
1926 struct relay_session *session = conn->session;
1927 struct lttcomm_relayd_metadata_payload metadata_payload_header;
1928 struct relay_stream *metadata_stream;
1929 uint64_t metadata_payload_size;
1930 struct lttng_buffer_view packet_view;
1931
1932 if (!session) {
1933 ERR("Metadata sent before version check");
1934 ret = -1;
1935 goto end;
1936 }
1937
1938 if (recv_hdr->data_size < sizeof(struct lttcomm_relayd_metadata_payload)) {
1939 ERR("Incorrect data size");
1940 ret = -1;
1941 goto end;
1942 }
1943 metadata_payload_size = recv_hdr->data_size -
1944 sizeof(struct lttcomm_relayd_metadata_payload);
1945
1946 memcpy(&metadata_payload_header, payload->data,
1947 sizeof(metadata_payload_header));
1948 metadata_payload_header.stream_id = be64toh(
1949 metadata_payload_header.stream_id);
1950 metadata_payload_header.padding_size = be32toh(
1951 metadata_payload_header.padding_size);
1952
1953 metadata_stream = stream_get_by_id(metadata_payload_header.stream_id);
1954 if (!metadata_stream) {
1955 ret = -1;
1956 goto end;
1957 }
1958
1959 packet_view = lttng_buffer_view_from_view(payload,
1960 sizeof(metadata_payload_header), metadata_payload_size);
1961 if (!packet_view.data) {
1962 ERR("Invalid metadata packet length announced by header");
1963 ret = -1;
1964 goto end_put;
1965 }
1966
1967 pthread_mutex_lock(&metadata_stream->lock);
1968 ret = stream_write(metadata_stream, &packet_view,
1969 metadata_payload_header.padding_size);
1970 pthread_mutex_unlock(&metadata_stream->lock);
1971 if (ret) {
1972 ret = -1;
1973 goto end_put;
1974 }
1975 end_put:
1976 stream_put(metadata_stream);
1977 end:
1978 return ret;
1979 }
1980
1981 /*
1982 * relay_send_version: send relayd version number
1983 */
1984 static int relay_send_version(const struct lttcomm_relayd_hdr *recv_hdr,
1985 struct relay_connection *conn,
1986 const struct lttng_buffer_view *payload)
1987 {
1988 int ret;
1989 ssize_t send_ret;
1990 struct lttcomm_relayd_version reply, msg;
1991 bool compatible = true;
1992
1993 conn->version_check_done = true;
1994
1995 /* Get version from the other side. */
1996 if (payload->size < sizeof(msg)) {
1997 ERR("Unexpected payload size in \"relay_send_version\": expected >= %zu bytes, got %zu bytes",
1998 sizeof(msg), payload->size);
1999 ret = -1;
2000 goto end;
2001 }
2002
2003 memcpy(&msg, payload->data, sizeof(msg));
2004 msg.major = be32toh(msg.major);
2005 msg.minor = be32toh(msg.minor);
2006
2007 memset(&reply, 0, sizeof(reply));
2008 reply.major = RELAYD_VERSION_COMM_MAJOR;
2009 reply.minor = RELAYD_VERSION_COMM_MINOR;
2010
2011 /* Major versions must be the same */
2012 if (reply.major != msg.major) {
2013 DBG("Incompatible major versions (%u vs %u), deleting session",
2014 reply.major, msg.major);
2015 compatible = false;
2016 }
2017
2018 conn->major = reply.major;
2019 /* We adapt to the lowest compatible version */
2020 if (reply.minor <= msg.minor) {
2021 conn->minor = reply.minor;
2022 } else {
2023 conn->minor = msg.minor;
2024 }
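/*
 * For example (following the logic above, the version numbers are only
 * illustrative): if this relay daemon implements protocol 2.12 and the
 * peer announces 2.4, the major versions match and the connection
 * proceeds using the lower minor, i.e. protocol 2.4.
 */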
2025
2026 reply.major = htobe32(reply.major);
2027 reply.minor = htobe32(reply.minor);
2028 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
2029 sizeof(reply), 0);
2030 if (send_ret < (ssize_t) sizeof(reply)) {
2031 ERR("Failed to send \"send version\" command reply (ret = %zd)",
2032 send_ret);
2033 ret = -1;
2034 goto end;
2035 } else {
2036 ret = 0;
2037 }
2038
2039 if (!compatible) {
2040 ret = -1;
2041 goto end;
2042 }
2043
2044 DBG("Version check done using protocol %u.%u", conn->major,
2045 conn->minor);
2046
2047 end:
2048 return ret;
2049 }
2050
2051 /*
2052 * Check for data pending for a given stream id from the session daemon.
2053 */
2054 static int relay_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
2055 struct relay_connection *conn,
2056 const struct lttng_buffer_view *payload)
2057 {
2058 struct relay_session *session = conn->session;
2059 struct lttcomm_relayd_data_pending msg;
2060 struct lttcomm_relayd_generic_reply reply;
2061 struct relay_stream *stream;
2062 ssize_t send_ret;
2063 int ret;
2064 uint64_t stream_seq;
2065
2066 DBG("Data pending command received");
2067
2068 if (!session || !conn->version_check_done) {
2069 ERR("Trying to check for data before version check");
2070 ret = -1;
2071 goto end_no_session;
2072 }
2073
2074 if (payload->size < sizeof(msg)) {
2075 ERR("Unexpected payload size in \"relay_data_pending\": expected >= %zu bytes, got %zu bytes",
2076 sizeof(msg), payload->size);
2077 ret = -1;
2078 goto end_no_session;
2079 }
2080 memcpy(&msg, payload->data, sizeof(msg));
2081 msg.stream_id = be64toh(msg.stream_id);
2082 msg.last_net_seq_num = be64toh(msg.last_net_seq_num);
2083
2084 stream = stream_get_by_id(msg.stream_id);
2085 if (stream == NULL) {
2086 ret = -1;
2087 goto end;
2088 }
2089
2090 pthread_mutex_lock(&stream->lock);
2091
2092 if (session_streams_have_index(session)) {
2093 /*
2094 * Ensure that both the index and stream data have been
2095 * flushed up to the requested point.
2096 */
2097 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
2098 } else {
2099 stream_seq = stream->prev_data_seq;
2100 }
2101 DBG("Data pending for stream id %" PRIu64 ": prev_data_seq %" PRIu64
2102 ", prev_index_seq %" PRIu64
2103 ", and last_seq %" PRIu64, msg.stream_id,
2104 stream->prev_data_seq, stream->prev_index_seq,
2105 msg.last_net_seq_num);
2106
2107 /* Wrap-safe check: the signed difference is >= 0 once stream_seq has caught up to msg.last_net_seq_num. */
2108 if (((int64_t) (stream_seq - msg.last_net_seq_num)) >= 0) {
2109 /* Data has in fact been written and is NOT pending */
2110 ret = 0;
2111 } else {
2112 /* Data still being streamed thus pending */
2113 ret = 1;
2114 }
2115
2116 stream->data_pending_check_done = true;
2117 pthread_mutex_unlock(&stream->lock);
2118
2119 stream_put(stream);
2120 end:
2121
2122 memset(&reply, 0, sizeof(reply));
2123 reply.ret_code = htobe32(ret);
2124 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2125 if (send_ret < (ssize_t) sizeof(reply)) {
2126 ERR("Failed to send \"data pending\" command reply (ret = %zd)",
2127 send_ret);
2128 ret = -1;
2129 }
2130
2131 end_no_session:
2132 return ret;
2133 }
2134
2135 /*
2136 * Wait for the control socket to reach a quiescent state.
2137 *
2138 * Note that for now, when receiving this command from the session
2139 * daemon, every command and all data received on the control socket
2140 * up to this point have already been handled. This is why we simply
2141 * return OK here.
2142 */
2143 static int relay_quiescent_control(const struct lttcomm_relayd_hdr *recv_hdr,
2144 struct relay_connection *conn,
2145 const struct lttng_buffer_view *payload)
2146 {
2147 int ret;
2148 ssize_t send_ret;
2149 struct relay_stream *stream;
2150 struct lttcomm_relayd_quiescent_control msg;
2151 struct lttcomm_relayd_generic_reply reply;
2152
2153 DBG("Checking quiescent state on control socket");
2154
2155 if (!conn->session || !conn->version_check_done) {
2156 ERR("Trying to check for data before version check");
2157 ret = -1;
2158 goto end_no_session;
2159 }
2160
2161 if (payload->size < sizeof(msg)) {
2162 ERR("Unexpected payload size in \"relay_quiescent_control\": expected >= %zu bytes, got %zu bytes",
2163 sizeof(msg), payload->size);
2164 ret = -1;
2165 goto end_no_session;
2166 }
2167 memcpy(&msg, payload->data, sizeof(msg));
2168 msg.stream_id = be64toh(msg.stream_id);
2169
2170 stream = stream_get_by_id(msg.stream_id);
2171 if (!stream) {
2172 goto reply;
2173 }
2174 pthread_mutex_lock(&stream->lock);
2175 stream->data_pending_check_done = true;
2176 pthread_mutex_unlock(&stream->lock);
2177
2178 DBG("Relay quiescent control pending flag set to %" PRIu64, msg.stream_id);
2179 stream_put(stream);
2180 reply:
2181 memset(&reply, 0, sizeof(reply));
2182 reply.ret_code = htobe32(LTTNG_OK);
2183 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2184 if (send_ret < (ssize_t) sizeof(reply)) {
2185 ERR("Failed to send \"quiescent control\" command reply (ret = %zd)",
2186 send_ret);
2187 ret = -1;
2188 } else {
2189 ret = 0;
2190 }
2191
2192 end_no_session:
2193 return ret;
2194 }
2195
2196 /*
2197 * Initialize a data pending command. This means that a consumer is about
2198 * to ask for data pending for each stream it holds. Simply iterate over
2199 * all streams of the session and clear their data_pending_check_done flag.
2200 *
2201 * This command replies to the client with an LTTNG_OK code.
2202 */
2203 static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
2204 struct relay_connection *conn,
2205 const struct lttng_buffer_view *payload)
2206 {
2207 int ret;
2208 ssize_t send_ret;
2209 struct lttng_ht_iter iter;
2210 struct lttcomm_relayd_begin_data_pending msg;
2211 struct lttcomm_relayd_generic_reply reply;
2212 struct relay_stream *stream;
2213
2214 assert(recv_hdr);
2215 assert(conn);
2216
2217 DBG("Init streams for data pending");
2218
2219 if (!conn->session || !conn->version_check_done) {
2220 ERR("Trying to check for data before version check");
2221 ret = -1;
2222 goto end_no_session;
2223 }
2224
2225 if (payload->size < sizeof(msg)) {
2226 ERR("Unexpected payload size in \"relay_begin_data_pending\": expected >= %zu bytes, got %zu bytes",
2227 sizeof(msg), payload->size);
2228 ret = -1;
2229 goto end_no_session;
2230 }
2231 memcpy(&msg, payload->data, sizeof(msg));
2232 msg.session_id = be64toh(msg.session_id);
2233
2234 /*
2235 * Iterate over all streams to set the begin data pending flag.
2236 * For now, the streams are indexed by stream handle so we have
2237 * to iterate over all streams to find the one associated with
2238 * the right session_id.
2239 */
2240 rcu_read_lock();
2241 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
2242 node.node) {
2243 if (!stream_get(stream)) {
2244 continue;
2245 }
2246 if (stream->trace->session->id == msg.session_id) {
2247 pthread_mutex_lock(&stream->lock);
2248 stream->data_pending_check_done = false;
2249 pthread_mutex_unlock(&stream->lock);
2250 DBG("Set begin data pending flag to stream %" PRIu64,
2251 stream->stream_handle);
2252 }
2253 stream_put(stream);
2254 }
2255 rcu_read_unlock();
2256
2257 memset(&reply, 0, sizeof(reply));
2258 /* All good, send back reply. */
2259 reply.ret_code = htobe32(LTTNG_OK);
2260
2261 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2262 if (send_ret < (ssize_t) sizeof(reply)) {
2263 ERR("Failed to send \"begin data pending\" command reply (ret = %zd)",
2264 send_ret);
2265 ret = -1;
2266 } else {
2267 ret = 0;
2268 }
2269
2270 end_no_session:
2271 return ret;
2272 }
2273
2274 /*
2275 * End data pending command. This will check, for a given session id, if
2276 * each stream associated with it has its data_pending_check_done flag
2277 * set. If not, this means that the client lost track of the stream but
2278 * the data is still being streamed on our side. In this case, we inform
2279 * the client that data is in flight.
2280 *
2281 * The ret_code returned to the client indicates whether data is still in flight.
2282 */
2283 static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
2284 struct relay_connection *conn,
2285 const struct lttng_buffer_view *payload)
2286 {
2287 int ret;
2288 ssize_t send_ret;
2289 struct lttng_ht_iter iter;
2290 struct lttcomm_relayd_end_data_pending msg;
2291 struct lttcomm_relayd_generic_reply reply;
2292 struct relay_stream *stream;
2293 uint32_t is_data_inflight = 0;
2294
2295 DBG("End data pending command");
2296
2297 if (!conn->session || !conn->version_check_done) {
2298 ERR("Trying to check for data before version check");
2299 ret = -1;
2300 goto end_no_session;
2301 }
2302
2303 if (payload->size < sizeof(msg)) {
2304 ERR("Unexpected payload size in \"relay_end_data_pending\": expected >= %zu bytes, got %zu bytes",
2305 sizeof(msg), payload->size);
2306 ret = -1;
2307 goto end_no_session;
2308 }
2309 memcpy(&msg, payload->data, sizeof(msg));
2310 msg.session_id = be64toh(msg.session_id);
2311
2312 /*
2313 * Iterate over all streams to see if the begin data pending
2314 * flag is set.
2315 */
2316 rcu_read_lock();
2317 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
2318 node.node) {
2319 if (!stream_get(stream)) {
2320 continue;
2321 }
2322 if (stream->trace->session->id != msg.session_id) {
2323 stream_put(stream);
2324 continue;
2325 }
2326 pthread_mutex_lock(&stream->lock);
2327 if (!stream->data_pending_check_done) {
2328 uint64_t stream_seq;
2329
2330 if (session_streams_have_index(conn->session)) {
2331 /*
2332 * Ensure that both the index and stream data have been
2333 * flushed up to the requested point.
2334 */
2335 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
2336 } else {
2337 stream_seq = stream->prev_data_seq;
2338 }
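/*
 * A stream still has data in flight if it has not been closed yet, or
 * if its written data/index have not caught up to the last network
 * sequence number it announced (wrap-safe signed comparison).
 */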
2339 if (!stream->closed || !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) {
2340 is_data_inflight = 1;
2341 DBG("Data is still in flight for stream %" PRIu64,
2342 stream->stream_handle);
2343 pthread_mutex_unlock(&stream->lock);
2344 stream_put(stream);
2345 break;
2346 }
2347 }
2348 pthread_mutex_unlock(&stream->lock);
2349 stream_put(stream);
2350 }
2351 rcu_read_unlock();
2352
2353 memset(&reply, 0, sizeof(reply));
2354 /* All good, send back reply. */
2355 reply.ret_code = htobe32(is_data_inflight);
2356
2357 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2358 if (send_ret < (ssize_t) sizeof(reply)) {
2359 ERR("Failed to send \"end data pending\" command reply (ret = %zd)",
2360 send_ret);
2361 ret = -1;
2362 } else {
2363 ret = 0;
2364 }
2365
2366 end_no_session:
2367 return ret;
2368 }
2369
2370 /*
2371 * Receive an index for a specific stream.
2372 *
2373 * Return 0 on success else a negative value.
2374 */
2375 static int relay_recv_index(const struct lttcomm_relayd_hdr *recv_hdr,
2376 struct relay_connection *conn,
2377 const struct lttng_buffer_view *payload)
2378 {
2379 int ret;
2380 ssize_t send_ret;
2381 struct relay_session *session = conn->session;
2382 struct lttcomm_relayd_index index_info;
2383 struct lttcomm_relayd_generic_reply reply;
2384 struct relay_stream *stream;
2385 size_t msg_len;
2386
2387 assert(conn);
2388
2389 DBG("Relay receiving index");
2390
2391 if (!session || !conn->version_check_done) {
2392 ERR("Trying to close a stream before version check");
2393 ret = -1;
2394 goto end_no_session;
2395 }
2396
2397 msg_len = lttcomm_relayd_index_len(
2398 lttng_to_index_major(conn->major, conn->minor),
2399 lttng_to_index_minor(conn->major, conn->minor));
2400 if (payload->size < msg_len) {
2401 ERR("Unexpected payload size in \"relay_recv_index\": expected >= %zu bytes, got %zu bytes",
2402 msg_len, payload->size);
2403 ret = -1;
2404 goto end_no_session;
2405 }
2406 memcpy(&index_info, payload->data, msg_len);
2407 index_info.relay_stream_id = be64toh(index_info.relay_stream_id);
2408 index_info.net_seq_num = be64toh(index_info.net_seq_num);
2409 index_info.packet_size = be64toh(index_info.packet_size);
2410 index_info.content_size = be64toh(index_info.content_size);
2411 index_info.timestamp_begin = be64toh(index_info.timestamp_begin);
2412 index_info.timestamp_end = be64toh(index_info.timestamp_end);
2413 index_info.events_discarded = be64toh(index_info.events_discarded);
2414 index_info.stream_id = be64toh(index_info.stream_id);
2415
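/*
 * The stream instance id and packet sequence number fields were added
 * to the index in 2.8; mark them as unset when the peer speaks an
 * older protocol.
 */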
2416 if (conn->minor >= 8) {
2417 index_info.stream_instance_id =
2418 be64toh(index_info.stream_instance_id);
2419 index_info.packet_seq_num = be64toh(index_info.packet_seq_num);
2420 } else {
2421 index_info.stream_instance_id = -1ULL;
2422 index_info.packet_seq_num = -1ULL;
2423 }
2424
2425 stream = stream_get_by_id(index_info.relay_stream_id);
2426 if (!stream) {
2427 ERR("stream_get_by_id not found");
2428 ret = -1;
2429 goto end;
2430 }
2431
2432 pthread_mutex_lock(&stream->lock);
2433 ret = stream_add_index(stream, &index_info);
2434 pthread_mutex_unlock(&stream->lock);
2435 if (ret) {
2436 goto end_stream_put;
2437 }
2438
2439 end_stream_put:
2440 stream_put(stream);
2441 end:
2442 memset(&reply, 0, sizeof(reply));
2443 if (ret < 0) {
2444 reply.ret_code = htobe32(LTTNG_ERR_UNK);
2445 } else {
2446 reply.ret_code = htobe32(LTTNG_OK);
2447 }
2448 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2449 if (send_ret < (ssize_t) sizeof(reply)) {
2450 ERR("Failed to send \"recv index\" command reply (ret = %zd)", send_ret);
2451 ret = -1;
2452 }
2453
2454 end_no_session:
2455 return ret;
2456 }
2457
2458 /*
2459 * Receive the streams_sent message.
2460 *
2461 * Return 0 on success else a negative value.
2462 */
2463 static int relay_streams_sent(const struct lttcomm_relayd_hdr *recv_hdr,
2464 struct relay_connection *conn,
2465 const struct lttng_buffer_view *payload)
2466 {
2467 int ret;
2468 ssize_t send_ret;
2469 struct lttcomm_relayd_generic_reply reply;
2470
2471 assert(conn);
2472
2473 DBG("Relay receiving streams_sent");
2474
2475 if (!conn->session || !conn->version_check_done) {
2476 ERR("Trying to close a stream before version check");
2477 ret = -1;
2478 goto end_no_session;
2479 }
2480
2481 /*
2482 * Publish every pending stream in the connection's recv list; they are
2483 * now ready to be used by the viewer.
2484 */
2485 publish_connection_local_streams(conn);
2486
2487 memset(&reply, 0, sizeof(reply));
2488 reply.ret_code = htobe32(LTTNG_OK);
2489 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2490 if (send_ret < (ssize_t) sizeof(reply)) {
2491 ERR("Failed to send \"streams sent\" command reply (ret = %zd)",
2492 send_ret);
2493 ret = -1;
2494 } else {
2495 /* Success. */
2496 ret = 0;
2497 }
2498
2499 end_no_session:
2500 return ret;
2501 }
2502
2503 /*
2504 * relay_rotate_session_streams: rotate the streams of a session to a new
2505 * trace chunk (session rotation feature, not the tracefile rotation feature).
2506 */
2507 static int relay_rotate_session_streams(
2508 const struct lttcomm_relayd_hdr *recv_hdr,
2509 struct relay_connection *conn,
2510 const struct lttng_buffer_view *payload)
2511 {
2512 int ret = 0;
2513 uint32_t i;
2514 ssize_t send_ret;
2515 enum lttng_error_code reply_code = LTTNG_ERR_UNK;
2516 struct relay_session *session = conn->session;
2517 struct lttcomm_relayd_rotate_streams rotate_streams;
2518 struct lttcomm_relayd_generic_reply reply = {};
2519 struct relay_stream *stream = NULL;
2520 const size_t header_len = sizeof(struct lttcomm_relayd_rotate_streams);
2521 struct lttng_trace_chunk *next_trace_chunk = NULL;
2522 struct lttng_buffer_view stream_positions;
2523 char chunk_id_buf[MAX_INT_DEC_LEN(uint64_t)];
2524 const char *chunk_id_str = "none";
2525
2526 if (!session || !conn->version_check_done) {
2527 ERR("Trying to rotate a stream before version check");
2528 ret = -1;
2529 goto end_no_reply;
2530 }
2531
2532 if (session->major == 2 && session->minor < 11) {
2533 ERR("Unsupported feature before 2.11");
2534 ret = -1;
2535 goto end_no_reply;
2536 }
2537
2538 if (payload->size < header_len) {
2539 ERR("Unexpected payload size in \"relay_rotate_session_stream\": expected >= %zu bytes, got %zu bytes",
2540 header_len, payload->size);
2541 ret = -1;
2542 goto end_no_reply;
2543 }
2544
2545 memcpy(&rotate_streams, payload->data, header_len);
2546
2547 /* Convert header to host endianness. */
2548 rotate_streams = (typeof(rotate_streams)) {
2549 .stream_count = be32toh(rotate_streams.stream_count),
2550 .new_chunk_id = (typeof(rotate_streams.new_chunk_id)) {
2551 .is_set = !!rotate_streams.new_chunk_id.is_set,
2552 .value = be64toh(rotate_streams.new_chunk_id.value),
2553 }
2554 };
2555
2556 if (rotate_streams.new_chunk_id.is_set) {
2557 /*
2558 * Retrieve the trace chunk the streams must transition to. As
2559 * per the protocol, this chunk should have been created
2560 * before this command is received.
2561 */
2562 next_trace_chunk = sessiond_trace_chunk_registry_get_chunk(
2563 sessiond_trace_chunk_registry,
2564 session->sessiond_uuid, session->id,
2565 rotate_streams.new_chunk_id.value);
2566 if (!next_trace_chunk) {
2567 char uuid_str[LTTNG_UUID_STR_LEN];
2568
2569 lttng_uuid_to_str(session->sessiond_uuid, uuid_str);
2570 ERR("Unknown next trace chunk in ROTATE_STREAMS command: sessiond_uuid = {%s}, session_id = %" PRIu64
2571 ", trace_chunk_id = %" PRIu64,
2572 uuid_str, session->id,
2573 rotate_streams.new_chunk_id.value);
2574 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2575 ret = -1;
2576 goto end;
2577 }
2578
2579 ret = snprintf(chunk_id_buf, sizeof(chunk_id_buf), "%" PRIu64,
2580 rotate_streams.new_chunk_id.value);
2581 if (ret < 0 || ret >= sizeof(chunk_id_buf)) {
2582 chunk_id_str = "formatting error";
2583 } else {
2584 chunk_id_str = chunk_id_buf;
2585 }
2586 }
2587
2588 DBG("Rotate %" PRIu32 " streams of session \"%s\" to chunk \"%s\"",
2589 rotate_streams.stream_count, session->session_name,
2590 chunk_id_str);
2591
2592 stream_positions = lttng_buffer_view_from_view(payload,
2593 sizeof(rotate_streams), -1);
2594 if (!stream_positions.data ||
2595 stream_positions.size <
2596 (rotate_streams.stream_count *
2597 sizeof(struct lttcomm_relayd_stream_rotation_position))) {
2598 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2599 ret = -1;
2600 goto end;
2601 }
2602
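/*
 * Each rotation position pairs a stream id with the network sequence
 * number at which that stream must transition to the new trace chunk;
 * stream_set_pending_rotation() defers the actual rotation until the
 * stream reaches that point.
 */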
2603 for (i = 0; i < rotate_streams.stream_count; i++) {
2604 struct lttcomm_relayd_stream_rotation_position *position_comm =
2605 &((typeof(position_comm)) stream_positions.data)[i];
2606 const struct lttcomm_relayd_stream_rotation_position pos = {
2607 .stream_id = be64toh(position_comm->stream_id),
2608 .rotate_at_seq_num = be64toh(
2609 position_comm->rotate_at_seq_num),
2610 };
2611
2612 stream = stream_get_by_id(pos.stream_id);
2613 if (!stream) {
2614 reply_code = LTTNG_ERR_INVALID;
2615 ret = -1;
2616 goto end;
2617 }
2618
2619 pthread_mutex_lock(&stream->lock);
2620 ret = stream_set_pending_rotation(stream, next_trace_chunk,
2621 pos.rotate_at_seq_num);
2622 pthread_mutex_unlock(&stream->lock);
2623 if (ret) {
2624 reply_code = LTTNG_ERR_FILE_CREATION_ERROR;
2625 goto end;
2626 }
2627
2628 stream_put(stream);
2629 stream = NULL;
2630 }
2631
2632 reply_code = LTTNG_OK;
2633 ret = 0;
2634 end:
2635 if (stream) {
2636 stream_put(stream);
2637 }
2638
2639 reply.ret_code = htobe32((uint32_t) reply_code);
2640 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
2641 sizeof(struct lttcomm_relayd_generic_reply), 0);
2642 if (send_ret < (ssize_t) sizeof(reply)) {
2643 ERR("Failed to send \"rotate session stream\" command reply (ret = %zd)",
2644 send_ret);
2645 ret = -1;
2646 }
2647 end_no_reply:
2648 lttng_trace_chunk_put(next_trace_chunk);
2649 return ret;
2650 }
2651
2652
2653
2654 /*
2655 * relay_create_trace_chunk: create a new trace chunk
2656 */
2657 static int relay_create_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
2658 struct relay_connection *conn,
2659 const struct lttng_buffer_view *payload)
2660 {
2661 int ret = 0;
2662 ssize_t send_ret;
2663 struct relay_session *session = conn->session;
2664 struct lttcomm_relayd_create_trace_chunk *msg;
2665 struct lttcomm_relayd_generic_reply reply = {};
2666 struct lttng_buffer_view header_view;
2667 struct lttng_buffer_view chunk_name_view;
2668 struct lttng_trace_chunk *chunk = NULL, *published_chunk = NULL;
2669 enum lttng_error_code reply_code = LTTNG_OK;
2670 enum lttng_trace_chunk_status chunk_status;
2671 const char *new_path;
2672
2673 if (!session || !conn->version_check_done) {
2674 ERR("Trying to create a trace chunk before version check");
2675 ret = -1;
2676 goto end_no_reply;
2677 }
2678
2679 if (session->major == 2 && session->minor < 11) {
2680 ERR("Chunk creation command is unsupported before 2.11");
2681 ret = -1;
2682 goto end_no_reply;
2683 }
2684
2685 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2686 if (!header_view.data) {
2687 ERR("Failed to receive payload of chunk creation command");
2688 ret = -1;
2689 goto end_no_reply;
2690 }
2691
2692 /* Convert to host endianness. */
2693 msg = (typeof(msg)) header_view.data;
2694 msg->chunk_id = be64toh(msg->chunk_id);
2695 msg->creation_timestamp = be64toh(msg->creation_timestamp);
2696 msg->override_name_length = be32toh(msg->override_name_length);
2697
2698 if (session->current_trace_chunk &&
2699 !lttng_trace_chunk_get_name_overridden(session->current_trace_chunk)) {
2700 chunk_status = lttng_trace_chunk_rename_path(session->current_trace_chunk,
2701 DEFAULT_CHUNK_TMP_OLD_DIRECTORY);
2702 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2703 ERR("Failed to rename old chunk");
2704 ret = -1;
2705 reply_code = LTTNG_ERR_UNK;
2706 goto end;
2707 }
2708 }
2709 session->ongoing_rotation = true;
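/*
 * Pick the initial path of the new chunk: an empty path for the first
 * chunk of a session that never rotated, the default chunk-name path
 * (NULL) once it has rotated, and a temporary "new" directory while the
 * previous chunk is still being closed.
 */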
2710 if (!session->current_trace_chunk) {
2711 if (!session->has_rotated) {
2712 new_path = "";
2713 } else {
2714 new_path = NULL;
2715 }
2716 } else {
2717 new_path = DEFAULT_CHUNK_TMP_NEW_DIRECTORY;
2718 }
2719 chunk = lttng_trace_chunk_create(
2720 msg->chunk_id, msg->creation_timestamp, new_path);
2721 if (!chunk) {
2722 ERR("Failed to create trace chunk in trace chunk creation command");
2723 ret = -1;
2724 reply_code = LTTNG_ERR_NOMEM;
2725 goto end;
2726 }
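/*
 * Register the relay daemon's fd tracker with the new chunk so that
 * every file descriptor opened through it is accounted for by the
 * tracker.
 */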
2727 lttng_trace_chunk_set_fd_tracker(chunk, the_fd_tracker);
2728
2729 if (msg->override_name_length) {
2730 const char *name;
2731
2732 chunk_name_view = lttng_buffer_view_from_view(payload,
2733 sizeof(*msg),
2734 msg->override_name_length);
2735 name = chunk_name_view.data;
2736 if (!name || name[msg->override_name_length - 1]) {
2737 ERR("Failed to receive payload of chunk creation command");
2738 ret = -1;
2739 reply_code = LTTNG_ERR_INVALID;
2740 goto end;
2741 }
2742
2743 chunk_status = lttng_trace_chunk_override_name(
2744 chunk, chunk_name_view.data);
2745 switch (chunk_status) {
2746 case LTTNG_TRACE_CHUNK_STATUS_OK:
2747 break;
2748 case LTTNG_TRACE_CHUNK_STATUS_INVALID_ARGUMENT:
2749 ERR("Failed to set the name of new trace chunk in trace chunk creation command (invalid name)");
2750 reply_code = LTTNG_ERR_INVALID;
2751 ret = -1;
2752 goto end;
2753 default:
2754 ERR("Failed to set the name of new trace chunk in trace chunk creation command (unknown error)");
2755 reply_code = LTTNG_ERR_UNK;
2756 ret = -1;
2757 goto end;
2758 }
2759 }
2760
2761 chunk_status = lttng_trace_chunk_set_credentials_current_user(chunk);
2762 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2763 reply_code = LTTNG_ERR_UNK;
2764 ret = -1;
2765 goto end;
2766 }
2767
2768 assert(conn->session->output_directory);
2769 chunk_status = lttng_trace_chunk_set_as_owner(chunk,
2770 conn->session->output_directory);
2771 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2772 reply_code = LTTNG_ERR_UNK;
2773 ret = -1;
2774 goto end;
2775 }
2776
2777 published_chunk = sessiond_trace_chunk_registry_publish_chunk(
2778 sessiond_trace_chunk_registry,
2779 conn->session->sessiond_uuid,
2780 conn->session->id,
2781 chunk);
2782 if (!published_chunk) {
2783 char uuid_str[LTTNG_UUID_STR_LEN];
2784
2785 lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
2786 ERR("Failed to publish chunk: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
2787 uuid_str,
2788 conn->session->id,
2789 msg->chunk_id);
2790 ret = -1;
2791 reply_code = LTTNG_ERR_NOMEM;
2792 goto end;
2793 }
2794
2795 pthread_mutex_lock(&conn->session->lock);
2796 if (conn->session->pending_closure_trace_chunk) {
2797 /*
2798 * Invalid; this means a second create_trace_chunk command was
2799 * received before a close_trace_chunk.
2800 */
2801 ERR("Invalid trace chunk close command received; a trace chunk is already waiting for a trace chunk close command");
2802 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2803 ret = -1;
2804 goto end_unlock_session;
2805 }
2806 conn->session->pending_closure_trace_chunk =
2807 conn->session->current_trace_chunk;
2808 conn->session->current_trace_chunk = published_chunk;
2809 published_chunk = NULL;
2810 if (!conn->session->pending_closure_trace_chunk) {
2811 session->ongoing_rotation = false;
2812 }
2813 end_unlock_session:
2814 pthread_mutex_unlock(&conn->session->lock);
2815 end:
2816 reply.ret_code = htobe32((uint32_t) reply_code);
2817 send_ret = conn->sock->ops->sendmsg(conn->sock,
2818 &reply,
2819 sizeof(struct lttcomm_relayd_generic_reply),
2820 0);
2821 if (send_ret < (ssize_t) sizeof(reply)) {
2822 ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)",
2823 send_ret);
2824 ret = -1;
2825 }
2826 end_no_reply:
2827 lttng_trace_chunk_put(chunk);
2828 lttng_trace_chunk_put(published_chunk);
2829 return ret;
2830 }
2831
2832 /*
2833 * relay_close_trace_chunk: close a trace chunk
2834 */
2835 static int relay_close_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
2836 struct relay_connection *conn,
2837 const struct lttng_buffer_view *payload)
2838 {
2839 int ret = 0, buf_ret;
2840 ssize_t send_ret;
2841 struct relay_session *session = conn->session;
2842 struct lttcomm_relayd_close_trace_chunk *msg;
2843 struct lttcomm_relayd_close_trace_chunk_reply reply = {};
2844 struct lttng_buffer_view header_view;
2845 struct lttng_trace_chunk *chunk = NULL;
2846 enum lttng_error_code reply_code = LTTNG_OK;
2847 enum lttng_trace_chunk_status chunk_status;
2848 uint64_t chunk_id;
2849 LTTNG_OPTIONAL(enum lttng_trace_chunk_command_type) close_command = {};
2850 time_t close_timestamp;
2851 char closed_trace_chunk_path[LTTNG_PATH_MAX];
2852 size_t path_length = 0;
2853 const char *chunk_name = NULL;
2854 struct lttng_dynamic_buffer reply_payload;
2855 const char *new_path;
2856
2857 lttng_dynamic_buffer_init(&reply_payload);
2858
2859 if (!session || !conn->version_check_done) {
2860 ERR("Trying to close a trace chunk before version check");
2861 ret = -1;
2862 goto end_no_reply;
2863 }
2864
2865 if (session->major == 2 && session->minor < 11) {
2866 ERR("Chunk close command is unsupported before 2.11");
2867 ret = -1;
2868 goto end_no_reply;
2869 }
2870
2871 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2872 if (!header_view.data) {
2873 ERR("Failed to receive payload of chunk close command");
2874 ret = -1;
2875 goto end_no_reply;
2876 }
2877
2878 /* Convert to host endianness. */
2879 msg = (typeof(msg)) header_view.data;
2880 chunk_id = be64toh(msg->chunk_id);
2881 close_timestamp = (time_t) be64toh(msg->close_timestamp);
2882 close_command = (typeof(close_command)){
2883 .value = be32toh(msg->close_command.value),
2884 .is_set = msg->close_command.is_set,
2885 };
2886
2887 chunk = sessiond_trace_chunk_registry_get_chunk(
2888 sessiond_trace_chunk_registry,
2889 conn->session->sessiond_uuid,
2890 conn->session->id,
2891 chunk_id);
2892 if (!chunk) {
2893 char uuid_str[LTTNG_UUID_STR_LEN];
2894
2895 lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
2896 ERR("Failed to find chunk to close: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
2897 uuid_str,
2898 conn->session->id,
2899 msg->chunk_id);
2900 ret = -1;
2901 reply_code = LTTNG_ERR_NOMEM;
2902 goto end;
2903 }
2904
2905 pthread_mutex_lock(&session->lock);
2906 if (close_command.is_set &&
2907 close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_DELETE) {
2908 /*
2909 * Clear command. It is a protocol error to ask for a
2910 * clear on a relay which does not allow it. Querying
2911 * the configuration allows figuring out whether
2912 * clearing is allowed before doing the clear.
2913 */
2914 if (!opt_allow_clear) {
2915 ret = -1;
2916 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2917 goto end_unlock_session;
2918 }
2919 }
2920 if (session->pending_closure_trace_chunk &&
2921 session->pending_closure_trace_chunk != chunk) {
2922 ERR("Trace chunk close command for session \"%s\" does not target the trace chunk pending closure",
2923 session->session_name);
2924 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2925 ret = -1;
2926 goto end_unlock_session;
2927 }
2928
2929 if (session->current_trace_chunk && session->current_trace_chunk != chunk &&
2930 !lttng_trace_chunk_get_name_overridden(session->current_trace_chunk)) {
2931 if (close_command.is_set &&
2932 close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_DELETE &&
2933 !session->has_rotated) {
2934 /* New chunk stays in session output directory. */
2935 new_path = "";
2936 } else {
2937 /* Use chunk name for new chunk. */
2938 new_path = NULL;
2939 }
2940 /* Rename new chunk path. */
2941 chunk_status = lttng_trace_chunk_rename_path(session->current_trace_chunk,
2942 new_path);
2943 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2944 ret = -1;
2945 goto end_unlock_session;
2946 }
2947 session->ongoing_rotation = false;
2948 }
2949 if ((!close_command.is_set ||
2950 close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION) &&
2951 !lttng_trace_chunk_get_name_overridden(chunk)) {
2952 const char *old_path;
2953
2954 if (!session->has_rotated) {
2955 old_path = "";
2956 } else {
2957 old_path = NULL;
2958 }
2959 /* We need to move back the .tmp_old_chunk to its rightful place. */
2960 chunk_status = lttng_trace_chunk_rename_path(chunk, old_path);
2961 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2962 ret = -1;
2963 goto end_unlock_session;
2964 }
2965 }
2966 chunk_status = lttng_trace_chunk_set_close_timestamp(
2967 chunk, close_timestamp);
2968 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2969 ERR("Failed to set trace chunk close timestamp");
2970 ret = -1;
2971 reply_code = LTTNG_ERR_UNK;
2972 goto end_unlock_session;
2973 }
2974
2975 if (close_command.is_set) {
2976 chunk_status = lttng_trace_chunk_set_close_command(
2977 chunk, close_command.value);
2978 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2979 ret = -1;
2980 reply_code = LTTNG_ERR_INVALID;
2981 goto end_unlock_session;
2982 }
2983 }
2984 chunk_status = lttng_trace_chunk_get_name(chunk, &chunk_name, NULL);
2985 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2986 ERR("Failed to get chunk name");
2987 ret = -1;
2988 reply_code = LTTNG_ERR_UNK;
2989 goto end_unlock_session;
2990 }
2991 if (!session->has_rotated && !session->snapshot) {
2992 ret = lttng_strncpy(closed_trace_chunk_path,
2993 session->output_path,
2994 sizeof(closed_trace_chunk_path));
2995 if (ret) {
2996 ERR("Failed to send trace chunk path: path length of %zu bytes exceeds the maximal allowed length of %zu bytes",
2997 strlen(session->output_path),
2998 sizeof(closed_trace_chunk_path));
2999 reply_code = LTTNG_ERR_NOMEM;
3000 ret = -1;
3001 goto end_unlock_session;
3002 }
3003 } else {
3004 if (session->snapshot) {
3005 ret = snprintf(closed_trace_chunk_path,
3006 sizeof(closed_trace_chunk_path),
3007 "%s/%s", session->output_path,
3008 chunk_name);
3009 } else {
3010 ret = snprintf(closed_trace_chunk_path,
3011 sizeof(closed_trace_chunk_path),
3012 "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY
3013 "/%s",
3014 session->output_path, chunk_name);
3015 }
3016 if (ret < 0 || ret >= sizeof(closed_trace_chunk_path)) {
3017 ERR("Failed to format closed trace chunk resulting path");
3018 reply_code = ret < 0 ? LTTNG_ERR_UNK : LTTNG_ERR_NOMEM;
3019 ret = -1;
3020 goto end_unlock_session;
3021 }
3022 }
3023 if (close_command.is_set &&
3024 close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED) {
3025 session->has_rotated = true;
3026 }
3027 DBG("Reply chunk path on close: %s", closed_trace_chunk_path);
3028 path_length = strlen(closed_trace_chunk_path) + 1;
3029 if (path_length > UINT32_MAX) {
3030 ERR("Closed trace chunk path exceeds the maximal length allowed by the protocol");
3031 ret = -1;
3032 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
3033 goto end_unlock_session;
3034 }
3035
3036 if (session->current_trace_chunk == chunk) {
3037 /*
3038 * After a trace chunk close command, no new streams
3039 * referencing the chunk may be created. Hence, in the
3040 * event that no new trace chunk has been created for
3041 * the session, the reference to the current trace chunk
3042 * is released in order to allow it to be reclaimed when
3043 * the last stream releases its reference to it.
3044 */
3045 lttng_trace_chunk_put(session->current_trace_chunk);
3046 session->current_trace_chunk = NULL;
3047 }
3048 lttng_trace_chunk_put(session->pending_closure_trace_chunk);
3049 session->pending_closure_trace_chunk = NULL;
3050 end_unlock_session:
3051 pthread_mutex_unlock(&session->lock);
3052
3053 end:
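/*
 * The reply consists of the close_trace_chunk reply header (generic
 * return code and path length), immediately followed by the closed
 * chunk's path; both are sent as a single message.
 */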
3054 reply.generic.ret_code = htobe32((uint32_t) reply_code);
3055 reply.path_length = htobe32((uint32_t) path_length);
3056 buf_ret = lttng_dynamic_buffer_append(
3057 &reply_payload, &reply, sizeof(reply));
3058 if (buf_ret) {
3059 ERR("Failed to append \"close trace chunk\" command reply header to payload buffer");
3060 goto end_no_reply;
3061 }
3062
3063 if (reply_code == LTTNG_OK) {
3064 buf_ret = lttng_dynamic_buffer_append(&reply_payload,
3065 closed_trace_chunk_path, path_length);
3066 if (buf_ret) {
3067 ERR("Failed to append \"close trace chunk\" command reply path to payload buffer");
3068 goto end_no_reply;
3069 }
3070 }
3071
3072 send_ret = conn->sock->ops->sendmsg(conn->sock,
3073 reply_payload.data,
3074 reply_payload.size,
3075 0);
3076 if (send_ret < (ssize_t) reply_payload.size) {
3077 ERR("Failed to send \"close trace chunk\" command reply of %zu bytes (ret = %zd)",
3078 reply_payload.size, send_ret);
3079 ret = -1;
3080 goto end_no_reply;
3081 }
3082 end_no_reply:
3083 lttng_trace_chunk_put(chunk);
3084 lttng_dynamic_buffer_reset(&reply_payload);
3085 return ret;
3086 }
3087
3088 /*
3089 * relay_trace_chunk_exists: check if a trace chunk exists
3090 */
3091 static int relay_trace_chunk_exists(const struct lttcomm_relayd_hdr *recv_hdr,
3092 struct relay_connection *conn,
3093 const struct lttng_buffer_view *payload)
3094 {
3095 int ret = 0;
3096 ssize_t send_ret;
3097 struct relay_session *session = conn->session;
3098 struct lttcomm_relayd_trace_chunk_exists *msg;
3099 struct lttcomm_relayd_trace_chunk_exists_reply reply = {};
3100 struct lttng_buffer_view header_view;
3101 uint64_t chunk_id;
3102 bool chunk_exists;
3103
3104 if (!session || !conn->version_check_done) {
3105 ERR("Trying to close a trace chunk before version check");
3106 ret = -1;
3107 goto end_no_reply;
3108 }
3109
3110 if (session->major == 2 && session->minor < 11) {
3111 ERR("Chunk close command is unsupported before 2.11");
3112 ret = -1;
3113 goto end_no_reply;
3114 }
3115
3116 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
3117 if (!header_view.data) {
3118 ERR("Failed to receive payload of chunk close command");
3119 ret = -1;
3120 goto end_no_reply;
3121 }
3122
3123 /* Convert to host endianness. */
3124 msg = (typeof(msg)) header_view.data;
3125 chunk_id = be64toh(msg->chunk_id);
3126
3127 ret = sessiond_trace_chunk_registry_chunk_exists(
3128 sessiond_trace_chunk_registry,
3129 conn->session->sessiond_uuid,
3130 conn->session->id,
3131 chunk_id, &chunk_exists);
3132 /*
3133 * If ret is not 0, send the reply and report the error to the caller.
3134 * It is a protocol (or internal) error and the session/connection
3135 * should be torn down.
3136 */
3137 reply = (typeof(reply)){
3138 .generic.ret_code = htobe32((uint32_t)
3139 (ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID_PROTOCOL)),
3140 .trace_chunk_exists = ret == 0 ? chunk_exists : 0,
3141 };
3142 send_ret = conn->sock->ops->sendmsg(
3143 conn->sock, &reply, sizeof(reply), 0);
3144 if (send_ret < (ssize_t) sizeof(reply)) {
3145 ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)",
3146 send_ret);
3147 ret = -1;
3148 }
3149 end_no_reply:
3150 return ret;
3151 }
3152
3153 /*
3154 * relay_get_configuration: query whether feature is available
3155 */
3156 static int relay_get_configuration(const struct lttcomm_relayd_hdr *recv_hdr,
3157 struct relay_connection *conn,
3158 const struct lttng_buffer_view *payload)
3159 {
3160 int ret = 0;
3161 ssize_t send_ret;
3162 struct lttcomm_relayd_get_configuration *msg;
3163 struct lttcomm_relayd_get_configuration_reply reply = {};
3164 struct lttng_buffer_view header_view;
3165 uint64_t query_flags = 0;
3166 uint64_t result_flags = 0;
3167
3168 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
3169 if (!header_view.data) {
3170 ERR("Failed to receive payload of chunk close command");
3171 ret = -1;
3172 goto end_no_reply;
3173 }
3174
3175 /* Convert to host endianness. */
3176 msg = (typeof(msg)) header_view.data;
3177 query_flags = be64toh(msg->query_flags);
3178
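/* No query flags are currently defined; any set flag is a protocol error. */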
3179 if (query_flags) {
3180 ret = LTTNG_ERR_INVALID_PROTOCOL;
3181 goto reply;
3182 }
3183 if (opt_allow_clear) {
3184 result_flags |= LTTCOMM_RELAYD_CONFIGURATION_FLAG_CLEAR_ALLOWED;
3185 }
3186 ret = 0;
3187 reply:
3188 reply = (typeof(reply)){
3189 .generic.ret_code = htobe32((uint32_t)
3190 (ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID_PROTOCOL)),
3191 .relayd_configuration_flags = htobe64(result_flags),
3192 };
3193 send_ret = conn->sock->ops->sendmsg(
3194 conn->sock, &reply, sizeof(reply), 0);
3195 if (send_ret < (ssize_t) sizeof(reply)) {
3196 ERR("Failed to send \"get configuration\" command reply (ret = %zd)",
3197 send_ret);
3198 ret = -1;
3199 }
3200 end_no_reply:
3201 return ret;
3202 }
3203
3204 #define DBG_CMD(cmd_name, conn) \
3205 DBG3("Processing \"%s\" command for socket %i", cmd_name, conn->sock->fd);
3206
3207 static int relay_process_control_command(struct relay_connection *conn,
3208 const struct lttcomm_relayd_hdr *header,
3209 const struct lttng_buffer_view *payload)
3210 {
3211 int ret = 0;
3212
3213 switch (header->cmd) {
3214 case RELAYD_CREATE_SESSION:
3215 DBG_CMD("RELAYD_CREATE_SESSION", conn);
3216 ret = relay_create_session(header, conn, payload);
3217 break;
3218 case RELAYD_ADD_STREAM:
3219 DBG_CMD("RELAYD_ADD_STREAM", conn);
3220 ret = relay_add_stream(header, conn, payload);
3221 break;
3222 case RELAYD_START_DATA:
3223 DBG_CMD("RELAYD_START_DATA", conn);
3224 ret = relay_start(header, conn, payload);
3225 break;
3226 case RELAYD_SEND_METADATA:
3227 DBG_CMD("RELAYD_SEND_METADATA", conn);
3228 ret = relay_recv_metadata(header, conn, payload);
3229 break;
3230 case RELAYD_VERSION:
3231 DBG_CMD("RELAYD_VERSION", conn);
3232 ret = relay_send_version(header, conn, payload);
3233 break;
3234 case RELAYD_CLOSE_STREAM:
3235 DBG_CMD("RELAYD_CLOSE_STREAM", conn);
3236 ret = relay_close_stream(header, conn, payload);
3237 break;
3238 case RELAYD_DATA_PENDING:
3239 DBG_CMD("RELAYD_DATA_PENDING", conn);
3240 ret = relay_data_pending(header, conn, payload);
3241 break;
3242 case RELAYD_QUIESCENT_CONTROL:
3243 DBG_CMD("RELAYD_QUIESCENT_CONTROL", conn);
3244 ret = relay_quiescent_control(header, conn, payload);
3245 break;
3246 case RELAYD_BEGIN_DATA_PENDING:
3247 DBG_CMD("RELAYD_BEGIN_DATA_PENDING", conn);
3248 ret = relay_begin_data_pending(header, conn, payload);
3249 break;
3250 case RELAYD_END_DATA_PENDING:
3251 DBG_CMD("RELAYD_END_DATA_PENDING", conn);
3252 ret = relay_end_data_pending(header, conn, payload);
3253 break;
3254 case RELAYD_SEND_INDEX:
3255 DBG_CMD("RELAYD_SEND_INDEX", conn);
3256 ret = relay_recv_index(header, conn, payload);
3257 break;
3258 case RELAYD_STREAMS_SENT:
3259 DBG_CMD("RELAYD_STREAMS_SENT", conn);
3260 ret = relay_streams_sent(header, conn, payload);
3261 break;
3262 case RELAYD_RESET_METADATA:
3263 DBG_CMD("RELAYD_RESET_METADATA", conn);
3264 ret = relay_reset_metadata(header, conn, payload);
3265 break;
3266 case RELAYD_ROTATE_STREAMS:
3267 DBG_CMD("RELAYD_ROTATE_STREAMS", conn);
3268 ret = relay_rotate_session_streams(header, conn, payload);
3269 break;
3270 case RELAYD_CREATE_TRACE_CHUNK:
3271 DBG_CMD("RELAYD_CREATE_TRACE_CHUNK", conn);
3272 ret = relay_create_trace_chunk(header, conn, payload);
3273 break;
3274 case RELAYD_CLOSE_TRACE_CHUNK:
3275 DBG_CMD("RELAYD_CLOSE_TRACE_CHUNK", conn);
3276 ret = relay_close_trace_chunk(header, conn, payload);
3277 break;
3278 case RELAYD_TRACE_CHUNK_EXISTS:
3279 DBG_CMD("RELAYD_TRACE_CHUNK_EXISTS", conn);
3280 ret = relay_trace_chunk_exists(header, conn, payload);
3281 break;
3282 case RELAYD_GET_CONFIGURATION:
3283 DBG_CMD("RELAYD_GET_CONFIGURATION", conn);
3284 ret = relay_get_configuration(header, conn, payload);
3285 break;
3286 case RELAYD_UPDATE_SYNC_INFO:
3287 default:
3288 ERR("Received unknown command (%u)", header->cmd);
3289 relay_unknown_command(conn);
3290 ret = -1;
3291 goto end;
3292 }
3293
3294 end:
3295 return ret;
3296 }
3297
3298 static enum relay_connection_status relay_process_control_receive_payload(
3299 struct relay_connection *conn)
3300 {
3301 int ret = 0;
3302 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3303 struct lttng_dynamic_buffer *reception_buffer =
3304 &conn->protocol.ctrl.reception_buffer;
3305 struct ctrl_connection_state_receive_payload *state =
3306 &conn->protocol.ctrl.state.receive_payload;
3307 struct lttng_buffer_view payload_view;
3308
3309 if (state->left_to_receive == 0) {
3310 /* Short-circuit for payload-less commands. */
3311 goto reception_complete;
3312 }
3313
3314 ret = conn->sock->ops->recvmsg(conn->sock,
3315 reception_buffer->data + state->received,
3316 state->left_to_receive, MSG_DONTWAIT);
3317 if (ret < 0) {
3318 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3319 PERROR("Unable to receive command payload on sock %d",
3320 conn->sock->fd);
3321 status = RELAY_CONNECTION_STATUS_ERROR;
3322 }
3323 goto end;
3324 } else if (ret == 0) {
3325 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
3326 status = RELAY_CONNECTION_STATUS_CLOSED;
3327 goto end;
3328 }
3329
3330 assert(ret > 0);
3331 assert(ret <= state->left_to_receive);
3332
3333 state->left_to_receive -= ret;
3334 state->received += ret;
3335
3336 if (state->left_to_receive > 0) {
3337 /*
3338 * Can't transition to the protocol's next state, wait to
3339 * receive the rest of the payload.
3340 */
3341 DBG3("Partial reception of control connection protocol payload (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
3342 state->received, state->left_to_receive,
3343 conn->sock->fd);
3344 goto end;
3345 }
3346
3347 reception_complete:
3348 DBG("Done receiving control command payload: fd = %i, payload size = %" PRIu64 " bytes",
3349 conn->sock->fd, state->received);
3350 /*
3351 * The payload required to process the command has been received.
3352 * A view to the reception buffer is forwarded to the various
3353 * commands and the state of the control is reset on success.
3354 *
3355 * Commands are responsible for sending their reply to the peer.
3356 */
3357 payload_view = lttng_buffer_view_from_dynamic_buffer(reception_buffer,
3358 0, -1);
3359 ret = relay_process_control_command(conn,
3360 &state->header, &payload_view);
3361 if (ret < 0) {
3362 status = RELAY_CONNECTION_STATUS_ERROR;
3363 goto end;
3364 }
3365
3366 ret = connection_reset_protocol_state(conn);
3367 if (ret) {
3368 status = RELAY_CONNECTION_STATUS_ERROR;
3369 }
3370 end:
3371 return status;
3372 }
3373
3374 static enum relay_connection_status relay_process_control_receive_header(
3375 struct relay_connection *conn)
3376 {
3377 int ret = 0;
3378 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3379 struct lttcomm_relayd_hdr header;
3380 struct lttng_dynamic_buffer *reception_buffer =
3381 &conn->protocol.ctrl.reception_buffer;
3382 struct ctrl_connection_state_receive_header *state =
3383 &conn->protocol.ctrl.state.receive_header;
3384
3385 assert(state->left_to_receive != 0);
3386
3387 ret = conn->sock->ops->recvmsg(conn->sock,
3388 reception_buffer->data + state->received,
3389 state->left_to_receive, MSG_DONTWAIT);
3390 if (ret < 0) {
3391 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3392 PERROR("Unable to receive control command header on sock %d",
3393 conn->sock->fd);
3394 status = RELAY_CONNECTION_STATUS_ERROR;
3395 }
3396 goto end;
3397 } else if (ret == 0) {
3398 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
3399 status = RELAY_CONNECTION_STATUS_CLOSED;
3400 goto end;
3401 }
3402
3403 assert(ret > 0);
3404 assert(ret <= state->left_to_receive);
3405
3406 state->left_to_receive -= ret;
3407 state->received += ret;
3408
3409 if (state->left_to_receive > 0) {
3410 /*
3411 * Can't transition to the protocol's next state, wait to
3412 * receive the rest of the header.
3413 */
3414 DBG3("Partial reception of control connection protocol header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
3415 state->received, state->left_to_receive,
3416 conn->sock->fd);
3417 goto end;
3418 }
3419
3420 /* Transition to next state: receiving the command's payload. */
3421 conn->protocol.ctrl.state_id =
3422 CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD;
3423 memcpy(&header, reception_buffer->data, sizeof(header));
3424 header.circuit_id = be64toh(header.circuit_id);
3425 header.data_size = be64toh(header.data_size);
3426 header.cmd = be32toh(header.cmd);
3427 header.cmd_version = be32toh(header.cmd_version);
3428 memcpy(&conn->protocol.ctrl.state.receive_payload.header,
3429 &header, sizeof(header));
3430
3431 DBG("Done receiving control command header: fd = %i, cmd = %" PRIu32 ", cmd_version = %" PRIu32 ", payload size = %" PRIu64 " bytes",
3432 conn->sock->fd, header.cmd, header.cmd_version,
3433 header.data_size);
3434
3435 if (header.data_size > DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE) {
3436 ERR("Command header indicates a payload (%" PRIu64 " bytes) that exceeds the maximal payload size allowed on a control connection.",
3437 header.data_size);
3438 status = RELAY_CONNECTION_STATUS_ERROR;
3439 goto end;
3440 }
3441
3442 conn->protocol.ctrl.state.receive_payload.left_to_receive =
3443 header.data_size;
3444 conn->protocol.ctrl.state.receive_payload.received = 0;
3445 ret = lttng_dynamic_buffer_set_size(reception_buffer,
3446 header.data_size);
3447 if (ret) {
3448 status = RELAY_CONNECTION_STATUS_ERROR;
3449 goto end;
3450 }
3451
3452 if (header.data_size == 0) {
3453 /*
3454 * Manually invoke the next state as the poll loop
3455 * will not wake-up to allow us to proceed further.
3456 */
3457 status = relay_process_control_receive_payload(conn);
3458 }
3459 end:
3460 return status;
3461 }
3462
3463 /*
3464 * Process the commands received on the control socket
3465 */
3466 static enum relay_connection_status relay_process_control(
3467 struct relay_connection *conn)
3468 {
3469 enum relay_connection_status status;
3470
3471 switch (conn->protocol.ctrl.state_id) {
3472 case CTRL_CONNECTION_STATE_RECEIVE_HEADER:
3473 status = relay_process_control_receive_header(conn);
3474 break;
3475 case CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD:
3476 status = relay_process_control_receive_payload(conn);
3477 break;
3478 default:
3479 ERR("Unknown control connection protocol state encountered.");
3480 abort();
3481 }
3482
3483 return status;
3484 }
3485
3486 static enum relay_connection_status relay_process_data_receive_header(
3487 struct relay_connection *conn)
3488 {
3489 int ret;
3490 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3491 struct data_connection_state_receive_header *state =
3492 &conn->protocol.data.state.receive_header;
3493 struct lttcomm_relayd_data_hdr header;
3494 struct relay_stream *stream;
3495
3496 assert(state->left_to_receive != 0);
3497
3498 ret = conn->sock->ops->recvmsg(conn->sock,
3499 state->header_reception_buffer + state->received,
3500 state->left_to_receive, MSG_DONTWAIT);
3501 if (ret < 0) {
3502 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3503 PERROR("Unable to receive data header on sock %d", conn->sock->fd);
3504 status = RELAY_CONNECTION_STATUS_ERROR;
3505 }
3506 goto end;
3507 } else if (ret == 0) {
3508 /* Orderly shutdown. Not necessary to print an error. */
3509 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
3510 status = RELAY_CONNECTION_STATUS_CLOSED;
3511 goto end;
3512 }
3513
3514 assert(ret > 0);
3515 assert(ret <= state->left_to_receive);
3516
3517 state->left_to_receive -= ret;
3518 state->received += ret;
3519
3520 if (state->left_to_receive > 0) {
3521 /*
3522 * Can't transition to the protocol's next state, wait to
3523 * receive the rest of the header.
3524 */
3525 DBG3("Partial reception of data connection header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
3526 state->received, state->left_to_receive,
3527 conn->sock->fd);
3528 goto end;
3529 }
3530
3531 /* Transition to next state: receiving the payload. */
3532 conn->protocol.data.state_id = DATA_CONNECTION_STATE_RECEIVE_PAYLOAD;
3533
3534 memcpy(&header, state->header_reception_buffer, sizeof(header));
3535 header.circuit_id = be64toh(header.circuit_id);
3536 header.stream_id = be64toh(header.stream_id);
3537 header.data_size = be32toh(header.data_size);
3538 header.net_seq_num = be64toh(header.net_seq_num);
3539 header.padding_size = be32toh(header.padding_size);
3540 memcpy(&conn->protocol.data.state.receive_payload.header, &header, sizeof(header));
3541
3542 conn->protocol.data.state.receive_payload.left_to_receive =
3543 header.data_size;
3544 conn->protocol.data.state.receive_payload.received = 0;
3545 conn->protocol.data.state.receive_payload.rotate_index = false;
3546
3547 DBG("Received data connection header on fd %i: circuit_id = %" PRIu64 ", stream_id = %" PRIu64 ", data_size = %" PRIu32 ", net_seq_num = %" PRIu64 ", padding_size = %" PRIu32,
3548 conn->sock->fd, header.circuit_id,
3549 header.stream_id, header.data_size,
3550 header.net_seq_num, header.padding_size);
3551
3552 stream = stream_get_by_id(header.stream_id);
3553 if (!stream) {
3554 DBG("relay_process_data_receive_payload: Cannot find stream %" PRIu64,
3555 header.stream_id);
3556 /* Protocol error. */
3557 status = RELAY_CONNECTION_STATUS_ERROR;
3558 goto end;
3559 }
3560
3561 pthread_mutex_lock(&stream->lock);
3562 /* Prepare stream for the reception of a new packet. */
3563 ret = stream_init_packet(stream, header.data_size,
3564 &conn->protocol.data.state.receive_payload.rotate_index);
3565 pthread_mutex_unlock(&stream->lock);
3566 if (ret) {
3567 ERR("Failed to rotate stream output file");
3568 status = RELAY_CONNECTION_STATUS_ERROR;
3569 goto end_stream_unlock;
3570 }
3571
3572 end_stream_unlock:
3573 stream_put(stream);
3574 end:
3575 return status;
3576 }
3577
3578 static enum relay_connection_status relay_process_data_receive_payload(
3579 struct relay_connection *conn)
3580 {
3581 int ret;
3582 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3583 struct relay_stream *stream;
3584 struct data_connection_state_receive_payload *state =
3585 &conn->protocol.data.state.receive_payload;
3586 const size_t chunk_size = RECV_DATA_BUFFER_SIZE;
3587 char data_buffer[chunk_size];
3588 bool partial_recv = false;
3589 bool new_stream = false, close_requested = false, index_flushed = false;
3590 uint64_t left_to_receive = state->left_to_receive;
3591 struct relay_session *session;
3592
3593 DBG3("Receiving data for stream id %" PRIu64 " seqnum %" PRIu64 ", %" PRIu64" bytes received, %" PRIu64 " bytes left to receive",
3594 state->header.stream_id, state->header.net_seq_num,
3595 state->received, left_to_receive);
3596
3597 stream = stream_get_by_id(state->header.stream_id);
3598 if (!stream) {
3599 /* Protocol error. */
3600 ERR("relay_process_data_receive_payload: cannot find stream %" PRIu64,
3601 state->header.stream_id);
3602 status = RELAY_CONNECTION_STATUS_ERROR;
3603 goto end;
3604 }
3605
3606 pthread_mutex_lock(&stream->lock);
3607 session = stream->trace->session;
3608 if (!conn->session) {
3609 ret = connection_set_session(conn, session);
3610 if (ret) {
3611 status = RELAY_CONNECTION_STATUS_ERROR;
3612 goto end_stream_unlock;
3613 }
3614 }
3615
3616 /*
3617 * The size of the "chunk" received on any iteration is bounded by:
3618 * - the data left to receive,
3619 * - the data immediately available on the socket,
3620 * - the on-stack data buffer
3621 */
3622 while (left_to_receive > 0 && !partial_recv) {
3623 size_t recv_size = min(left_to_receive, chunk_size);
3624 struct lttng_buffer_view packet_chunk;
3625
3626 ret = conn->sock->ops->recvmsg(conn->sock, data_buffer,
3627 recv_size, MSG_DONTWAIT);
3628 if (ret < 0) {
3629 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3630 PERROR("Socket %d error", conn->sock->fd);
3631 status = RELAY_CONNECTION_STATUS_ERROR;
3632 }
3633 goto end_stream_unlock;
3634 } else if (ret == 0) {
3635 /* No more data ready to be consumed on socket. */
3636 DBG3("No more data ready for consumption on data socket of stream id %" PRIu64,
3637 state->header.stream_id);
3638 status = RELAY_CONNECTION_STATUS_CLOSED;
3639 break;
3640 } else if (ret < (int) recv_size) {
3641 /*
3642 * All the data available on the socket has been
3643 * consumed.
3644 */
3645 partial_recv = true;
3646 recv_size = ret;
3647 }
3648
3649 packet_chunk = lttng_buffer_view_init(data_buffer,
3650 0, recv_size);
3651 assert(packet_chunk.data);
3652
3653 ret = stream_write(stream, &packet_chunk, 0);
3654 if (ret) {
3655 ERR("Relay error writing data to file");
3656 status = RELAY_CONNECTION_STATUS_ERROR;
3657 goto end_stream_unlock;
3658 }
3659
3660 left_to_receive -= recv_size;
3661 state->received += recv_size;
3662 state->left_to_receive = left_to_receive;
3663 }
3664
3665 if (state->left_to_receive > 0) {
3666 /*
3667 * Did not receive all the data expected, wait for more data to
3668 * become available on the socket.
3669 */
3670 DBG3("Partial receive on data connection of stream id %" PRIu64 ", %" PRIu64 " bytes received, %" PRIu64 " bytes left to receive",
3671 state->header.stream_id, state->received,
3672 state->left_to_receive);
3673 goto end_stream_unlock;
3674 }
3675
3676 ret = stream_write(stream, NULL, state->header.padding_size);
3677 if (ret) {
3678 status = RELAY_CONNECTION_STATUS_ERROR;
3679 goto end_stream_unlock;
3680 }
3681
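/*
 * Update the stream's index entry for this packet. index_flushed
 * reports whether the entry could already be written out, which happens
 * once the matching index, received on the control connection, is also
 * available.
 */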
3682 if (session_streams_have_index(session)) {
3683 ret = stream_update_index(stream, state->header.net_seq_num,
3684 state->rotate_index, &index_flushed,
3685 state->header.data_size + state->header.padding_size);
3686 if (ret < 0) {
3687 ERR("Failed to update index: stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d",
3688 stream->stream_handle,
3689 state->header.net_seq_num, ret);
3690 status = RELAY_CONNECTION_STATUS_ERROR;
3691 goto end_stream_unlock;
3692 }
3693 }
3694
3695 if (stream->prev_data_seq == -1ULL) {
3696 new_stream = true;
3697 }
3698
3699 ret = stream_complete_packet(stream, state->header.data_size +
3700 state->header.padding_size, state->header.net_seq_num,
3701 index_flushed);
3702 if (ret) {
3703 status = RELAY_CONNECTION_STATUS_ERROR;
3704 goto end_stream_unlock;
3705 }
3706
3707 /*
3708 * Resetting the protocol state (to RECEIVE_HEADER) will trash the
3709 * contents of *state which are aliased (union) to the same location as
3710 * the new state. Don't use it beyond this point.
3711 */
3712 connection_reset_protocol_state(conn);
3713 state = NULL;
3714
3715 end_stream_unlock:
3716 close_requested = stream->close_requested;
3717 pthread_mutex_unlock(&stream->lock);
3718 if (close_requested && left_to_receive == 0) {
3719 try_stream_close(stream);
3720 }
3721
3722 if (new_stream) {
3723 pthread_mutex_lock(&session->lock);
3724 uatomic_set(&session->new_streams, 1);
3725 pthread_mutex_unlock(&session->lock);
3726 }
3727
3728 stream_put(stream);
3729 end:
3730 return status;
3731 }
3732
3733 /*
3734 * relay_process_data: Process the data received on the data socket
3735 */
3736 static enum relay_connection_status relay_process_data(
3737 struct relay_connection *conn)
3738 {
3739 enum relay_connection_status status;
3740
3741 switch (conn->protocol.data.state_id) {
3742 case DATA_CONNECTION_STATE_RECEIVE_HEADER:
3743 status = relay_process_data_receive_header(conn);
3744 break;
3745 case DATA_CONNECTION_STATE_RECEIVE_PAYLOAD:
3746 status = relay_process_data_receive_payload(conn);
3747 break;
3748 default:
3749 ERR("Unexpected data connection communication state.");
3750 abort();
3751 }
3752
3753 return status;
3754 }
3755
3756 static void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
3757 {
3758 int ret;
3759
3760 (void) lttng_poll_del(events, pollfd);
3761
3762 ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker, &pollfd, 1,
3763 fd_tracker_util_close_fd, NULL);
3764 if (ret < 0) {
3765 ERR("Closing pollfd %d", pollfd);
3766 }
3767 }
3768
3769 static void relay_thread_close_connection(struct lttng_poll_event *events,
3770 int pollfd, struct relay_connection *conn)
3771 {
3772 const char *type_str;
3773
3774 switch (conn->type) {
3775 case RELAY_DATA:
3776 type_str = "Data";
3777 break;
3778 case RELAY_CONTROL:
3779 type_str = "Control";
3780 break;
3781 case RELAY_VIEWER_COMMAND:
3782 type_str = "Viewer Command";
3783 break;
3784 case RELAY_VIEWER_NOTIFICATION:
3785 type_str = "Viewer Notification";
3786 break;
3787 default:
3788 type_str = "Unknown";
3789 }
3790 cleanup_connection_pollfd(events, pollfd);
3791 connection_put(conn);
3792 DBG("%s connection closed with %d", type_str, pollfd);
3793 }
3794
3795 /*
3796 * Worker thread: processes the control and data connections received over the relay connection pipe.
3797 */
3798 static void *relay_thread_worker(void *data)
3799 {
3800 int ret, err = -1, last_seen_data_fd = -1;
3801 uint32_t nb_fd;
3802 struct lttng_poll_event events;
3803 struct lttng_ht *relay_connections_ht;
3804 struct lttng_ht_iter iter;
3805 struct relay_connection *destroy_conn = NULL;
3806
3807 DBG("[thread] Relay worker started");
3808
3809 rcu_register_thread();
3810
3811 health_register(health_relayd, HEALTH_RELAYD_TYPE_WORKER);
3812
3813 if (testpoint(relayd_thread_worker)) {
3814 goto error_testpoint;
3815 }
3816
3817 health_code_update();
3818
3819 /* Table of connections indexed by socket fd. */
3820 relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3821 if (!relay_connections_ht) {
3822 goto relay_connections_ht_error;
3823 }
3824
3825 ret = create_named_thread_poll_set(&events, 2, "Worker thread epoll");
3826 if (ret < 0) {
3827 goto error_poll_create;
3828 }
3829
3830 ret = lttng_poll_add(&events, relay_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
3831 if (ret < 0) {
3832 goto error;
3833 }
3834
3835 restart:
3836 while (1) {
3837 int idx = -1, i, seen_control = 0, last_notdel_data_fd = -1;
3838
3839 health_code_update();
3840
3841 /* Infinite blocking call, waiting for transmission */
3842 DBG3("Relayd worker thread polling...");
3843 health_poll_entry();
3844 ret = lttng_poll_wait(&events, -1);
3845 health_poll_exit();
3846 if (ret < 0) {
3847 /*
3848 * Restart interrupted system call.
3849 */
3850 if (errno == EINTR) {
3851 goto restart;
3852 }
3853 goto error;
3854 }
3855
3856 nb_fd = ret;
3857
3858 /*
3859 * Process control. The control connection is
3860 * prioritized so we don't starve it with high
3861 * throughput tracing data on the data connection.
3862 */
3863 for (i = 0; i < nb_fd; i++) {
3864 /* Fetch once the poll data */
3865 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3866 int pollfd = LTTNG_POLL_GETFD(&events, i);
3867
3868 health_code_update();
3869
3870 /* Activity on the thread quit pipe: time to exit this thread. */
3871 ret = check_thread_quit_pipe(pollfd, revents);
3872 if (ret) {
3873 err = 0;
3874 goto exit;
3875 }
3876
3877 /* Inspect the relay conn pipe for new connection */
3878 if (pollfd == relay_conn_pipe[0]) {
3879 if (revents & LPOLLIN) {
3880 struct relay_connection *conn;
3881
3882 ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn));
3883 if (ret < 0) {
3884 goto error;
3885 }
3886 ret = lttng_poll_add(&events,
3887 conn->sock->fd,
3888 LPOLLIN | LPOLLRDHUP);
3889 if (ret) {
3890 ERR("Failed to add new connection file descriptor to poll set");
3891 goto error;
3892 }
3893 connection_ht_add(relay_connections_ht, conn);
3894 DBG("Connection socket %d added", conn->sock->fd);
3895 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3896 ERR("Relay connection pipe error");
3897 goto error;
3898 } else {
3899 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
3900 goto error;
3901 }
3902 } else {
3903 struct relay_connection *ctrl_conn;
3904
3905 ctrl_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3906 /* If not found, there is a synchronization issue. */
3907 assert(ctrl_conn);
3908
3909 if (ctrl_conn->type == RELAY_DATA) {
3910 if (revents & LPOLLIN) {
3911 /*
3912 * Remember this data fd as the last one seen that was
3913 * not deleted. It is used as the last seen fd if any
3914 * fd gets deleted in this first loop.
3915 */
3916 last_notdel_data_fd = pollfd;
3917 }
3918 goto put_ctrl_connection;
3919 }
3920 assert(ctrl_conn->type == RELAY_CONTROL);
3921
3922 if (revents & LPOLLIN) {
3923 enum relay_connection_status status;
3924
3925 status = relay_process_control(ctrl_conn);
3926 if (status != RELAY_CONNECTION_STATUS_OK) {
3927 /*
3928 * On a socket error, flag the session as aborted to force
3929 * the cleanup of its streams; otherwise they can leak
3930 * for the lifetime of the relayd.
3931 *
3932 * This prevents situations in which streams are left
3933 * open because an index was received, the control
3934 * connection was closed, and the data connection was
3935 * closed (uncleanly) before the packet's data was
3936 * provided.
3937 *
3938 * Since the control connection encountered an error,
3939 * it is okay to be conservative and close the
3940 * session right now as we can't rely on the protocol
3941 * being respected anymore.
3942 */
3943 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3944 session_abort(ctrl_conn->session);
3945 }
3946
3947 /* Clear the connection on error or close. */
3948 relay_thread_close_connection(&events,
3949 pollfd,
3950 ctrl_conn);
3951 }
3952 seen_control = 1;
3953 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3954 relay_thread_close_connection(&events,
3955 pollfd, ctrl_conn);
3956 if (last_seen_data_fd == pollfd) {
3957 last_seen_data_fd = last_notdel_data_fd;
3958 }
3959 } else {
3960 ERR("Unexpected poll events %u for control sock %d",
3961 revents, pollfd);
3962 connection_put(ctrl_conn);
3963 goto error;
3964 }
3965 put_ctrl_connection:
3966 connection_put(ctrl_conn);
3967 }
3968 }
3969
3970 /*
3971 * The last loop handled a control request, go back to poll to make
3972 * sure we prioritise the control socket.
3973 */
3974 if (seen_control) {
3975 continue;
3976 }
3977
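/*
 * Locate the index of the last data fd that was handled so that the
 * data-processing loop below resumes right after it.
 */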
3978 if (last_seen_data_fd >= 0) {
3979 for (i = 0; i < nb_fd; i++) {
3980 int pollfd = LTTNG_POLL_GETFD(&events, i);
3981
3982 health_code_update();
3983
3984 if (last_seen_data_fd == pollfd) {
3985 idx = i;
3986 break;
3987 }
3988 }
3989 }
3990
3991 /* Process data connection. */
3992 for (i = idx + 1; i < nb_fd; i++) {
3993 /* Fetch the poll data. */
3994 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3995 int pollfd = LTTNG_POLL_GETFD(&events, i);
3996 struct relay_connection *data_conn;
3997
3998 health_code_update();
3999
4000 if (!revents) {
4001 /* No activity for this FD (poll implementation). */
4002 continue;
4003 }
4004
4005 /* Skip the command pipe. It's handled in the first loop. */
4006 if (pollfd == relay_conn_pipe[0]) {
4007 continue;
4008 }
4009
4010 data_conn = connection_get_by_sock(relay_connections_ht, pollfd);
4011 if (!data_conn) {
4012 /* Skip it; the connection may have been removed already. */
4013 continue;
4014 }
4015 if (data_conn->type == RELAY_CONTROL) {
4016 goto put_data_connection;
4017 }
4018 assert(data_conn->type == RELAY_DATA);
4019
4020 if (revents & LPOLLIN) {
4021 enum relay_connection_status status;
4022
4023 status = relay_process_data(data_conn);
4024 /* Connection closed or error. */
4025 if (status != RELAY_CONNECTION_STATUS_OK) {
4026 /*
4027 * On a socket error, flag the session as aborted to force
4028 * the cleanup of its streams; otherwise they can leak
4029 * for the lifetime of the relayd.
4030 *
4031 * This prevents situations in which streams are left
4032 * open because an index was received, the control
4033 * connection was closed, and the data connection was
4034 * closed (uncleanly) before the packet's data was
4035 * provided.
4036 *
4037 * Since the data connection encountered an error,
4038 * it is okay to be conservative and close the
4039 * session right now as we can't rely on the protocol
4040 * being respected anymore.
4041 */
4042 if (status == RELAY_CONNECTION_STATUS_ERROR) {
4043 session_abort(data_conn->session);
4044 }
4045 relay_thread_close_connection(&events, pollfd,
4046 data_conn);
4047 /*
4048 * Every 'goto restart' sets the last seen fd; here we
4049 * don't really care since we simply continue the loop
4050 * after the connection has been deleted.
4051 */
4052 } else {
4053 /* Keep the last seen data fd. */
4054 last_seen_data_fd = pollfd;
4055 connection_put(data_conn);
4056 goto restart;
4057 }
4058 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
4059 relay_thread_close_connection(&events, pollfd,
4060 data_conn);
4061 } else {
4062 ERR("Unknown poll events %u for data sock %d",
4063 revents, pollfd);
4064 }
4065 put_data_connection:
4066 connection_put(data_conn);
4067 }
4068 last_seen_data_fd = -1;
4069 }
4070
4071 /* Normal exit, no error */
4072 ret = 0;
4073
4074 exit:
4075 error:
4076 /* Clean up the remaining connection objects. */
4077 rcu_read_lock();
4078 cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter,
4079 destroy_conn,
4080 sock_n.node) {
4081 health_code_update();
4082
4083 session_abort(destroy_conn->session);
4084
4085 /*
4086 * No need to grab another ref, because we own
4087 * destroy_conn.
4088 */
4089 relay_thread_close_connection(&events, destroy_conn->sock->fd,
4090 destroy_conn);
4091 }
4092 rcu_read_unlock();
4093
4094 (void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
4095 error_poll_create:
4096 lttng_ht_destroy(relay_connections_ht);
4097 relay_connections_ht_error:
4098 /* Close the relay connection pipe. */
4099 (void) fd_tracker_util_pipe_close(the_fd_tracker,
4100 relay_conn_pipe);
4101 if (err) {
4102 DBG("Thread exited with error");
4103 }
4104 DBG("Worker thread cleanup complete");
4105 error_testpoint:
4106 if (err) {
4107 health_error();
4108 ERR("Health error occurred in %s", __func__);
4109 }
4110 health_unregister(health_relayd);
4111 rcu_unregister_thread();
4112 lttng_relay_stop_threads();
4113 return NULL;
4114 }
4115
4116 /*
4117 * Create the relay connection pipe used to hand new connections to the
4118 * worker thread. Closed on worker thread teardown.
4119 */
4120 static int create_relay_conn_pipe(void)
4121 {
4122 return fd_tracker_util_pipe_open_cloexec(the_fd_tracker,
4123 "Relayd connection pipe", relay_conn_pipe);
4124 }
4125
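/*
 * Register stdout and stderr with the fd tracker as unsuspendable fds;
 * stdio_open simply hands back their already-open file descriptors.
 */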
4126 static int stdio_open(void *data, int *fds)
4127 {
4128 fds[0] = fileno(stdout);
4129 fds[1] = fileno(stderr);
4130 return 0;
4131 }
4132
4133 static int track_stdio(void)
4134 {
4135 int fds[2];
4136 const char *names[] = { "stdout", "stderr" };
4137
4138 return fd_tracker_open_unsuspendable_fd(the_fd_tracker, fds,
4139 names, 2, stdio_open, NULL);
4140 }
4141
4142 /*
4143 * main
4144 */
4145 int main(int argc, char **argv)
4146 {
4147 bool thread_is_rcu_registered = false;
4148 int ret = 0, retval = 0;
4149 void *status;
4150 char *unlinked_file_directory_path = NULL, *output_path = NULL;
4151
4152 /* Parse environment variables */
4153 ret = parse_env_options();
4154 if (ret) {
4155 retval = -1;
4156 goto exit_options;
4157 }
4158
4159 /*
4160 * Parse arguments.
4161 * Command line arguments overwrite environment.
4162 */
4163 progname = argv[0];
4164 if (set_options(argc, argv)) {
4165 retval = -1;
4166 goto exit_options;
4167 }
4168
4169 if (set_signal_handler()) {
4170 retval = -1;
4171 goto exit_options;
4172 }
4173
4174 relayd_config_log();
4175
4176 if (opt_print_version) {
4177 print_version();
4178 retval = 0;
4179 goto exit_options;
4180 }
4181
4182 ret = fclose(stdin);
4183 if (ret) {
4184 PERROR("Failed to close stdin");
4185 goto exit_options;
4186 }
4187
4188 DBG("Clear command %s", opt_allow_clear ? "allowed" : "disallowed");
4189
4190 /* Try to create directory if -o, --output is specified. */
4191 if (opt_output_path) {
4192 if (*opt_output_path != '/') {
4193 ERR("Please specify an absolute path for -o, --output PATH");
4194 retval = -1;
4195 goto exit_options;
4196 }
4197
4198 ret = utils_mkdir_recursive(opt_output_path, S_IRWXU | S_IRWXG,
4199 -1, -1);
4200 if (ret < 0) {
4201 ERR("Unable to create %s", opt_output_path);
4202 retval = -1;
4203 goto exit_options;
4204 }
4205 }
4206
4207 /* Daemonize */
4208 if (opt_daemon || opt_background) {
4209 ret = lttng_daemonize(&child_ppid, &recv_child_signal,
4210 !opt_background);
4211 if (ret < 0) {
4212 retval = -1;
4213 goto exit_options;
4214 }
4215 }
4216
4217 if (opt_working_directory) {
4218 ret = utils_change_working_directory(opt_working_directory);
4219 if (ret) {
4220 /* All errors are already logged. */
4221 goto exit_options;
4222 }
4223 }
4224
4225 sessiond_trace_chunk_registry = sessiond_trace_chunk_registry_create();
4226 if (!sessiond_trace_chunk_registry) {
4227 ERR("Failed to initialize session daemon trace chunk registry");
4228 retval = -1;
4229 goto exit_options;
4230 }
4231
4232 /*
4233 * The RCU thread registration (and its use, through the fd-tracker's
4234 * creation) is done after daemonization so that we don't have to deal
4235 * with liburcu's fork() handling: the call_rcu machinery would
4236 * otherwise need to be restored after the fork.
4237 */
4238 rcu_register_thread();
4239 thread_is_rcu_registered = true;
4240
4241 output_path = create_output_path("");
4242 if (!output_path) {
4243 ERR("Failed to get output path");
4244 retval = -1;
4245 goto exit_options;
4246 }
4247 ret = asprintf(&unlinked_file_directory_path, "%s/%s", output_path,
4248 DEFAULT_UNLINKED_FILES_DIRECTORY);
4249 free(output_path);
4250 if (ret < 0) {
4251 ERR("Failed to format unlinked file directory path");
4252 retval = -1;
4253 goto exit_options;
4254 }
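/*
 * Create the fd tracker, bounded by lttng_opt_fd_pool_size file
 * descriptors and backed by an unlinked-files directory under the
 * output path.
 */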
4255 the_fd_tracker = fd_tracker_create(
4256 unlinked_file_directory_path, lttng_opt_fd_pool_size);
4257 free(unlinked_file_directory_path);
4258 if (!the_fd_tracker) {
4259 retval = -1;
4260 goto exit_options;
4261 }
4262
4263 ret = track_stdio();
4264 if (ret) {
4265 retval = -1;
4266 goto exit_options;
4267 }
4268
4269 /* Initialize thread health monitoring */
4270 health_relayd = health_app_create(NR_HEALTH_RELAYD_TYPES);
4271 if (!health_relayd) {
4272 PERROR("health_app_create error");
4273 retval = -1;
4274 goto exit_options;
4275 }
4276
4277 /* Create thread quit pipe */
4278 if (init_thread_quit_pipe()) {
4279 retval = -1;
4280 goto exit_options;
4281 }
4282
4283 /* Set up the relay connection pipe used to hand new connections to the worker thread. */
4284 if (create_relay_conn_pipe()) {
4285 retval = -1;
4286 goto exit_options;
4287 }
4288
4289 /* Init relay command queue. */
4290 cds_wfcq_init(&relay_conn_queue.head, &relay_conn_queue.tail);
4291
4292 /* Initialize communication library */
4293 lttcomm_init();
4294 lttcomm_inet_init();
4295
4296 /* Table of sessions indexed by session ID. */
4297 sessions_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
4298 if (!sessions_ht) {
4299 retval = -1;
4300 goto exit_options;
4301 }
4302
4303 /* Table of relay streams indexed by stream ID. */
4304 relay_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
4305 if (!relay_streams_ht) {
4306 retval = -1;
4307 goto exit_options;
4308 }
4309
4310 /* Table of viewer streams indexed by stream ID. */
4311 viewer_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
4312 if (!viewer_streams_ht) {
4313 retval = -1;
4314 goto exit_options;
4315 }
4316
4317 ret = init_health_quit_pipe();
4318 if (ret) {
4319 retval = -1;
4320 goto exit_options;
4321 }
4322
4323 /* Create the thread that manages the health-check socket. */
4324 ret = pthread_create(&health_thread, default_pthread_attr(),
4325 thread_manage_health, (void *) NULL);
4326 if (ret) {
4327 errno = ret;
4328 PERROR("pthread_create health");
4329 retval = -1;
4330 goto exit_options;
4331 }
4332
4333 /* Setup the dispatcher thread */
4334 ret = pthread_create(&dispatcher_thread, default_pthread_attr(),
4335 relay_thread_dispatcher, (void *) NULL);
4336 if (ret) {
4337 errno = ret;
4338 PERROR("pthread_create dispatcher");
4339 retval = -1;
4340 goto exit_dispatcher_thread;
4341 }
4342
4343 /* Setup the worker thread */
4344 ret = pthread_create(&worker_thread, default_pthread_attr(),
4345 relay_thread_worker, NULL);
4346 if (ret) {
4347 errno = ret;
4348 PERROR("pthread_create worker");
4349 retval = -1;
4350 goto exit_worker_thread;
4351 }
4352
4353 /* Setup the listener thread */
4354 ret = pthread_create(&listener_thread, default_pthread_attr(),
4355 relay_thread_listener, (void *) NULL);
4356 if (ret) {
4357 errno = ret;
4358 PERROR("pthread_create listener");
4359 retval = -1;
4360 goto exit_listener_thread;
4361 }
4362
4363 ret = relayd_live_create(live_uri);
4364 if (ret) {
4365 ERR("Failed to start the live viewer threads");
4366 retval = -1;
4367 goto exit_live;
4368 }
4369
4370 /*
4371 * This is where we start awaiting program completion (e.g. through a
4372 * signal asking the threads to tear down).
4373 */
4374
4375 ret = relayd_live_join();
4376 if (ret) {
4377 retval = -1;
4378 }
4379 exit_live:
4380
4381 ret = pthread_join(listener_thread, &status);
4382 if (ret) {
4383 errno = ret;
4384 PERROR("pthread_join listener_thread");
4385 retval = -1;
4386 }
4387
4388 exit_listener_thread:
4389 ret = pthread_join(worker_thread, &status);
4390 if (ret) {
4391 errno = ret;
4392 PERROR("pthread_join worker_thread");
4393 retval = -1;
4394 }
4395
4396 exit_worker_thread:
4397 ret = pthread_join(dispatcher_thread, &status);
4398 if (ret) {
4399 errno = ret;
4400 PERROR("pthread_join dispatcher_thread");
4401 retval = -1;
4402 }
4403 exit_dispatcher_thread:
4404
4405 ret = pthread_join(health_thread, &status);
4406 if (ret) {
4407 errno = ret;
4408 PERROR("pthread_join health_thread");
4409 retval = -1;
4410 }
4411 exit_options:
4412 /*
4413 * Wait for all pending call_rcu work to complete before tearing
4414 * down data structures. call_rcu worker may be trying to
4415 * perform lookups in those structures.
4416 */
4417 rcu_barrier();
4418 relayd_cleanup();
4419
4420 /* Ensure all prior call_rcu are done. */
4421 rcu_barrier();
4422
4423 if (thread_is_rcu_registered) {
4424 rcu_unregister_thread();
4425 }
4426
4427 if (!retval) {
4428 exit(EXIT_SUCCESS);
4429 } else {
4430 exit(EXIT_FAILURE);
4431 }
4432 }