/*
 * Copyright (C) 2012 - Julien Desfossez <jdesfossez@efficios.com>
 *                      David Goulet <dgoulet@efficios.com>
 *               2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *               2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _LGPL_SOURCE
#include <getopt.h>
#include <grp.h>
#include <limits.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <inttypes.h>
#include <urcu/futex.h>
#include <urcu/uatomic.h>
#include <urcu/rculist.h>
#include <unistd.h>
#include <fcntl.h>
#include <strings.h>
#include <lttng/lttng.h>
#include <common/common.h>
#include <common/compat/poll.h>
#include <common/compat/socket.h>
#include <common/compat/endian.h>
#include <common/compat/getenv.h>
#include <common/defaults.h>
#include <common/daemonize.h>
#include <common/futex.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/sessiond-comm/inet.h>
#include <common/sessiond-comm/relayd.h>
#include <common/uri.h>
#include <common/utils.h>
#include <common/align.h>
#include <common/config/session-config.h>
#include <common/dynamic-buffer.h>
#include <common/buffer-view.h>
#include <common/string-utils/format.h>

#include "version.h"
#include "cmd.h"
#include "ctf-trace.h"
#include "index.h"
#include "utils.h"
#include "lttng-relayd.h"
#include "live.h"
#include "health-relayd.h"
#include "testpoint.h"
#include "viewer-stream.h"
#include "session.h"
#include "stream.h"
#include "connection.h"
#include "tracefile-array.h"
#include "tcp_keep_alive.h"
#include "sessiond-trace-chunks.h"
static const char *help_msg =
#ifdef LTTNG_EMBED_HELP
#include <lttng-relayd.8.h>
#else
NULL
#endif
;

enum relay_connection_status {
	RELAY_CONNECTION_STATUS_OK,
	/* An error occurred while processing an event on the connection. */
	RELAY_CONNECTION_STATUS_ERROR,
	/* Connection closed/shutdown cleanly. */
	RELAY_CONNECTION_STATUS_CLOSED,
};
/* command line options */
char *opt_output_path, *opt_working_directory;
static int opt_daemon, opt_background, opt_print_version;
enum relay_group_output_by opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_UNKNOWN;

/*
 * We need to wait for listener and live listener threads, as well as
 * health check thread, before being ready to signal readiness.
 */
#define NR_LTTNG_RELAY_READY	3
static int lttng_relay_ready = NR_LTTNG_RELAY_READY;

/* Size of receive buffer. */
#define RECV_DATA_BUFFER_SIZE	65536

static int recv_child_signal;	/* Set to 1 when a SIGUSR1 signal is received. */
static pid_t child_ppid;	/* Internal parent PID used with daemonize. */

static struct lttng_uri *control_uri;
static struct lttng_uri *data_uri;
static struct lttng_uri *live_uri;

const char *progname;

const char *tracing_group_name = DEFAULT_TRACING_GROUP;
static int tracing_group_name_override;

const char * const config_section_name = "relayd";
/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the worker thread that a command is queued and
 * ready to be processed.
 */
static int relay_conn_pipe[2] = { -1, -1 };

/* Shared between threads */
static int dispatch_thread_exit;

static pthread_t listener_thread;
static pthread_t dispatcher_thread;
static pthread_t worker_thread;
static pthread_t health_thread;

/*
 * last_relay_stream_id_lock protects last_relay_stream_id increment
 * atomicity on 32-bit architectures.
 */
static pthread_mutex_t last_relay_stream_id_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t last_relay_stream_id;

/*
 * Relay command queue.
 *
 * The relay_thread_listener and relay_thread_dispatcher communicate with this
 * queue.
 */
static struct relay_conn_queue relay_conn_queue;

/* Global relay stream hash table. */
struct lttng_ht *relay_streams_ht;

/* Global relay viewer stream hash table. */
struct lttng_ht *viewer_streams_ht;

/* Global relay sessions hash table. */
struct lttng_ht *sessions_ht;

/* Relayd health monitoring */
struct health_app *health_relayd;

struct sessiond_trace_chunk_registry *sessiond_trace_chunk_registry;
static struct option long_options[] = {
	{ "control-port", 1, 0, 'C', },
	{ "data-port", 1, 0, 'D', },
	{ "live-port", 1, 0, 'L', },
	{ "daemonize", 0, 0, 'd', },
	{ "background", 0, 0, 'b', },
	{ "group", 1, 0, 'g', },
	{ "help", 0, 0, 'h', },
	{ "output", 1, 0, 'o', },
	{ "verbose", 0, 0, 'v', },
	{ "config", 1, 0, 'f' },
	{ "version", 0, 0, 'V' },
	{ "working-directory", 1, 0, 'w', },
	{ "group-output-by-session", 0, 0, 's', },
	{ "group-output-by-host", 0, 0, 'p', },
	{ NULL, 0, 0, 0, },
};

static const char *config_ignore_options[] = { "help", "config", "version" };
static void print_version(void)
{
	fprintf(stdout, "%s\n", VERSION);
}

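/* Log the daemon version, git revision and any extra version metadata at DBG level. */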
static void relayd_config_log(void)
{
	DBG("LTTng-relayd " VERSION " - " VERSION_NAME "%s%s",
			GIT_VERSION[0] == '\0' ? "" : " - " GIT_VERSION,
			EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " EXTRA_VERSION_NAME);
	if (EXTRA_VERSION_DESCRIPTION[0] != '\0') {
		DBG("LTTng-relayd extra version description:\n\t" EXTRA_VERSION_DESCRIPTION "\n");
	}
	if (EXTRA_VERSION_PATCHES[0] != '\0') {
		DBG("LTTng-relayd extra patches:\n\t" EXTRA_VERSION_PATCHES "\n");
	}
}

/*
 * Take an option from the getopt output and set it in the right variable to be
 * used later.
 *
 * Return 0 on success else a negative value.
 */
static int set_option(int opt, const char *arg, const char *optname)
{
	int ret;

	switch (opt) {
	case 0:
		fprintf(stderr, "option %s", optname);
		if (arg) {
			fprintf(stderr, " with arg %s\n", arg);
		}
		break;
	case 'C':
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-C, --control-port");
		} else {
			ret = uri_parse(arg, &control_uri);
			if (ret < 0) {
				ERR("Invalid control URI specified");
				goto end;
			}
			if (control_uri->port == 0) {
				control_uri->port = DEFAULT_NETWORK_CONTROL_PORT;
			}
		}
		break;
	case 'D':
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-D, --data-port");
		} else {
			ret = uri_parse(arg, &data_uri);
			if (ret < 0) {
				ERR("Invalid data URI specified");
				goto end;
			}
			if (data_uri->port == 0) {
				data_uri->port = DEFAULT_NETWORK_DATA_PORT;
			}
		}
		break;
	case 'L':
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-L, --live-port");
		} else {
			ret = uri_parse(arg, &live_uri);
			if (ret < 0) {
				ERR("Invalid live URI specified");
				goto end;
			}
			if (live_uri->port == 0) {
				live_uri->port = DEFAULT_NETWORK_VIEWER_PORT;
			}
		}
		break;
	case 'd':
		opt_daemon = 1;
		break;
	case 'b':
		opt_background = 1;
		break;
	case 'g':
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-g, --group");
		} else {
			tracing_group_name = strdup(arg);
			if (tracing_group_name == NULL) {
				ret = -errno;
				PERROR("strdup");
				goto end;
			}
			tracing_group_name_override = 1;
		}
		break;
	case 'h':
		ret = utils_show_help(8, "lttng-relayd", help_msg);
		if (ret) {
			ERR("Cannot show --help for `lttng-relayd`");
			perror("exec");
		}
		exit(EXIT_FAILURE);
	case 'V':
		opt_print_version = 1;
		break;
	case 'o':
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-o, --output");
		} else {
			ret = asprintf(&opt_output_path, "%s", arg);
			if (ret < 0) {
				ret = -errno;
				PERROR("asprintf opt_output_path");
				goto end;
			}
		}
		break;
	case 'w':
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-w, --working-directory");
		} else {
			ret = asprintf(&opt_working_directory, "%s", arg);
			if (ret < 0) {
				ret = -errno;
				PERROR("asprintf opt_working_directory");
				goto end;
			}
		}
		break;

	case 'v':
		/* Verbose level can increase using multiple -v */
		if (arg) {
			lttng_opt_verbose = config_parse_value(arg);
		} else {
			/* Only 3 levels of verbosity (-vvv). */
			if (lttng_opt_verbose < 3) {
				lttng_opt_verbose += 1;
			}
		}
		break;
	case 's':
		if (opt_group_output_by != RELAYD_GROUP_OUTPUT_BY_UNKNOWN) {
			ERR("Cannot set --group-output-by-session, another --group-output-by argument is present");
			exit(EXIT_FAILURE);
		}
		opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_SESSION;
		break;
	case 'p':
		if (opt_group_output_by != RELAYD_GROUP_OUTPUT_BY_UNKNOWN) {
			ERR("Cannot set --group-output-by-host, another --group-output-by argument is present");
			exit(EXIT_FAILURE);
		}
		opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_HOST;
		break;
	default:
		/*
		 * Unknown option or other error.
		 * Error is printed by getopt, just return.
		 */
		ret = -1;
		goto end;
	}

	/* All good. */
	ret = 0;

end:
	return ret;
}

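/*
 * Illustrative configuration file snippet handled by the entry handler below.
 * Key names mirror the long option names above and boolean values accept
 * "yes", "on", "true" or a non-zero number; the output path shown here is
 * only an example:
 *
 *	[relayd]
 *	daemonize=yes
 *	output=/path/to/traces
 */
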
/*
 * config_entry_handler_cb used to handle options read from a config file.
 * See config_entry_handler_cb comment in common/config/session-config.h for
 * the return value conventions.
 */
static int config_entry_handler(const struct config_entry *entry, void *unused)
{
	int ret = 0, i;

	if (!entry || !entry->name || !entry->value) {
		ret = -EINVAL;
		goto end;
	}

	/* Check if the option is to be ignored */
	for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
		if (!strcmp(entry->name, config_ignore_options[i])) {
			goto end;
		}
	}

	for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1; i++) {
		/* Ignore if entry name is not fully matched. */
		if (strcmp(entry->name, long_options[i].name)) {
			continue;
		}

		/*
		 * If the option takes no argument on the command line,
		 * we have to check if the value is "true". We support
		 * non-zero numeric values, true, on and yes.
		 */
		if (!long_options[i].has_arg) {
			ret = config_parse_value(entry->value);
			if (ret <= 0) {
				if (ret) {
					WARN("Invalid configuration value \"%s\" for option %s",
						entry->value, entry->name);
				}
				/* False, skip boolean config option. */
				goto end;
			}
		}

		ret = set_option(long_options[i].val, entry->value, entry->name);
		goto end;
	}

	WARN("Unrecognized option \"%s\" in daemon configuration file.",
			entry->name);

end:
	return ret;
}

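/*
 * Parse options from the environment. Currently only the working directory
 * override (DEFAULT_LTTNG_RELAYD_WORKING_DIRECTORY_ENV) is honoured.
 */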
static int parse_env_options(void)
{
	int ret = 0;
	char *value = NULL;

	value = lttng_secure_getenv(DEFAULT_LTTNG_RELAYD_WORKING_DIRECTORY_ENV);
	if (value) {
		opt_working_directory = strdup(value);
		if (!opt_working_directory) {
			ERR("Failed to allocate working directory string (\"%s\")",
					value);
			ret = -1;
		}
	}
	return ret;
}

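/*
 * Parse the command line. A first pass only looks for --config and loads the
 * configuration file, then getopt's state is reset and every option is
 * applied. Default control, data and live URIs are assigned when left unset.
 */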
static int set_options(int argc, char **argv)
{
	int c, ret = 0, option_index = 0, retval = 0;
	int orig_optopt = optopt, orig_optind = optind;
	char *default_address, *optstring;
	const char *config_path = NULL;

	optstring = utils_generate_optstring(long_options,
			sizeof(long_options) / sizeof(struct option));
	if (!optstring) {
		retval = -ENOMEM;
		goto exit;
	}

	/* Check for the --config option */

	while ((c = getopt_long(argc, argv, optstring, long_options,
					&option_index)) != -1) {
		if (c == '?') {
			retval = -EINVAL;
			goto exit;
		} else if (c != 'f') {
			continue;
		}

		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-f, --config");
		} else {
			config_path = utils_expand_path(optarg);
			if (!config_path) {
				ERR("Failed to resolve path: %s", optarg);
			}
		}
	}

	ret = config_get_section_entries(config_path, config_section_name,
			config_entry_handler, NULL);
	if (ret) {
		if (ret > 0) {
			ERR("Invalid configuration option at line %i", ret);
		}
		retval = -1;
		goto exit;
	}

	/* Reset getopt's global state */
	optopt = orig_optopt;
	optind = orig_optind;
	while (1) {
		c = getopt_long(argc, argv, optstring, long_options, &option_index);
		if (c == -1) {
			break;
		}

		ret = set_option(c, optarg, long_options[option_index].name);
		if (ret < 0) {
			retval = -1;
			goto exit;
		}
	}

	/* Assign default values. */
	if (control_uri == NULL) {
		ret = asprintf(&default_address,
			"tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d",
			DEFAULT_NETWORK_CONTROL_PORT);
		if (ret < 0) {
			PERROR("asprintf default control address");
			retval = -1;
			goto exit;
		}

		ret = uri_parse(default_address, &control_uri);
		free(default_address);
		if (ret < 0) {
			ERR("Invalid control URI specified");
			retval = -1;
			goto exit;
		}
	}
	if (data_uri == NULL) {
		ret = asprintf(&default_address,
			"tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d",
			DEFAULT_NETWORK_DATA_PORT);
		if (ret < 0) {
			PERROR("asprintf default data address");
			retval = -1;
			goto exit;
		}

		ret = uri_parse(default_address, &data_uri);
		free(default_address);
		if (ret < 0) {
			ERR("Invalid data URI specified");
			retval = -1;
			goto exit;
		}
	}
	if (live_uri == NULL) {
		ret = asprintf(&default_address,
			"tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d",
			DEFAULT_NETWORK_VIEWER_PORT);
		if (ret < 0) {
			PERROR("asprintf default viewer control address");
			retval = -1;
			goto exit;
		}

		ret = uri_parse(default_address, &live_uri);
		free(default_address);
		if (ret < 0) {
			ERR("Invalid viewer control URI specified");
			retval = -1;
			goto exit;
		}
	}

	if (opt_group_output_by == RELAYD_GROUP_OUTPUT_BY_UNKNOWN) {
		opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_HOST;
	}

exit:
	free(optstring);
	return retval;
}

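/* Print the global viewer stream, relay stream and session objects (debugging aid). */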
static void print_global_objects(void)
{
	rcu_register_thread();

	print_viewer_streams();
	print_relay_streams();
	print_sessions();

	rcu_unregister_thread();
}

/*
 * Cleanup the daemon
 */
static void relayd_cleanup(void)
{
	print_global_objects();

	DBG("Cleaning up");

	if (viewer_streams_ht)
		lttng_ht_destroy(viewer_streams_ht);
	if (relay_streams_ht)
		lttng_ht_destroy(relay_streams_ht);
	if (sessions_ht)
		lttng_ht_destroy(sessions_ht);

	free(opt_output_path);
	free(opt_working_directory);

	/* Close thread quit pipes */
	utils_close_pipe(thread_quit_pipe);

	uri_free(control_uri);
	uri_free(data_uri);
	/* Live URI is freed in the live thread. */

	if (tracing_group_name_override) {
		free((void *) tracing_group_name);
	}
}

/*
 * Write to writable pipe used to notify a thread.
 */
static int notify_thread_pipe(int wpipe)
{
	ssize_t ret;

	ret = lttng_write(wpipe, "!", 1);
	if (ret < 1) {
		PERROR("write poll pipe");
		goto end;
	}
	ret = 0;
end:
	return ret;
}

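/* Wake the health-check thread by writing a single byte to its quit pipe. */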
static int notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write relay health quit");
		goto end;
	}
	ret = 0;
end:
	return ret;
}

/*
 * Stop all relayd and relayd-live threads.
 */
int lttng_relay_stop_threads(void)
{
	int retval = 0;

	/* Stopping all threads */
	DBG("Terminating all threads");
	if (notify_thread_pipe(thread_quit_pipe[1])) {
		ERR("write error on thread quit pipe");
		retval = -1;
	}

	if (notify_health_quit_pipe(health_quit_pipe)) {
		ERR("write error on health quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&relay_conn_queue.futex);

	if (relayd_live_stop()) {
		ERR("Error stopping live threads");
		retval = -1;
	}
	return retval;
}

/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, letting main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGINT:
		DBG("SIGINT caught");
		if (lttng_relay_stop_threads()) {
			ERR("Error stopping threads");
		}
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		if (lttng_relay_stop_threads()) {
			ERR("Error stopping threads");
		}
		break;
	case SIGUSR1:
		CMM_STORE_SHARED(recv_child_signal, 1);
		break;
	default:
		break;
	}
}

/*
 * Setup signal handler for:
 *	SIGINT, SIGTERM, SIGPIPE, SIGUSR1
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		PERROR("sigemptyset");
		return ret;
	}

	sa.sa_mask = sigset;
	sa.sa_flags = 0;

	sa.sa_handler = sighandler;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	sa.sa_handler = SIG_IGN;
	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");

	return ret;
}

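/*
 * Signal readiness to the daemonize parent (via SIGUSR1) once every thread
 * counted in NR_LTTNG_RELAY_READY has come up.
 */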
void lttng_relay_notify_ready(void)
{
	/* Notify the parent of the fork() process that we are ready. */
	if (opt_daemon || opt_background) {
		if (uatomic_sub_return(&lttng_relay_ready, 1) == 0) {
			kill(child_ppid, SIGUSR1);
		}
	}
}

/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int init_thread_quit_pipe(void)
{
	int ret;

	ret = utils_create_pipe_cloexec(thread_quit_pipe);

	return ret;
}

/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
static int create_thread_poll_set(struct lttng_poll_event *events, int size)
{
	int ret;

	if (events == NULL || size == 0) {
		ret = -1;
		goto error;
	}

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}

/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
static int check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}

/*
 * Create and init socket from uri.
 */
static struct lttcomm_sock *relay_socket_create(struct lttng_uri *uri)
{
	int ret;
	struct lttcomm_sock *sock = NULL;

	sock = lttcomm_alloc_sock_from_uri(uri);
	if (sock == NULL) {
		ERR("Allocating socket");
		goto error;
	}

	ret = lttcomm_create_sock(sock);
	if (ret < 0) {
		goto error;
	}
	DBG("Listening on sock %d", sock->fd);

	ret = sock->ops->bind(sock);
	if (ret < 0) {
		PERROR("Failed to bind socket");
		goto error;
	}

	ret = sock->ops->listen(sock, -1);
	if (ret < 0) {
		goto error;
	}

	return sock;

error:
	if (sock) {
		lttcomm_destroy_sock(sock);
	}
	return NULL;
}

/*
 * This thread manages the listening for new connections on the network
 */
static void *relay_thread_listener(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_sock *control_sock, *data_sock;

	DBG("[thread] Relay listener started");

	health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER);

	health_code_update();

	control_sock = relay_socket_create(control_uri);
	if (!control_sock) {
		goto error_sock_control;
	}

	data_sock = relay_socket_create(data_uri);
	if (!data_sock) {
		goto error_sock_relay;
	}

	/*
	 * Pass 3 as size here for the thread quit pipe, control and
	 * data socket.
	 */
	ret = create_thread_poll_set(&events, 3);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the control socket */
	ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Add the data socket */
	ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	lttng_relay_notify_ready();

	if (testpoint(relayd_thread_listener)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		DBG("Listener accepting connections");

restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		DBG("Relay new connection received");
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			if (revents & LPOLLIN) {
				/*
				 * A new connection is requested, therefore a
				 * sessiond/consumerd connection is allocated in
				 * this thread, enqueued to a global queue and
				 * dequeued (and freed) in the worker thread.
				 */
				int val = 1;
				struct relay_connection *new_conn;
				struct lttcomm_sock *newsock;
				enum connection_type type;

				if (pollfd == data_sock->fd) {
					type = RELAY_DATA;
					newsock = data_sock->ops->accept(data_sock);
					DBG("Relay data connection accepted, socket %d",
							newsock->fd);
				} else {
					assert(pollfd == control_sock->fd);
					type = RELAY_CONTROL;
					newsock = control_sock->ops->accept(control_sock);
					DBG("Relay control connection accepted, socket %d",
							newsock->fd);
				}
				if (!newsock) {
					PERROR("accepting sock");
					goto error;
				}

				ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
						sizeof(val));
				if (ret < 0) {
					PERROR("setsockopt inet");
					lttcomm_destroy_sock(newsock);
					goto error;
				}

				ret = socket_apply_keep_alive_config(newsock->fd);
				if (ret < 0) {
					ERR("Failed to apply TCP keep-alive configuration on socket (%i)",
							newsock->fd);
					lttcomm_destroy_sock(newsock);
					goto error;
				}

				new_conn = connection_create(newsock, type);
				if (!new_conn) {
					lttcomm_destroy_sock(newsock);
					goto error;
				}

				/* Enqueue request for the dispatcher thread. */
				cds_wfcq_enqueue(&relay_conn_queue.head, &relay_conn_queue.tail,
						&new_conn->qnode);

				/*
				 * Wake the dispatch queue futex.
				 * Implicit memory barrier with the
				 * exchange in cds_wfcq_enqueue.
				 */
				futex_nto1_wake(&relay_conn_queue.futex);
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("socket poll error");
				goto error;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
error:
error_poll_add:
error_testpoint:
	lttng_poll_clean(&events);
error_create_poll:
	if (data_sock->fd >= 0) {
		ret = data_sock->ops->close(data_sock);
		if (ret) {
			PERROR("close");
		}
	}
	lttcomm_destroy_sock(data_sock);
error_sock_relay:
	if (control_sock->fd >= 0) {
		ret = control_sock->ops->close(control_sock);
		if (ret) {
			PERROR("close");
		}
	}
	lttcomm_destroy_sock(control_sock);
error_sock_control:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_relayd);
	DBG("Relay listener thread cleanup complete");
	lttng_relay_stop_threads();
	return NULL;
}

/*
 * This thread manages the dispatching of the requests to worker threads
 */
static void *relay_thread_dispatcher(void *data)
{
	int err = -1;
	ssize_t ret;
	struct cds_wfcq_node *node;
	struct relay_connection *new_conn = NULL;

	DBG("[thread] Relay dispatcher started");

	health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER);

	if (testpoint(relayd_thread_dispatcher)) {
		goto error_testpoint;
	}

	health_code_update();

	for (;;) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&relay_conn_queue.futex);

		if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
			break;
		}

		do {
			health_code_update();

			/* Dequeue commands */
			node = cds_wfcq_dequeue_blocking(&relay_conn_queue.head,
					&relay_conn_queue.tail);
			if (node == NULL) {
				DBG("Woken up but nothing in the relay command queue");
				/* Continue thread execution */
				break;
			}
			new_conn = caa_container_of(node, struct relay_connection, qnode);

			DBG("Dispatching request waiting on sock %d", new_conn->sock->fd);

			/*
			 * Inform worker thread of the new request. This
			 * call is blocking so we can be assured that
			 * the data will be read at some point in time
			 * or wait to the end of the world :)
			 */
			ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn));
			if (ret < 0) {
				PERROR("write connection pipe");
				connection_put(new_conn);
				goto error;
			}
		} while (node != NULL);

		/* Futex wait on queue. Blocking call on futex() */
		health_poll_entry();
		futex_nto1_wait(&relay_conn_queue.futex);
		health_poll_exit();
	}

	/* Normal exit, no error */
	err = 0;

error:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_relayd);
	DBG("Dispatch thread dying");
	lttng_relay_stop_threads();
	return NULL;
}

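/* Streams carry indexes only for sessions from 2.4+ peers and never for snapshot sessions. */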
static bool session_streams_have_index(const struct relay_session *session)
{
	return session->minor >= 4 && !session->snapshot;
}

/*
 * Handle the RELAYD_CREATE_SESSION command.
 *
 * On success, send back the session id or else return a negative value.
 */
static int relay_create_session(const struct lttcomm_relayd_hdr *recv_hdr,
		struct relay_connection *conn,
		const struct lttng_buffer_view *payload)
{
	int ret = 0;
	ssize_t send_ret;
	struct relay_session *session = NULL;
	struct lttcomm_relayd_create_session_reply_2_11 reply = {};
	char session_name[LTTNG_NAME_MAX] = {};
	char hostname[LTTNG_HOST_NAME_MAX] = {};
	uint32_t live_timer = 0;
	bool snapshot = false;
	bool session_name_contains_creation_timestamp = false;
	/* Left nil for peers < 2.11. */
	char base_path[LTTNG_PATH_MAX] = {};
	lttng_uuid sessiond_uuid = {};
	LTTNG_OPTIONAL(uint64_t) id_sessiond = {};
	LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
	LTTNG_OPTIONAL(time_t) creation_time = {};
	struct lttng_dynamic_buffer reply_payload;

	lttng_dynamic_buffer_init(&reply_payload);

	if (conn->minor < 4) {
		/* From 2.1 to 2.3 */
		ret = 0;
	} else if (conn->minor >= 4 && conn->minor < 11) {
		/* From 2.4 to 2.10 */
		ret = cmd_create_session_2_4(payload, session_name,
			hostname, &live_timer, &snapshot);
	} else {
		bool has_current_chunk;
		uint64_t current_chunk_id_value;
		time_t creation_time_value;
		uint64_t id_sessiond_value;

		/* From 2.11 to ... */
		ret = cmd_create_session_2_11(payload, session_name, hostname,
			base_path, &live_timer, &snapshot, &id_sessiond_value,
			sessiond_uuid, &has_current_chunk,
			&current_chunk_id_value, &creation_time_value,
			&session_name_contains_creation_timestamp);
		if (lttng_uuid_is_nil(sessiond_uuid)) {
			/* The nil UUID is reserved for pre-2.11 clients. */
			ERR("Illegal nil UUID announced by peer in create session command");
			ret = -1;
			goto send_reply;
		}
		LTTNG_OPTIONAL_SET(&id_sessiond, id_sessiond_value);
		LTTNG_OPTIONAL_SET(&creation_time, creation_time_value);
		if (has_current_chunk) {
			LTTNG_OPTIONAL_SET(&current_chunk_id,
				current_chunk_id_value);
		}
	}

	if (ret < 0) {
		goto send_reply;
	}

	session = session_create(session_name, hostname, base_path, live_timer,
			snapshot, sessiond_uuid,
			id_sessiond.is_set ? &id_sessiond.value : NULL,
			current_chunk_id.is_set ? &current_chunk_id.value : NULL,
			creation_time.is_set ? &creation_time.value : NULL,
			conn->major, conn->minor,
			session_name_contains_creation_timestamp);
	if (!session) {
		ret = -1;
		goto send_reply;
	}
	assert(!conn->session);
	conn->session = session;
	DBG("Created session %" PRIu64, session->id);

	reply.generic.session_id = htobe64(session->id);

send_reply:
	if (ret < 0) {
		reply.generic.ret_code = htobe32(LTTNG_ERR_FATAL);
	} else {
		reply.generic.ret_code = htobe32(LTTNG_OK);
	}

	if (conn->minor < 11) {
		/* From 2.1 to 2.10 */
		ret = lttng_dynamic_buffer_append(&reply_payload,
				&reply.generic, sizeof(reply.generic));
		if (ret) {
			ERR("Failed to append \"create session\" command reply header to payload buffer");
			ret = -1;
			goto end;
		}
	} else {
		const uint32_t output_path_length =
				session ? strlen(session->output_path) + 1 : 0;

		reply.output_path_length = htobe32(output_path_length);
		ret = lttng_dynamic_buffer_append(
				&reply_payload, &reply, sizeof(reply));
		if (ret) {
			ERR("Failed to append \"create session\" command reply header to payload buffer");
			goto end;
		}

		if (output_path_length) {
			ret = lttng_dynamic_buffer_append(&reply_payload,
					session->output_path,
					output_path_length);
			if (ret) {
				ERR("Failed to append \"create session\" command reply path to payload buffer");
				goto end;
			}
		}
	}

	send_ret = conn->sock->ops->sendmsg(conn->sock, reply_payload.data,
			reply_payload.size, 0);
	if (send_ret < (ssize_t) reply_payload.size) {
		ERR("Failed to send \"create session\" command reply of %zu bytes (ret = %zd)",
				reply_payload.size, send_ret);
		ret = -1;
	}
end:
	if (ret < 0 && session) {
		session_put(session);
	}
	lttng_dynamic_buffer_reset(&reply_payload);
	return ret;
}

a4baae1b JD |
1267 | /* |
1268 | * When we have received all the streams and the metadata for a channel, | |
1269 | * we make them visible to the viewer threads. | |
1270 | */ | |
7591bab1 | 1271 | static void publish_connection_local_streams(struct relay_connection *conn) |
a4baae1b | 1272 | { |
7591bab1 MD |
1273 | struct relay_stream *stream; |
1274 | struct relay_session *session = conn->session; | |
a4baae1b | 1275 | |
7591bab1 MD |
1276 | /* |
1277 | * We publish all streams belonging to a session atomically wrt | |
1278 | * session lock. | |
1279 | */ | |
1280 | pthread_mutex_lock(&session->lock); | |
1281 | rcu_read_lock(); | |
1282 | cds_list_for_each_entry_rcu(stream, &session->recv_list, | |
1283 | recv_node) { | |
1284 | stream_publish(stream); | |
a4baae1b | 1285 | } |
7591bab1 | 1286 | rcu_read_unlock(); |
a4baae1b | 1287 | |
7591bab1 MD |
1288 | /* |
1289 | * Inform the viewer that there are new streams in the session. | |
1290 | */ | |
1291 | if (session->viewer_attached) { | |
1292 | uatomic_set(&session->new_streams, 1); | |
1293 | } | |
1294 | pthread_mutex_unlock(&session->lock); | |
a4baae1b JD |
1295 | } |
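/*
 * Sanitize a channel path received from a peer: reject paths that walk up the
 * hierarchy and strip the leading '/' from pre-2.11 absolute paths.
 */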
1296 | ||
348a81dc JG |
1297 | static int conform_channel_path(char *channel_path) |
1298 | { | |
1299 | int ret = 0; | |
1300 | ||
1301 | if (strstr("../", channel_path)) { | |
1302 | ERR("Refusing channel path as it walks up the path hierarchy: \"%s\"", | |
1303 | channel_path); | |
1304 | ret = -1; | |
1305 | goto end; | |
1306 | } | |
1307 | ||
1308 | if (*channel_path == '/') { | |
1309 | const size_t len = strlen(channel_path); | |
1310 | ||
1311 | /* | |
1312 | * Channel paths from peers prior to 2.11 are expressed as an | |
1313 | * absolute path that is, in reality, relative to the relay | |
1314 | * daemon's output directory. Remove the leading slash so it | |
1315 | * is correctly interpreted as a relative path later on. | |
1316 | * | |
1317 | * len (and not len - 1) is used to copy the trailing NULL. | |
1318 | */ | |
1319 | bcopy(channel_path + 1, channel_path, len); | |
1320 | } | |
1321 | end: | |
1322 | return ret; | |
1323 | } | |
1324 | ||
b8aa1682 JD |
1325 | /* |
1326 | * relay_add_stream: allocate a new stream for a session | |
1327 | */ | |
5312a3ed JG |
1328 | static int relay_add_stream(const struct lttcomm_relayd_hdr *recv_hdr, |
1329 | struct relay_connection *conn, | |
1330 | const struct lttng_buffer_view *payload) | |
b8aa1682 | 1331 | { |
7591bab1 MD |
1332 | int ret; |
1333 | ssize_t send_ret; | |
58eb9381 | 1334 | struct relay_session *session = conn->session; |
b8aa1682 JD |
1335 | struct relay_stream *stream = NULL; |
1336 | struct lttcomm_relayd_status_stream reply; | |
4030a636 | 1337 | struct ctf_trace *trace = NULL; |
7591bab1 MD |
1338 | uint64_t stream_handle = -1ULL; |
1339 | char *path_name = NULL, *channel_name = NULL; | |
1340 | uint64_t tracefile_size = 0, tracefile_count = 0; | |
348a81dc | 1341 | LTTNG_OPTIONAL(uint64_t) stream_chunk_id = {}; |
b8aa1682 | 1342 | |
5312a3ed | 1343 | if (!session || !conn->version_check_done) { |
b8aa1682 JD |
1344 | ERR("Trying to add a stream before version check"); |
1345 | ret = -1; | |
1346 | goto end_no_session; | |
1347 | } | |
1348 | ||
2f21a469 JR |
1349 | if (session->minor == 1) { |
1350 | /* For 2.1 */ | |
5312a3ed | 1351 | ret = cmd_recv_stream_2_1(payload, &path_name, |
7591bab1 | 1352 | &channel_name); |
2f21a469 JR |
1353 | } else if (session->minor > 1 && session->minor < 11) { |
1354 | /* From 2.2 to 2.10 */ | |
5312a3ed | 1355 | ret = cmd_recv_stream_2_2(payload, &path_name, |
7591bab1 | 1356 | &channel_name, &tracefile_size, &tracefile_count); |
2f21a469 JR |
1357 | } else { |
1358 | /* From 2.11 to ... */ | |
1359 | ret = cmd_recv_stream_2_11(payload, &path_name, | |
0b50e4b3 JG |
1360 | &channel_name, &tracefile_size, &tracefile_count, |
1361 | &stream_chunk_id.value); | |
1362 | stream_chunk_id.is_set = true; | |
0f907de1 | 1363 | } |
2f21a469 | 1364 | |
0f907de1 | 1365 | if (ret < 0) { |
7591bab1 | 1366 | goto send_reply; |
b8aa1682 JD |
1367 | } |
1368 | ||
348a81dc JG |
1369 | if (conform_channel_path(path_name)) { |
1370 | goto send_reply; | |
1371 | } | |
1372 | ||
7591bab1 | 1373 | trace = ctf_trace_get_by_path_or_create(session, path_name); |
2a174661 | 1374 | if (!trace) { |
7591bab1 | 1375 | goto send_reply; |
2a174661 | 1376 | } |
7591bab1 | 1377 | /* This stream here has one reference on the trace. */ |
2a174661 | 1378 | |
7591bab1 MD |
1379 | pthread_mutex_lock(&last_relay_stream_id_lock); |
1380 | stream_handle = ++last_relay_stream_id; | |
1381 | pthread_mutex_unlock(&last_relay_stream_id_lock); | |
d3e2ba59 | 1382 | |
7591bab1 MD |
1383 | /* We pass ownership of path_name and channel_name. */ |
1384 | stream = stream_create(trace, stream_handle, path_name, | |
348a81dc | 1385 | channel_name, tracefile_size, tracefile_count); |
7591bab1 MD |
1386 | path_name = NULL; |
1387 | channel_name = NULL; | |
a4baae1b | 1388 | |
2a174661 | 1389 | /* |
7591bab1 MD |
1390 | * Streams are the owners of their trace. Reference to trace is |
1391 | * kept within stream_create(). | |
2a174661 | 1392 | */ |
7591bab1 | 1393 | ctf_trace_put(trace); |
d3e2ba59 | 1394 | |
7591bab1 | 1395 | send_reply: |
53efb85a | 1396 | memset(&reply, 0, sizeof(reply)); |
7591bab1 MD |
1397 | reply.handle = htobe64(stream_handle); |
1398 | if (!stream) { | |
f73fabfd | 1399 | reply.ret_code = htobe32(LTTNG_ERR_UNK); |
b8aa1682 | 1400 | } else { |
f73fabfd | 1401 | reply.ret_code = htobe32(LTTNG_OK); |
b8aa1682 | 1402 | } |
5af40280 | 1403 | |
58eb9381 | 1404 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
b8aa1682 | 1405 | sizeof(struct lttcomm_relayd_status_stream), 0); |
5312a3ed JG |
1406 | if (send_ret < (ssize_t) sizeof(reply)) { |
1407 | ERR("Failed to send \"add stream\" command reply (ret = %zd)", | |
1408 | send_ret); | |
1409 | ret = -1; | |
b8aa1682 JD |
1410 | } |
1411 | ||
1412 | end_no_session: | |
7591bab1 MD |
1413 | free(path_name); |
1414 | free(channel_name); | |
0f907de1 | 1415 | return ret; |
b8aa1682 JD |
1416 | } |
1417 | ||
173af62f DG |
1418 | /* |
1419 | * relay_close_stream: close a specific stream | |
1420 | */ | |
5312a3ed JG |
1421 | static int relay_close_stream(const struct lttcomm_relayd_hdr *recv_hdr, |
1422 | struct relay_connection *conn, | |
1423 | const struct lttng_buffer_view *payload) | |
173af62f | 1424 | { |
5312a3ed JG |
1425 | int ret; |
1426 | ssize_t send_ret; | |
58eb9381 | 1427 | struct relay_session *session = conn->session; |
173af62f DG |
1428 | struct lttcomm_relayd_close_stream stream_info; |
1429 | struct lttcomm_relayd_generic_reply reply; | |
1430 | struct relay_stream *stream; | |
173af62f DG |
1431 | |
1432 | DBG("Close stream received"); | |
1433 | ||
5312a3ed | 1434 | if (!session || !conn->version_check_done) { |
173af62f DG |
1435 | ERR("Trying to close a stream before version check"); |
1436 | ret = -1; | |
1437 | goto end_no_session; | |
1438 | } | |
1439 | ||
5312a3ed JG |
1440 | if (payload->size < sizeof(stream_info)) { |
1441 | ERR("Unexpected payload size in \"relay_close_stream\": expected >= %zu bytes, got %zu bytes", | |
1442 | sizeof(stream_info), payload->size); | |
173af62f DG |
1443 | ret = -1; |
1444 | goto end_no_session; | |
1445 | } | |
5312a3ed JG |
1446 | memcpy(&stream_info, payload->data, sizeof(stream_info)); |
1447 | stream_info.stream_id = be64toh(stream_info.stream_id); | |
1448 | stream_info.last_net_seq_num = be64toh(stream_info.last_net_seq_num); | |
173af62f | 1449 | |
5312a3ed | 1450 | stream = stream_get_by_id(stream_info.stream_id); |
173af62f DG |
1451 | if (!stream) { |
1452 | ret = -1; | |
7591bab1 | 1453 | goto end; |
173af62f | 1454 | } |
77f7bd85 MD |
1455 | |
1456 | /* | |
1457 | * Set last_net_seq_num before the close flag. Required by data | |
1458 | * pending check. | |
1459 | */ | |
7591bab1 | 1460 | pthread_mutex_lock(&stream->lock); |
5312a3ed | 1461 | stream->last_net_seq_num = stream_info.last_net_seq_num; |
77f7bd85 MD |
1462 | pthread_mutex_unlock(&stream->lock); |
1463 | ||
bda7c7b9 JG |
1464 | /* |
1465 | * This is one of the conditions which may trigger a stream close | |
1466 | * with the others being: | |
1467 | * 1) A close command is received for a stream | |
1468 | * 2) The control connection owning the stream is closed | |
1469 | * 3) We have received all of the stream's data _after_ a close | |
1470 | * request. | |
1471 | */ | |
1472 | try_stream_close(stream); | |
7591bab1 MD |
1473 | if (stream->is_metadata) { |
1474 | struct relay_viewer_stream *vstream; | |
173af62f | 1475 | |
7591bab1 MD |
1476 | vstream = viewer_stream_get_by_id(stream->stream_handle); |
1477 | if (vstream) { | |
1478 | if (vstream->metadata_sent == stream->metadata_received) { | |
1479 | /* | |
1480 | * Since all the metadata has been sent to the | |
1481 | * viewer and that we have a request to close | |
1482 | * its stream, we can safely teardown the | |
1483 | * corresponding metadata viewer stream. | |
1484 | */ | |
1485 | viewer_stream_put(vstream); | |
1486 | } | |
1487 | /* Put local reference. */ | |
1488 | viewer_stream_put(vstream); | |
1489 | } | |
1490 | } | |
7591bab1 | 1491 | stream_put(stream); |
5312a3ed | 1492 | ret = 0; |
173af62f | 1493 | |
7591bab1 | 1494 | end: |
53efb85a | 1495 | memset(&reply, 0, sizeof(reply)); |
173af62f | 1496 | if (ret < 0) { |
f73fabfd | 1497 | reply.ret_code = htobe32(LTTNG_ERR_UNK); |
173af62f | 1498 | } else { |
f73fabfd | 1499 | reply.ret_code = htobe32(LTTNG_OK); |
173af62f | 1500 | } |
58eb9381 | 1501 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
173af62f | 1502 | sizeof(struct lttcomm_relayd_generic_reply), 0); |
5312a3ed JG |
1503 | if (send_ret < (ssize_t) sizeof(reply)) { |
1504 | ERR("Failed to send \"close stream\" command reply (ret = %zd)", | |
1505 | send_ret); | |
1506 | ret = -1; | |
173af62f DG |
1507 | } |
1508 | ||
1509 | end_no_session: | |
1510 | return ret; | |
1511 | } | |
1512 | ||
93ec662e JD |
1513 | /* |
1514 | * relay_reset_metadata: reset a metadata stream | |
1515 | */ | |
1516 | static | |
5312a3ed JG |
1517 | int relay_reset_metadata(const struct lttcomm_relayd_hdr *recv_hdr, |
1518 | struct relay_connection *conn, | |
1519 | const struct lttng_buffer_view *payload) | |
93ec662e | 1520 | { |
5312a3ed JG |
1521 | int ret; |
1522 | ssize_t send_ret; | |
93ec662e JD |
1523 | struct relay_session *session = conn->session; |
1524 | struct lttcomm_relayd_reset_metadata stream_info; | |
1525 | struct lttcomm_relayd_generic_reply reply; | |
1526 | struct relay_stream *stream; | |
1527 | ||
1528 | DBG("Reset metadata received"); | |
1529 | ||
5312a3ed | 1530 | if (!session || !conn->version_check_done) { |
93ec662e JD |
1531 | ERR("Trying to reset a metadata stream before version check"); |
1532 | ret = -1; | |
1533 | goto end_no_session; | |
1534 | } | |
1535 | ||
5312a3ed JG |
1536 | if (payload->size < sizeof(stream_info)) { |
1537 | ERR("Unexpected payload size in \"relay_reset_metadata\": expected >= %zu bytes, got %zu bytes", | |
1538 | sizeof(stream_info), payload->size); | |
93ec662e JD |
1539 | ret = -1; |
1540 | goto end_no_session; | |
1541 | } | |
5312a3ed JG |
1542 | memcpy(&stream_info, payload->data, sizeof(stream_info)); |
1543 | stream_info.stream_id = be64toh(stream_info.stream_id); | |
1544 | stream_info.version = be64toh(stream_info.version); | |
1545 | ||
1546 | DBG("Update metadata to version %" PRIu64, stream_info.version); | |
93ec662e JD |
1547 | |
1548 | /* Unsupported for live sessions for now. */ | |
1549 | if (session->live_timer != 0) { | |
1550 | ret = -1; | |
1551 | goto end; | |
1552 | } | |
1553 | ||
5312a3ed | 1554 | stream = stream_get_by_id(stream_info.stream_id); |
93ec662e JD |
1555 | if (!stream) { |
1556 | ret = -1; | |
1557 | goto end; | |
1558 | } | |
1559 | pthread_mutex_lock(&stream->lock); | |
1560 | if (!stream->is_metadata) { | |
1561 | ret = -1; | |
1562 | goto end_unlock; | |
1563 | } | |
1564 | ||
c35f9726 | 1565 | ret = stream_reset_file(stream); |
93ec662e | 1566 | if (ret < 0) { |
c35f9726 JG |
1567 | ERR("Failed to reset metadata stream %" PRIu64 |
1568 | ": stream_path = %s, channel = %s", | |
1569 | stream->stream_handle, stream->path_name, | |
1570 | stream->channel_name); | |
93ec662e JD |
1571 | goto end_unlock; |
1572 | } | |
93ec662e JD |
1573 | end_unlock: |
1574 | pthread_mutex_unlock(&stream->lock); | |
1575 | stream_put(stream); | |
1576 | ||
1577 | end: | |
1578 | memset(&reply, 0, sizeof(reply)); | |
1579 | if (ret < 0) { | |
1580 | reply.ret_code = htobe32(LTTNG_ERR_UNK); | |
1581 | } else { | |
1582 | reply.ret_code = htobe32(LTTNG_OK); | |
1583 | } | |
1584 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, | |
1585 | sizeof(struct lttcomm_relayd_generic_reply), 0); | |
5312a3ed JG |
1586 | if (send_ret < (ssize_t) sizeof(reply)) { |
1587 | ERR("Failed to send \"reset metadata\" command reply (ret = %zd)", | |
1588 | send_ret); | |
1589 | ret = -1; | |
93ec662e JD |
1590 | } |
1591 | ||
1592 | end_no_session: | |
1593 | return ret; | |
1594 | } | |
1595 | ||
b8aa1682 JD |
1596 | /* |
1597 | * relay_unknown_command: send -1 if received unknown command | |
1598 | */ | |
7591bab1 | 1599 | static void relay_unknown_command(struct relay_connection *conn) |
b8aa1682 JD |
1600 | { |
1601 | struct lttcomm_relayd_generic_reply reply; | |
5312a3ed | 1602 | ssize_t send_ret; |
b8aa1682 | 1603 | |
53efb85a | 1604 | memset(&reply, 0, sizeof(reply)); |
f73fabfd | 1605 | reply.ret_code = htobe32(LTTNG_ERR_UNK); |
5312a3ed JG |
1606 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
1607 | if (send_ret < (ssize_t) sizeof(reply)) { | |
1608 | ERR("Failed to send \"unknown command\" command reply (ret = %zd)", send_ret); | |
b8aa1682 JD |
1609 | } |
1610 | } | |
1611 | ||
1612 | /* | |
1613 | * relay_start: send an acknowledgment to the client to tell if we are | |
1614 | * ready to receive data. We are ready if a session is established. | |
1615 | */ | |
5312a3ed JG |
1616 | static int relay_start(const struct lttcomm_relayd_hdr *recv_hdr, |
1617 | struct relay_connection *conn, | |
1618 | const struct lttng_buffer_view *payload) | |
b8aa1682 | 1619 | { |
5312a3ed JG |
1620 | int ret = 0; |
1621 | ssize_t send_ret; | |
b8aa1682 | 1622 | struct lttcomm_relayd_generic_reply reply; |
58eb9381 | 1623 | struct relay_session *session = conn->session; |
b8aa1682 JD |
1624 | |
1625 | if (!session) { | |
1626 | DBG("Trying to start the streaming without a session established"); | |
f73fabfd | 1627 | ret = htobe32(LTTNG_ERR_UNK); |
b8aa1682 JD |
1628 | } |
1629 | ||
53efb85a | 1630 | memset(&reply, 0, sizeof(reply)); |
5312a3ed JG |
1631 | reply.ret_code = htobe32(LTTNG_OK); |
1632 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, | |
1633 | sizeof(reply), 0); | |
1634 | if (send_ret < (ssize_t) sizeof(reply)) { | |
1635 | ERR("Failed to send \"relay_start\" command reply (ret = %zd)", | |
1636 | send_ret); | |
1637 | ret = -1; | |
b8aa1682 JD |
1638 | } |
1639 | ||
1640 | return ret; | |
1641 | } | |
1642 | ||
b8aa1682 | 1643 | /* |
7591bab1 | 1644 | * relay_recv_metadata: receive the metadata for the session. |
b8aa1682 | 1645 | */ |
5312a3ed JG |
1646 | static int relay_recv_metadata(const struct lttcomm_relayd_hdr *recv_hdr, |
1647 | struct relay_connection *conn, | |
1648 | const struct lttng_buffer_view *payload) | |
b8aa1682 | 1649 | { |
32d1569c | 1650 | int ret = 0; |
58eb9381 | 1651 | struct relay_session *session = conn->session; |
5312a3ed | 1652 | struct lttcomm_relayd_metadata_payload metadata_payload_header; |
b8aa1682 | 1653 | struct relay_stream *metadata_stream; |
5312a3ed | 1654 | uint64_t metadata_payload_size; |
c35f9726 | 1655 | struct lttng_buffer_view packet_view; |
b8aa1682 JD |
1656 | |
1657 | if (!session) { | |
1658 | ERR("Metadata sent before version check"); | |
1659 | ret = -1; | |
1660 | goto end; | |
1661 | } | |
1662 | ||
5312a3ed | 1663 | if (recv_hdr->data_size < sizeof(struct lttcomm_relayd_metadata_payload)) { |
f6416125 MD |
1664 | ERR("Incorrect data size"); |
1665 | ret = -1; | |
1666 | goto end; | |
1667 | } | |
5312a3ed JG |
1668 | metadata_payload_size = recv_hdr->data_size - |
1669 | sizeof(struct lttcomm_relayd_metadata_payload); | |
f6416125 | 1670 | |
5312a3ed JG |
1671 | memcpy(&metadata_payload_header, payload->data, |
1672 | sizeof(metadata_payload_header)); | |
1673 | metadata_payload_header.stream_id = be64toh( | |
1674 | metadata_payload_header.stream_id); | |
1675 | metadata_payload_header.padding_size = be32toh( | |
1676 | metadata_payload_header.padding_size); | |
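/*
 * Sketch of the metadata payload layout assumed by the conversions
 * above (all integers arrive in network byte order):
 *
 *   uint64_t stream_id      relay stream owning the metadata
 *   uint32_t padding_size   trailing padding appended to the packet
 *   char     payload[]      data_size - sizeof(header) bytes of metadata
 */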
9d1bbf21 | 1677 | |
5312a3ed | 1678 | metadata_stream = stream_get_by_id(metadata_payload_header.stream_id); |
b8aa1682 JD |
1679 | if (!metadata_stream) { |
1680 | ret = -1; | |
7591bab1 | 1681 | goto end; |
b8aa1682 JD |
1682 | } |
1683 | ||
c35f9726 JG |
1684 | packet_view = lttng_buffer_view_from_view(payload, |
1685 | sizeof(metadata_payload_header), metadata_payload_size); | |
1686 | if (!packet_view.data) { | |
1687 | ERR("Invalid metadata packet length announced by header"); | |
b8aa1682 | 1688 | ret = -1; |
7591bab1 | 1689 | goto end_put; |
b8aa1682 | 1690 | } |
1d4dfdef | 1691 | |
c35f9726 JG |
1692 | pthread_mutex_lock(&metadata_stream->lock); |
1693 | ret = stream_write(metadata_stream, &packet_view, | |
5312a3ed | 1694 | metadata_payload_header.padding_size); |
c35f9726 JG |
1695 | pthread_mutex_unlock(&metadata_stream->lock); |
1696 | if (ret) { | |
5312a3ed | 1697 | ret = -1; |
7591bab1 | 1698 | goto end_put; |
1d4dfdef | 1699 | } |
7591bab1 | 1700 | end_put: |
7591bab1 | 1701 | stream_put(metadata_stream); |
b8aa1682 JD |
1702 | end: |
1703 | return ret; | |
1704 | } | |
1705 | ||
1706 | /* | |
1707 | * relay_send_version: send relayd version number | |
1708 | */ | |
5312a3ed JG |
1709 | static int relay_send_version(const struct lttcomm_relayd_hdr *recv_hdr, |
1710 | struct relay_connection *conn, | |
1711 | const struct lttng_buffer_view *payload) | |
b8aa1682 | 1712 | { |
7f51dcba | 1713 | int ret; |
5312a3ed | 1714 | ssize_t send_ret; |
092b6259 | 1715 | struct lttcomm_relayd_version reply, msg; |
87cb6359 | 1716 | bool compatible = true; |
b8aa1682 | 1717 | |
5312a3ed | 1718 | conn->version_check_done = true; |
b8aa1682 | 1719 | |
092b6259 | 1720 | /* Get version from the other side. */ |
5312a3ed JG |
1721 | if (payload->size < sizeof(msg)) { |
1722 | ERR("Unexpected payload size in \"relay_send_version\": expected >= %zu bytes, got %zu bytes", | |
1723 | sizeof(msg), payload->size); | |
092b6259 | 1724 | ret = -1; |
092b6259 DG |
1725 | goto end; |
1726 | } | |
1727 | ||
5312a3ed JG |
1728 | memcpy(&msg, payload->data, sizeof(msg)); |
1729 | msg.major = be32toh(msg.major); | |
1730 | msg.minor = be32toh(msg.minor); | |
1731 | ||
53efb85a | 1732 | memset(&reply, 0, sizeof(reply)); |
d83a952c MD |
1733 | reply.major = RELAYD_VERSION_COMM_MAJOR; |
1734 | reply.minor = RELAYD_VERSION_COMM_MINOR; | |
d4519fa3 JD |
1735 | |
1736 | /* Major versions must be the same */ | |
5312a3ed | 1737 | if (reply.major != msg.major) { |
6151a90f | 1738 | DBG("Incompatible major versions (%u vs %u), deleting session", |
5312a3ed | 1739 | reply.major, msg.major); |
87cb6359 | 1740 | compatible = false; |
d4519fa3 JD |
1741 | } |
1742 | ||
58eb9381 | 1743 | conn->major = reply.major; |
0f907de1 | 1744 | /* We adapt to the lowest compatible version */ |
5312a3ed | 1745 | if (reply.minor <= msg.minor) { |
58eb9381 | 1746 | conn->minor = reply.minor; |
0f907de1 | 1747 | } else { |
5312a3ed | 1748 | conn->minor = msg.minor; |
0f907de1 JD |
1749 | } |
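/*
 * Illustrative example: if this relayd advertises protocol 2.12 and the
 * peer announces 2.4, the major versions match and the connection
 * proceeds using the lower minor, i.e. protocol 2.4.
 */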
1750 | ||
6151a90f JD |
1751 | reply.major = htobe32(reply.major); |
1752 | reply.minor = htobe32(reply.minor); | |
5312a3ed JG |
1753 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
1754 | sizeof(reply), 0); | |
1755 | if (send_ret < (ssize_t) sizeof(reply)) { | |
1756 | ERR("Failed to send \"send version\" command reply (ret = %zd)", | |
1757 | send_ret); | |
1758 | ret = -1; | |
1759 | goto end; | |
1760 | } else { | |
1761 | ret = 0; | |
6151a90f JD |
1762 | } |
1763 | ||
87cb6359 JD |
1764 | if (!compatible) { |
1765 | ret = -1; | |
1766 | goto end; | |
1767 | } | |
1768 | ||
58eb9381 DG |
1769 | DBG("Version check done using protocol %u.%u", conn->major, |
1770 | conn->minor); | |
b8aa1682 JD |
1771 | |
1772 | end: | |
1773 | return ret; | |
1774 | } | |
1775 | ||
c8f59ee5 | 1776 | /* |
6d805429 | 1777 | * Check for data pending for a given stream id from the session daemon. |
c8f59ee5 | 1778 | */ |
5312a3ed JG |
1779 | static int relay_data_pending(const struct lttcomm_relayd_hdr *recv_hdr, |
1780 | struct relay_connection *conn, | |
1781 | const struct lttng_buffer_view *payload) | |
c8f59ee5 | 1782 | { |
58eb9381 | 1783 | struct relay_session *session = conn->session; |
6d805429 | 1784 | struct lttcomm_relayd_data_pending msg; |
c8f59ee5 DG |
1785 | struct lttcomm_relayd_generic_reply reply; |
1786 | struct relay_stream *stream; | |
5312a3ed | 1787 | ssize_t send_ret; |
c8f59ee5 | 1788 | int ret; |
298a25ca | 1789 | uint64_t stream_seq; |
c8f59ee5 | 1790 | |
6d805429 | 1791 | DBG("Data pending command received"); |
c8f59ee5 | 1792 | |
5312a3ed | 1793 | if (!session || !conn->version_check_done) { |
c8f59ee5 DG |
1794 | ERR("Trying to check for data before version check"); |
1795 | ret = -1; | |
1796 | goto end_no_session; | |
1797 | } | |
1798 | ||
5312a3ed JG |
1799 | if (payload->size < sizeof(msg)) { |
1800 | ERR("Unexpected payload size in \"relay_data_pending\": expected >= %zu bytes, got %zu bytes", | |
1801 | sizeof(msg), payload->size); | |
c8f59ee5 DG |
1802 | ret = -1; |
1803 | goto end_no_session; | |
1804 | } | |
5312a3ed JG |
1805 | memcpy(&msg, payload->data, sizeof(msg)); |
1806 | msg.stream_id = be64toh(msg.stream_id); | |
1807 | msg.last_net_seq_num = be64toh(msg.last_net_seq_num); | |
c8f59ee5 | 1808 | |
5312a3ed | 1809 | stream = stream_get_by_id(msg.stream_id); |
de91f48a | 1810 | if (stream == NULL) { |
c8f59ee5 | 1811 | ret = -1; |
7591bab1 | 1812 | goto end; |
c8f59ee5 DG |
1813 | } |
1814 | ||
7591bab1 MD |
1815 | pthread_mutex_lock(&stream->lock); |
1816 | ||
298a25ca JG |
1817 | if (session_streams_have_index(session)) { |
1818 | /* | |
1819 | * Ensure that both the index and stream data have been | |
1820 | * flushed up to the requested point. | |
1821 | */ | |
a8f9f353 | 1822 | stream_seq = min(stream->prev_data_seq, stream->prev_index_seq); |
298a25ca | 1823 | } else { |
a8f9f353 | 1824 | stream_seq = stream->prev_data_seq; |
298a25ca | 1825 | } |
a8f9f353 | 1826 | DBG("Data pending for stream id %" PRIu64 ": prev_data_seq %" PRIu64 |
298a25ca JG |
1827 | ", prev_index_seq %" PRIu64 |
1828 | ", and last_seq %" PRIu64, msg.stream_id, | |
a8f9f353 | 1829 | stream->prev_data_seq, stream->prev_index_seq, |
298a25ca | 1830 | msg.last_net_seq_num); |
c8f59ee5 | 1831 | |
33832e64 | 1832 | /* Avoid wrapping issue */ |
298a25ca | 1833 | if (((int64_t) (stream_seq - msg.last_net_seq_num)) >= 0) { |
6d805429 | 1834 | /* Data has in fact been written and is NOT pending */ |
c8f59ee5 | 1835 | ret = 0; |
6d805429 DG |
1836 | } else { |
1837 | /* Data still being streamed thus pending */ | |
1838 | ret = 1; | |
c8f59ee5 DG |
1839 | } |
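/*
 * Illustration: the signed-difference test above is the usual
 * serial-number comparison, so it stays valid even across a 64-bit
 * wrap. For instance, with stream_seq == 2 and
 * msg.last_net_seq_num == UINT64_MAX, the unsigned difference is 3
 * and (int64_t) 3 >= 0, so the data is correctly reported as already
 * written (not pending).
 */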
1840 | ||
7591bab1 MD |
1841 | stream->data_pending_check_done = true; |
1842 | pthread_mutex_unlock(&stream->lock); | |
f7079f67 | 1843 | |
7591bab1 MD |
1844 | stream_put(stream); |
1845 | end: | |
c8f59ee5 | 1846 | |
53efb85a | 1847 | memset(&reply, 0, sizeof(reply)); |
c8f59ee5 | 1848 | reply.ret_code = htobe32(ret); |
5312a3ed JG |
1849 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
1850 | if (send_ret < (ssize_t) sizeof(reply)) { | |
1851 | ERR("Failed to send \"data pending\" command reply (ret = %zd)", | |
1852 | send_ret); | |
1853 | ret = -1; | |
c8f59ee5 DG |
1854 | } |
1855 | ||
1856 | end_no_session: | |
1857 | return ret; | |
1858 | } | |
1859 | ||
1860 | /* | |
1861 | * Wait for the control socket to reach a quiescent state. | |
1862 | * | |
7591bab1 MD |
1863 | * Note that for now, when receiving this command from the session |
1864 | * daemon, this means that every command or data received before it on | |
1865 | * the control socket has already been handled. So, this is why we | |
1866 | * simply return OK here. | |
c8f59ee5 | 1867 | */ |
5312a3ed JG |
1868 | static int relay_quiescent_control(const struct lttcomm_relayd_hdr *recv_hdr, |
1869 | struct relay_connection *conn, | |
1870 | const struct lttng_buffer_view *payload) | |
c8f59ee5 DG |
1871 | { |
1872 | int ret; | |
5312a3ed | 1873 | ssize_t send_ret; |
ad7051c0 | 1874 | struct relay_stream *stream; |
ad7051c0 | 1875 | struct lttcomm_relayd_quiescent_control msg; |
c8f59ee5 DG |
1876 | struct lttcomm_relayd_generic_reply reply; |
1877 | ||
1878 | DBG("Checking quiescent state on control socket"); | |
1879 | ||
5312a3ed | 1880 | if (!conn->session || !conn->version_check_done) { |
ad7051c0 DG |
1881 | ERR("Trying to check for data before version check"); |
1882 | ret = -1; | |
1883 | goto end_no_session; | |
1884 | } | |
1885 | ||
5312a3ed JG |
1886 | if (payload->size < sizeof(msg)) { |
1887 | ERR("Unexpected payload size in \"relay_quiescent_control\": expected >= %zu bytes, got %zu bytes", | |
1888 | sizeof(msg), payload->size); | |
ad7051c0 DG |
1889 | ret = -1; |
1890 | goto end_no_session; | |
1891 | } | |
5312a3ed JG |
1892 | memcpy(&msg, payload->data, sizeof(msg)); |
1893 | msg.stream_id = be64toh(msg.stream_id); | |
ad7051c0 | 1894 | |
5312a3ed | 1895 | stream = stream_get_by_id(msg.stream_id); |
7591bab1 MD |
1896 | if (!stream) { |
1897 | goto reply; | |
1898 | } | |
1899 | pthread_mutex_lock(&stream->lock); | |
1900 | stream->data_pending_check_done = true; | |
1901 | pthread_mutex_unlock(&stream->lock); | |
5312a3ed JG |
1902 | |
1903 | DBG("Relay quiescent control pending flag set to %" PRIu64, msg.stream_id); | |
7591bab1 MD |
1904 | stream_put(stream); |
1905 | reply: | |
53efb85a | 1906 | memset(&reply, 0, sizeof(reply)); |
c8f59ee5 | 1907 | reply.ret_code = htobe32(LTTNG_OK); |
5312a3ed JG |
1908 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
1909 | if (send_ret < (ssize_t) sizeof(reply)) { | |
1910 | ERR("Failed to send \"quiescent control\" command reply (ret = %zd)", | |
1911 | send_ret); | |
1912 | ret = -1; | |
1913 | } else { | |
1914 | ret = 0; | |
c8f59ee5 DG |
1915 | } |
1916 | ||
ad7051c0 | 1917 | end_no_session: |
c8f59ee5 DG |
1918 | return ret; |
1919 | } | |
1920 | ||
f7079f67 | 1921 | /* |
7591bab1 MD |
1922 | * Initialize a data pending command. This means that a consumer is about |
1923 | * to ask for data pending for each stream it holds. Simply iterate over | |
1924 | * all streams of the session and clear their data_pending_check_done flag. | |
f7079f67 DG |
1925 | * |
1926 | * This command returns to the client a LTTNG_OK code. | |
1927 | */ | |
5312a3ed JG |
1928 | static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr, |
1929 | struct relay_connection *conn, | |
1930 | const struct lttng_buffer_view *payload) | |
f7079f67 DG |
1931 | { |
1932 | int ret; | |
5312a3ed | 1933 | ssize_t send_ret; |
f7079f67 DG |
1934 | struct lttng_ht_iter iter; |
1935 | struct lttcomm_relayd_begin_data_pending msg; | |
1936 | struct lttcomm_relayd_generic_reply reply; | |
1937 | struct relay_stream *stream; | |
f7079f67 DG |
1938 | |
1939 | assert(recv_hdr); | |
58eb9381 | 1940 | assert(conn); |
f7079f67 DG |
1941 | |
1942 | DBG("Init streams for data pending"); | |
1943 | ||
5312a3ed | 1944 | if (!conn->session || !conn->version_check_done) { |
f7079f67 DG |
1945 | ERR("Trying to check for data before version check"); |
1946 | ret = -1; | |
1947 | goto end_no_session; | |
1948 | } | |
1949 | ||
5312a3ed JG |
1950 | if (payload->size < sizeof(msg)) { |
1951 | ERR("Unexpected payload size in \"relay_begin_data_pending\": expected >= %zu bytes, got %zu bytes", | |
1952 | sizeof(msg), payload->size); | |
f7079f67 DG |
1953 | ret = -1; |
1954 | goto end_no_session; | |
1955 | } | |
5312a3ed JG |
1956 | memcpy(&msg, payload->data, sizeof(msg)); |
1957 | msg.session_id = be64toh(msg.session_id); | |
f7079f67 DG |
1958 | |
1959 | /* | |
7591bab1 MD |
1960 | * Iterate over all streams to set the begin data pending flag. |
1961 | * For now, the streams are indexed by stream handle so we have | |
1962 | * to iterate over all streams to find the one associated with | |
1963 | * the right session_id. | |
f7079f67 DG |
1964 | */ |
1965 | rcu_read_lock(); | |
d3e2ba59 | 1966 | cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream, |
2a174661 | 1967 | node.node) { |
7591bab1 MD |
1968 | if (!stream_get(stream)) { |
1969 | continue; | |
1970 | } | |
5312a3ed | 1971 | if (stream->trace->session->id == msg.session_id) { |
7591bab1 MD |
1972 | pthread_mutex_lock(&stream->lock); |
1973 | stream->data_pending_check_done = false; | |
1974 | pthread_mutex_unlock(&stream->lock); | |
f7079f67 DG |
1975 | DBG("Set begin data pending flag to stream %" PRIu64, |
1976 | stream->stream_handle); | |
1977 | } | |
7591bab1 | 1978 | stream_put(stream); |
f7079f67 DG |
1979 | } |
1980 | rcu_read_unlock(); | |
1981 | ||
53efb85a | 1982 | memset(&reply, 0, sizeof(reply)); |
f7079f67 DG |
1983 | /* All good, send back reply. */ |
1984 | reply.ret_code = htobe32(LTTNG_OK); | |
1985 | ||
5312a3ed JG |
1986 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
1987 | if (send_ret < (ssize_t) sizeof(reply)) { | |
1988 | ERR("Failed to send \"begin data pending\" command reply (ret = %zd)", | |
1989 | send_ret); | |
1990 | ret = -1; | |
1991 | } else { | |
1992 | ret = 0; | |
f7079f67 DG |
1993 | } |
1994 | ||
1995 | end_no_session: | |
1996 | return ret; | |
1997 | } | |
1998 | ||
1999 | /* | |
7591bab1 MD |
2000 | * End data pending command. This will check, for a given session id, if |
2001 | * each stream associated with it has its data_pending_check_done flag | |
2002 | * set. If not, this means that the client lost track of the stream but | |
2003 | * the data is still being streamed on our side. In this case, we inform | |
2004 | * the client that data is in flight. | |
f7079f67 DG |
2005 | * |
2006 | * Return to the client if there is data in flight or not with a ret_code. | |
2007 | */ | |
5312a3ed JG |
2008 | static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr, |
2009 | struct relay_connection *conn, | |
2010 | const struct lttng_buffer_view *payload) | |
f7079f67 DG |
2011 | { |
2012 | int ret; | |
5312a3ed | 2013 | ssize_t send_ret; |
f7079f67 DG |
2014 | struct lttng_ht_iter iter; |
2015 | struct lttcomm_relayd_end_data_pending msg; | |
2016 | struct lttcomm_relayd_generic_reply reply; | |
2017 | struct relay_stream *stream; | |
f7079f67 DG |
2018 | uint32_t is_data_inflight = 0; |
2019 | ||
f7079f67 DG |
2020 | DBG("End data pending command"); |
2021 | ||
5312a3ed | 2022 | if (!conn->session || !conn->version_check_done) { |
f7079f67 DG |
2023 | ERR("Trying to check for data before version check"); |
2024 | ret = -1; | |
2025 | goto end_no_session; | |
2026 | } | |
2027 | ||
5312a3ed JG |
2028 | if (payload->size < sizeof(msg)) { |
2029 | ERR("Unexpected payload size in \"relay_end_data_pending\": expected >= %zu bytes, got %zu bytes", | |
2030 | sizeof(msg), payload->size); | |
f7079f67 DG |
2031 | ret = -1; |
2032 | goto end_no_session; | |
2033 | } | |
5312a3ed JG |
2034 | memcpy(&msg, payload->data, sizeof(msg)); |
2035 | msg.session_id = be64toh(msg.session_id); | |
f7079f67 | 2036 | |
7591bab1 MD |
2037 | /* |
2038 | * Iterate over all streams to see if the begin data pending | |
2039 | * flag is set. | |
2040 | */ | |
f7079f67 | 2041 | rcu_read_lock(); |
d3e2ba59 | 2042 | cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream, |
2a174661 | 2043 | node.node) { |
7591bab1 MD |
2044 | if (!stream_get(stream)) { |
2045 | continue; | |
2046 | } | |
5312a3ed | 2047 | if (stream->trace->session->id != msg.session_id) { |
7591bab1 MD |
2048 | stream_put(stream); |
2049 | continue; | |
2050 | } | |
2051 | pthread_mutex_lock(&stream->lock); | |
2052 | if (!stream->data_pending_check_done) { | |
298a25ca JG |
2053 | uint64_t stream_seq; |
2054 | ||
2055 | if (session_streams_have_index(conn->session)) { | |
2056 | /* | |
2057 | * Ensure that both the index and stream data have been | |
2058 | * flushed up to the requested point. | |
2059 | */ | |
a8f9f353 | 2060 | stream_seq = min(stream->prev_data_seq, stream->prev_index_seq); |
298a25ca | 2061 | } else { |
a8f9f353 | 2062 | stream_seq = stream->prev_data_seq; |
298a25ca JG |
2063 | } |
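/*
 * Data is considered still in flight if the stream has not been
 * closed yet, or if its flushed position (stream_seq) has not caught
 * up to the last sequence number announced at close time; see the
 * check below.
 */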
2064 | if (!stream->closed || !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) { | |
7591bab1 MD |
2065 | is_data_inflight = 1; |
2066 | DBG("Data is still in flight for stream %" PRIu64, | |
2067 | stream->stream_handle); | |
2068 | pthread_mutex_unlock(&stream->lock); | |
2069 | stream_put(stream); | |
2070 | break; | |
2071 | } | |
f7079f67 | 2072 | } |
7591bab1 MD |
2073 | pthread_mutex_unlock(&stream->lock); |
2074 | stream_put(stream); | |
f7079f67 DG |
2075 | } |
2076 | rcu_read_unlock(); | |
2077 | ||
53efb85a | 2078 | memset(&reply, 0, sizeof(reply)); |
f7079f67 DG |
2079 | /* All good, send back reply. */ |
2080 | reply.ret_code = htobe32(is_data_inflight); | |
2081 | ||
5312a3ed JG |
2082 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
2083 | if (send_ret < (ssize_t) sizeof(reply)) { | |
2084 | ERR("Failed to send \"end data pending\" command reply (ret = %zd)", | |
2085 | send_ret); | |
2086 | ret = -1; | |
2087 | } else { | |
2088 | ret = 0; | |
f7079f67 DG |
2089 | } |
2090 | ||
2091 | end_no_session: | |
2092 | return ret; | |
2093 | } | |
2094 | ||
1c20f0e2 JD |
2095 | /* |
2096 | * Receive an index for a specific stream. | |
2097 | * | |
2098 | * Return 0 on success else a negative value. | |
2099 | */ | |
5312a3ed JG |
2100 | static int relay_recv_index(const struct lttcomm_relayd_hdr *recv_hdr, |
2101 | struct relay_connection *conn, | |
2102 | const struct lttng_buffer_view *payload) | |
1c20f0e2 | 2103 | { |
5312a3ed JG |
2104 | int ret; |
2105 | ssize_t send_ret; | |
58eb9381 | 2106 | struct relay_session *session = conn->session; |
1c20f0e2 | 2107 | struct lttcomm_relayd_index index_info; |
1c20f0e2 JD |
2108 | struct lttcomm_relayd_generic_reply reply; |
2109 | struct relay_stream *stream; | |
f8f3885c | 2110 | size_t msg_len; |
1c20f0e2 | 2111 | |
58eb9381 | 2112 | assert(conn); |
1c20f0e2 JD |
2113 | |
2114 | DBG("Relay receiving index"); | |
2115 | ||
5312a3ed | 2116 | if (!session || !conn->version_check_done) { |
1c20f0e2 JD |
2117 | ERR("Trying to close a stream before version check"); |
2118 | ret = -1; | |
2119 | goto end_no_session; | |
2120 | } | |
2121 | ||
f8f3885c MD |
2122 | msg_len = lttcomm_relayd_index_len( |
2123 | lttng_to_index_major(conn->major, conn->minor), | |
2124 | lttng_to_index_minor(conn->major, conn->minor)); | |
5312a3ed JG |
2125 | if (payload->size < msg_len) { |
2126 | ERR("Unexpected payload size in \"relay_recv_index\": expected >= %zu bytes, got %zu bytes", | |
2127 | msg_len, payload->size); | |
1c20f0e2 JD |
2128 | ret = -1; |
2129 | goto end_no_session; | |
2130 | } | |
5312a3ed JG |
2131 | memcpy(&index_info, payload->data, msg_len); |
2132 | index_info.relay_stream_id = be64toh(index_info.relay_stream_id); | |
2133 | index_info.net_seq_num = be64toh(index_info.net_seq_num); | |
2134 | index_info.packet_size = be64toh(index_info.packet_size); | |
2135 | index_info.content_size = be64toh(index_info.content_size); | |
2136 | index_info.timestamp_begin = be64toh(index_info.timestamp_begin); | |
2137 | index_info.timestamp_end = be64toh(index_info.timestamp_end); | |
2138 | index_info.events_discarded = be64toh(index_info.events_discarded); | |
2139 | index_info.stream_id = be64toh(index_info.stream_id); | |
81df238b JR |
2140 | |
2141 | if (conn->minor >= 8) { | |
2142 | index_info.stream_instance_id = | |
2143 | be64toh(index_info.stream_instance_id); | |
2144 | index_info.packet_seq_num = be64toh(index_info.packet_seq_num); | |
2145 | } | |
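/*
 * Note: stream_instance_id and packet_seq_num only exist on the wire
 * for peers speaking protocol 2.8 or later, which is why msg_len is
 * derived from the negotiated version above and these extra fields
 * are only byte-swapped in that case.
 */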
5312a3ed JG |
2146 | |
2147 | stream = stream_get_by_id(index_info.relay_stream_id); | |
1c20f0e2 | 2148 | if (!stream) { |
7591bab1 | 2149 | ERR("stream_get_by_id not found"); |
1c20f0e2 | 2150 | ret = -1; |
7591bab1 | 2151 | goto end; |
1c20f0e2 | 2152 | } |
d3e2ba59 | 2153 | |
c35f9726 JG |
2154 | pthread_mutex_lock(&stream->lock); |
2155 | ret = stream_add_index(stream, &index_info); | |
2156 | pthread_mutex_unlock(&stream->lock); | |
2157 | if (ret) { | |
7591bab1 MD |
2158 | goto end_stream_put; |
2159 | } | |
1c20f0e2 | 2160 | |
7591bab1 | 2161 | end_stream_put: |
7591bab1 | 2162 | stream_put(stream); |
7591bab1 | 2163 | end: |
53efb85a | 2164 | memset(&reply, 0, sizeof(reply)); |
1c20f0e2 JD |
2165 | if (ret < 0) { |
2166 | reply.ret_code = htobe32(LTTNG_ERR_UNK); | |
2167 | } else { | |
2168 | reply.ret_code = htobe32(LTTNG_OK); | |
2169 | } | |
58eb9381 | 2170 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
5312a3ed JG |
2171 | if (send_ret < (ssize_t) sizeof(reply)) { |
2172 | ERR("Failed to send \"recv index\" command reply (ret = %zd)", send_ret); | |
2173 | ret = -1; | |
1c20f0e2 JD |
2174 | } |
2175 | ||
2176 | end_no_session: | |
2177 | return ret; | |
2178 | } | |
2179 | ||
a4baae1b JD |
2180 | /* |
2181 | * Receive the streams_sent message. | |
2182 | * | |
2183 | * Return 0 on success else a negative value. | |
2184 | */ | |
5312a3ed JG |
2185 | static int relay_streams_sent(const struct lttcomm_relayd_hdr *recv_hdr, |
2186 | struct relay_connection *conn, | |
2187 | const struct lttng_buffer_view *payload) | |
a4baae1b | 2188 | { |
5312a3ed JG |
2189 | int ret; |
2190 | ssize_t send_ret; | |
a4baae1b JD |
2191 | struct lttcomm_relayd_generic_reply reply; |
2192 | ||
58eb9381 | 2193 | assert(conn); |
a4baae1b JD |
2194 | |
2195 | DBG("Relay receiving streams_sent"); | |
2196 | ||
5312a3ed | 2197 | if (!conn->session || !conn->version_check_done) { |
a4baae1b JD |
2198 | ERR("Trying to close a stream before version check"); |
2199 | ret = -1; | |
2200 | goto end_no_session; | |
2201 | } | |
2202 | ||
2203 | /* | |
7591bab1 MD |
2204 | * Publish every pending stream in the connection recv list which are |
2205 | * now ready to be used by the viewer. | |
4a9daf17 | 2206 | */ |
7591bab1 | 2207 | publish_connection_local_streams(conn); |
4a9daf17 | 2208 | |
53efb85a | 2209 | memset(&reply, 0, sizeof(reply)); |
a4baae1b | 2210 | reply.ret_code = htobe32(LTTNG_OK); |
58eb9381 | 2211 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
5312a3ed JG |
2212 | if (send_ret < (ssize_t) sizeof(reply)) { |
2213 | ERR("Failed to send \"streams sent\" command reply (ret = %zd)", | |
2214 | send_ret); | |
2215 | ret = -1; | |
a4baae1b JD |
2216 | } else { |
2217 | /* Success. */ | |
2218 | ret = 0; | |
2219 | } | |
2220 | ||
2221 | end_no_session: | |
2222 | return ret; | |
2223 | } | |
2224 | ||
d3ecc550 | 2225 | /* |
c35f9726 JG |
2226 | * relay_rotate_session_streams: rotate a session's streams to a new trace |
2227 | * chunk for the session rotation feature (not the tracefile rotation feature). |
d3ecc550 | 2228 | */ |
c35f9726 JG |
2229 | static int relay_rotate_session_streams( |
2230 | const struct lttcomm_relayd_hdr *recv_hdr, | |
5312a3ed JG |
2231 | struct relay_connection *conn, |
2232 | const struct lttng_buffer_view *payload) | |
d3ecc550 | 2233 | { |
30b9d5ab | 2234 | int ret = 0; |
c35f9726 | 2235 | uint32_t i; |
5312a3ed | 2236 | ssize_t send_ret; |
c35f9726 | 2237 | enum lttng_error_code reply_code = LTTNG_ERR_UNK; |
d3ecc550 | 2238 | struct relay_session *session = conn->session; |
c35f9726 JG |
2239 | struct lttcomm_relayd_rotate_streams rotate_streams; |
2240 | struct lttcomm_relayd_generic_reply reply = {}; | |
2241 | struct relay_stream *stream = NULL; | |
2242 | const size_t header_len = sizeof(struct lttcomm_relayd_rotate_streams); | |
2243 | struct lttng_trace_chunk *next_trace_chunk = NULL; | |
2244 | struct lttng_buffer_view stream_positions; | |
70626904 JG |
2245 | char chunk_id_buf[MAX_INT_DEC_LEN(uint64_t)]; |
2246 | const char *chunk_id_str = "none"; | |
d3ecc550 | 2247 | |
d3ecc550 JD |
2248 | if (!session || !conn->version_check_done) { |
2249 | ERR("Trying to rotate a stream before version check"); | |
2250 | ret = -1; | |
2251 | goto end_no_reply; | |
2252 | } | |
2253 | ||
2254 | if (session->major == 2 && session->minor < 11) { | |
2255 | ERR("Unsupported feature before 2.11"); | |
2256 | ret = -1; | |
2257 | goto end_no_reply; | |
2258 | } | |
2259 | ||
5312a3ed JG |
2260 | if (payload->size < header_len) { |
2261 | ERR("Unexpected payload size in \"relay_rotate_session_stream\": expected >= %zu bytes, got %zu bytes", | |
2262 | header_len, payload->size); | |
d3ecc550 JD |
2263 | ret = -1; |
2264 | goto end_no_reply; | |
2265 | } | |
2266 | ||
c35f9726 | 2267 | memcpy(&rotate_streams, payload->data, header_len); |
5312a3ed | 2268 | |
c35f9726 JG |
2269 | /* Convert header to host endianness. */ |
2270 | rotate_streams = (typeof(rotate_streams)) { | |
2271 | .stream_count = be32toh(rotate_streams.stream_count), | |
2272 | .new_chunk_id = (typeof(rotate_streams.new_chunk_id)) { | |
2273 | .is_set = !!rotate_streams.new_chunk_id.is_set, | |
2274 | .value = be64toh(rotate_streams.new_chunk_id.value), | |
2275 | } | |
2276 | }; | |
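/*
 * Payload layout assumed by the parsing in this function (all
 * integers in network byte order): a lttcomm_relayd_rotate_streams
 * header carrying stream_count and an optional new_chunk_id, followed
 * by stream_count lttcomm_relayd_stream_rotation_position entries
 * (stream_id, rotate_at_seq_num).
 */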
d3ecc550 | 2277 | |
c35f9726 JG |
2278 | if (rotate_streams.new_chunk_id.is_set) { |
2279 | /* | |
2280 | * Retrieve the trace chunk the stream must transition to. As | |
2281 | * per the protocol, this chunk should have been created | |
2282 | * before this command is received. | |
2283 | */ | |
2284 | next_trace_chunk = sessiond_trace_chunk_registry_get_chunk( | |
2285 | sessiond_trace_chunk_registry, | |
2286 | session->sessiond_uuid, session->id, | |
2287 | rotate_streams.new_chunk_id.value); | |
2288 | if (!next_trace_chunk) { | |
2289 | char uuid_str[UUID_STR_LEN]; | |
2290 | ||
2291 | lttng_uuid_to_str(session->sessiond_uuid, uuid_str); | |
2292 | ERR("Unknown next trace chunk in ROTATE_STREAMS command: sessiond_uuid = {%s}, session_id = %" PRIu64 | |
2293 | ", trace_chunk_id = %" PRIu64, | |
2294 | uuid_str, session->id, | |
2295 | rotate_streams.new_chunk_id.value); | |
2296 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
2297 | ret = -1; | |
2298 | goto end; | |
2299 | } | |
70626904 JG |
2300 | |
2301 | ret = snprintf(chunk_id_buf, sizeof(chunk_id_buf), "%" PRIu64, | |
2302 | rotate_streams.new_chunk_id.value); | |
2303 | if (ret < 0 || ret >= sizeof(chunk_id_buf)) { | |
2304 | chunk_id_str = "formatting error"; | |
2305 | } else { | |
2306 | chunk_id_str = chunk_id_buf; | |
2307 | } | |
ecd1a12f | 2308 | session->has_rotated = true; |
d3ecc550 JD |
2309 | } |
2310 | ||
70626904 JG |
2311 | DBG("Rotate %" PRIu32 " streams of session \"%s\" to chunk \"%s\"", |
2312 | rotate_streams.stream_count, session->session_name, | |
2313 | chunk_id_str); | |
2314 | ||
c35f9726 JG |
2315 | stream_positions = lttng_buffer_view_from_view(payload, |
2316 | sizeof(rotate_streams), -1); | |
2317 | if (!stream_positions.data || | |
2318 | stream_positions.size < | |
2319 | (rotate_streams.stream_count * | |
2320 | sizeof(struct lttcomm_relayd_stream_rotation_position))) { | |
2321 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
d3ecc550 | 2322 | ret = -1; |
5312a3ed | 2323 | goto end; |
d3ecc550 JD |
2324 | } |
2325 | ||
c35f9726 JG |
2326 | for (i = 0; i < rotate_streams.stream_count; i++) { |
2327 | struct lttcomm_relayd_stream_rotation_position *position_comm = | |
2328 | &((typeof(position_comm)) stream_positions.data)[i]; | |
2329 | const struct lttcomm_relayd_stream_rotation_position pos = { | |
2330 | .stream_id = be64toh(position_comm->stream_id), | |
2331 | .rotate_at_seq_num = be64toh( | |
2332 | position_comm->rotate_at_seq_num), | |
2333 | }; | |
5312a3ed | 2334 | |
c35f9726 JG |
2335 | stream = stream_get_by_id(pos.stream_id); |
2336 | if (!stream) { | |
2337 | reply_code = LTTNG_ERR_INVALID; | |
2338 | ret = -1; | |
2339 | goto end; | |
c6db3843 JG |
2340 | } |
2341 | ||
c35f9726 JG |
2342 | pthread_mutex_lock(&stream->lock); |
2343 | ret = stream_set_pending_rotation(stream, next_trace_chunk, | |
2344 | pos.rotate_at_seq_num); | |
2345 | pthread_mutex_unlock(&stream->lock); | |
2346 | if (ret) { | |
2347 | reply_code = LTTNG_ERR_FILE_CREATION_ERROR; | |
2348 | goto end; | |
c6db3843 | 2349 | } |
c35f9726 JG |
2350 | |
2351 | stream_put(stream); | |
2352 | stream = NULL; | |
d3ecc550 JD |
2353 | } |
2354 | ||
c35f9726 | 2355 | reply_code = LTTNG_OK; |
eaeb64a9 | 2356 | ret = 0; |
d3ecc550 | 2357 | end: |
c35f9726 JG |
2358 | if (stream) { |
2359 | stream_put(stream); | |
d3ecc550 | 2360 | } |
c35f9726 JG |
2361 | |
2362 | reply.ret_code = htobe32((uint32_t) reply_code); | |
d3ecc550 JD |
2363 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
2364 | sizeof(struct lttcomm_relayd_generic_reply), 0); | |
5312a3ed JG |
2365 | if (send_ret < (ssize_t) sizeof(reply)) { |
2366 | ERR("Failed to send \"rotate session stream\" command reply (ret = %zd)", | |
2367 | send_ret); | |
2368 | ret = -1; | |
d3ecc550 | 2369 | } |
d3ecc550 | 2370 | end_no_reply: |
c35f9726 | 2371 | lttng_trace_chunk_put(next_trace_chunk); |
d3ecc550 JD |
2372 | return ret; |
2373 | } | |
2374 | ||
e5add6d0 | 2375 | |
e5add6d0 JG |
2376 | |
2377 | /* | |
2378 | * relay_create_trace_chunk: create a new trace chunk | |
2379 | */ | |
2380 | static int relay_create_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr, | |
2381 | struct relay_connection *conn, | |
2382 | const struct lttng_buffer_view *payload) | |
2383 | { | |
2384 | int ret = 0; | |
2385 | ssize_t send_ret; | |
2386 | struct relay_session *session = conn->session; | |
2387 | struct lttcomm_relayd_create_trace_chunk *msg; | |
2388 | struct lttcomm_relayd_generic_reply reply = {}; | |
2389 | struct lttng_buffer_view header_view; | |
2390 | struct lttng_buffer_view chunk_name_view; | |
2391 | struct lttng_trace_chunk *chunk = NULL, *published_chunk = NULL; | |
2392 | enum lttng_error_code reply_code = LTTNG_OK; | |
2393 | enum lttng_trace_chunk_status chunk_status; | |
2394 | struct lttng_directory_handle session_output; | |
2395 | ||
2396 | if (!session || !conn->version_check_done) { | |
2397 | ERR("Trying to create a trace chunk before version check"); | |
2398 | ret = -1; | |
2399 | goto end_no_reply; | |
2400 | } | |
2401 | ||
2402 | if (session->major == 2 && session->minor < 11) { | |
2403 | ERR("Chunk creation command is unsupported before 2.11"); | |
2404 | ret = -1; | |
2405 | goto end_no_reply; | |
2406 | } | |
2407 | ||
2408 | header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg)); | |
2409 | if (!header_view.data) { | |
2410 | ERR("Failed to receive payload of chunk creation command"); | |
2411 | ret = -1; | |
2412 | goto end_no_reply; | |
2413 | } | |
2414 | ||
2415 | /* Convert to host endianness. */ | |
2416 | msg = (typeof(msg)) header_view.data; | |
2417 | msg->chunk_id = be64toh(msg->chunk_id); | |
2418 | msg->creation_timestamp = be64toh(msg->creation_timestamp); | |
2419 | msg->override_name_length = be32toh(msg->override_name_length); | |
2420 | ||
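/*
 * Command payload layout assumed here: a fixed
 * lttcomm_relayd_create_trace_chunk header (chunk_id,
 * creation_timestamp, override_name_length, all in network byte
 * order), optionally followed by override_name_length bytes holding a
 * null-terminated chunk name.
 */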
2421 | chunk = lttng_trace_chunk_create( | |
2422 | msg->chunk_id, msg->creation_timestamp); | |
2423 | if (!chunk) { | |
2424 | ERR("Failed to create trace chunk in trace chunk creation command"); | |
2425 | ret = -1; | |
2426 | reply_code = LTTNG_ERR_NOMEM; | |
2427 | goto end; | |
2428 | } | |
2429 | ||
2430 | if (msg->override_name_length) { | |
2431 | const char *name; | |
2432 | ||
2433 | chunk_name_view = lttng_buffer_view_from_view(payload, | |
2434 | sizeof(*msg), | |
2435 | msg->override_name_length); | |
2436 | name = chunk_name_view.data; | |
2437 | if (!name || name[msg->override_name_length - 1]) { | |
2438 | ERR("Failed to receive payload of chunk creation command"); | |
2439 | ret = -1; | |
2440 | reply_code = LTTNG_ERR_INVALID; | |
2441 | goto end; | |
2442 | } | |
2443 | ||
2444 | chunk_status = lttng_trace_chunk_override_name( | |
2445 | chunk, chunk_name_view.data); | |
2446 | switch (chunk_status) { | |
2447 | case LTTNG_TRACE_CHUNK_STATUS_OK: | |
2448 | break; | |
2449 | case LTTNG_TRACE_CHUNK_STATUS_INVALID_ARGUMENT: | |
2450 | ERR("Failed to set the name of new trace chunk in trace chunk creation command (invalid name)"); | |
2451 | reply_code = LTTNG_ERR_INVALID; | |
2452 | ret = -1; | |
2453 | goto end; | |
2454 | default: | |
2455 | ERR("Failed to set the name of new trace chunk in trace chunk creation command (unknown error)"); | |
2456 | reply_code = LTTNG_ERR_UNK; | |
2457 | ret = -1; | |
2458 | goto end; | |
2459 | } | |
2460 | } | |
2461 | ||
e5add6d0 JG |
2462 | chunk_status = lttng_trace_chunk_set_credentials_current_user(chunk); |
2463 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
2464 | reply_code = LTTNG_ERR_UNK; | |
2465 | ret = -1; | |
2466 | goto end; | |
2467 | } | |
2468 | ||
0ccc0411 JG |
2469 | ret = session_init_output_directory_handle( |
2470 | conn->session, &session_output); | |
2471 | if (ret) { | |
2472 | reply_code = LTTNG_ERR_CREATE_DIR_FAIL; | |
2473 | goto end; | |
2474 | } | |
e5add6d0 | 2475 | chunk_status = lttng_trace_chunk_set_as_owner(chunk, &session_output); |
0ccc0411 | 2476 | lttng_directory_handle_fini(&session_output); |
e5add6d0 JG |
2477 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { |
2478 | reply_code = LTTNG_ERR_UNK; | |
2479 | ret = -1; | |
2480 | goto end; | |
2481 | } | |
2482 | ||
2483 | published_chunk = sessiond_trace_chunk_registry_publish_chunk( | |
2484 | sessiond_trace_chunk_registry, | |
2485 | conn->session->sessiond_uuid, | |
2486 | conn->session->id, | |
2487 | chunk); | |
2488 | if (!published_chunk) { | |
2489 | char uuid_str[UUID_STR_LEN]; | |
2490 | ||
2491 | lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str); | |
2492 | ERR("Failed to publish chunk: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64, | |
2493 | uuid_str, | |
2494 | conn->session->id, | |
2495 | msg->chunk_id); | |
2496 | ret = -1; | |
2497 | reply_code = LTTNG_ERR_NOMEM; | |
2498 | goto end; | |
2499 | } | |
2500 | ||
2501 | pthread_mutex_lock(&conn->session->lock); | |
62bad3bf JG |
2502 | if (conn->session->pending_closure_trace_chunk) { |
2503 | /* | |
2504 | * Invalid; this means a second create_trace_chunk command was | |
2505 | * received before a close_trace_chunk. | |
2506 | */ | |
2507 | ERR("Invalid trace chunk close command received; a trace chunk is already waiting for a trace chunk close command"); | |
2508 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
2509 | ret = -1; | |
2510 | goto end_unlock_session; | |
2511 | } | |
2512 | conn->session->pending_closure_trace_chunk = | |
2513 | conn->session->current_trace_chunk; | |
e5add6d0 | 2514 | conn->session->current_trace_chunk = published_chunk; |
e5add6d0 | 2515 | published_chunk = NULL; |
62bad3bf | 2516 | end_unlock_session: |
c35f9726 | 2517 | pthread_mutex_unlock(&conn->session->lock); |
e5add6d0 JG |
2518 | end: |
2519 | reply.ret_code = htobe32((uint32_t) reply_code); | |
2520 | send_ret = conn->sock->ops->sendmsg(conn->sock, | |
2521 | &reply, | |
2522 | sizeof(struct lttcomm_relayd_generic_reply), | |
2523 | 0); | |
2524 | if (send_ret < (ssize_t) sizeof(reply)) { | |
2525 | ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)", | |
2526 | send_ret); | |
2527 | ret = -1; | |
2528 | } | |
2529 | end_no_reply: | |
2530 | lttng_trace_chunk_put(chunk); | |
2531 | lttng_trace_chunk_put(published_chunk); | |
e5add6d0 JG |
2532 | return ret; |
2533 | } | |
2534 | ||
bbc4768c JG |
2535 | /* |
2536 | * relay_close_trace_chunk: close a trace chunk | |
2537 | */ | |
2538 | static int relay_close_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr, | |
2539 | struct relay_connection *conn, | |
2540 | const struct lttng_buffer_view *payload) | |
2541 | { | |
9898f786 | 2542 | int ret = 0, buf_ret; |
bbc4768c JG |
2543 | ssize_t send_ret; |
2544 | struct relay_session *session = conn->session; | |
2545 | struct lttcomm_relayd_close_trace_chunk *msg; | |
ecd1a12f | 2546 | struct lttcomm_relayd_close_trace_chunk_reply reply = {}; |
bbc4768c JG |
2547 | struct lttng_buffer_view header_view; |
2548 | struct lttng_trace_chunk *chunk = NULL; | |
2549 | enum lttng_error_code reply_code = LTTNG_OK; | |
2550 | enum lttng_trace_chunk_status chunk_status; | |
2551 | uint64_t chunk_id; | |
c35f9726 | 2552 | LTTNG_OPTIONAL(enum lttng_trace_chunk_command_type) close_command = {}; |
bbc4768c | 2553 | time_t close_timestamp; |
ecd1a12f MD |
2554 | char closed_trace_chunk_path[LTTNG_PATH_MAX]; |
2555 | size_t path_length = 0; | |
2556 | const char *chunk_name = NULL; | |
2557 | struct lttng_dynamic_buffer reply_payload; | |
2558 | ||
2559 | lttng_dynamic_buffer_init(&reply_payload); | |
bbc4768c JG |
2560 | |
2561 | if (!session || !conn->version_check_done) { | |
2562 | ERR("Trying to close a trace chunk before version check"); | |
2563 | ret = -1; | |
2564 | goto end_no_reply; | |
2565 | } | |
2566 | ||
2567 | if (session->major == 2 && session->minor < 11) { | |
2568 | ERR("Chunk close command is unsupported before 2.11"); | |
2569 | ret = -1; | |
2570 | goto end_no_reply; | |
2571 | } | |
2572 | ||
2573 | header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg)); | |
2574 | if (!header_view.data) { | |
2575 | ERR("Failed to receive payload of chunk close command"); | |
2576 | ret = -1; | |
2577 | goto end_no_reply; | |
2578 | } | |
2579 | ||
2580 | /* Convert to host endianness. */ | |
2581 | msg = (typeof(msg)) header_view.data; | |
2582 | chunk_id = be64toh(msg->chunk_id); | |
2583 | close_timestamp = (time_t) be64toh(msg->close_timestamp); | |
2584 | close_command = (typeof(close_command)){ | |
2585 | .value = be32toh(msg->close_command.value), | |
2586 | .is_set = msg->close_command.is_set, | |
2587 | }; | |
2588 | ||
2589 | chunk = sessiond_trace_chunk_registry_get_chunk( | |
2590 | sessiond_trace_chunk_registry, | |
2591 | conn->session->sessiond_uuid, | |
2592 | conn->session->id, | |
2593 | chunk_id); | |
2594 | if (!chunk) { | |
2595 | char uuid_str[UUID_STR_LEN]; | |
2596 | ||
2597 | lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str); | |
2598 | ERR("Failed to find chunk to close: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64, | |
2599 | uuid_str, | |
2600 | conn->session->id, | |
2601 | chunk_id); | |
2602 | ret = -1; | |
2603 | reply_code = LTTNG_ERR_NOMEM; | |
2604 | goto end; | |
2605 | } | |
2606 | ||
62bad3bf JG |
2607 | pthread_mutex_lock(&session->lock); |
2608 | if (session->pending_closure_trace_chunk && | |
2609 | session->pending_closure_trace_chunk != chunk) { | |
2610 | ERR("Trace chunk close command for session \"%s\" does not target the trace chunk pending closure", | |
2611 | session->session_name); | |
2612 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
2613 | ret = -1; | |
2614 | goto end_unlock_session; | |
2615 | } | |
2616 | ||
bbc4768c JG |
2617 | chunk_status = lttng_trace_chunk_set_close_timestamp( |
2618 | chunk, close_timestamp); | |
2619 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
2620 | ERR("Failed to set trace chunk close timestamp"); | |
2621 | ret = -1; | |
2622 | reply_code = LTTNG_ERR_UNK; | |
62bad3bf | 2623 | goto end_unlock_session; |
bbc4768c JG |
2624 | } |
2625 | ||
2626 | if (close_command.is_set) { | |
2627 | chunk_status = lttng_trace_chunk_set_close_command( | |
2628 | chunk, close_command.value); | |
2629 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
2630 | ret = -1; | |
2631 | reply_code = LTTNG_ERR_INVALID; | |
62bad3bf | 2632 | goto end_unlock_session; |
bbc4768c JG |
2633 | } |
2634 | } | |
ecd1a12f MD |
2635 | chunk_status = lttng_trace_chunk_get_name(chunk, &chunk_name, NULL); |
2636 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
2637 | ERR("Failed to get chunk name"); | |
2638 | ret = -1; | |
2639 | reply_code = LTTNG_ERR_UNK; | |
2640 | goto end_unlock_session; | |
2641 | } | |
2642 | if (!session->has_rotated && !session->snapshot) { | |
2643 | ret = lttng_strncpy(closed_trace_chunk_path, | |
2644 | session->output_path, | |
2645 | sizeof(closed_trace_chunk_path)); | |
2646 | if (ret) { | |
2647 | ERR("Failed to send trace chunk path: path length of %zu bytes exceeds the maximal allowed length of %zu bytes", | |
2648 | strlen(session->output_path), | |
2649 | sizeof(closed_trace_chunk_path)); | |
2650 | reply_code = LTTNG_ERR_NOMEM; | |
2651 | ret = -1; | |
2652 | goto end_unlock_session; | |
2653 | } | |
2654 | } else { | |
2655 | if (session->snapshot) { | |
2656 | ret = snprintf(closed_trace_chunk_path, | |
2657 | sizeof(closed_trace_chunk_path), | |
2658 | "%s/%s", session->output_path, | |
2659 | chunk_name); | |
2660 | } else { | |
2661 | ret = snprintf(closed_trace_chunk_path, | |
2662 | sizeof(closed_trace_chunk_path), | |
2663 | "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY | |
2664 | "/%s", | |
2665 | session->output_path, chunk_name); | |
2666 | } | |
2667 | if (ret < 0 || ret >= sizeof(closed_trace_chunk_path)) { | |
2668 | ERR("Failed to format closed trace chunk resulting path"); | |
2669 | reply_code = ret < 0 ? LTTNG_ERR_UNK : LTTNG_ERR_NOMEM; | |
2670 | ret = -1; | |
2671 | goto end_unlock_session; | |
2672 | } | |
2673 | } | |
2674 | DBG("Reply chunk path on close: %s", closed_trace_chunk_path); | |
2675 | path_length = strlen(closed_trace_chunk_path) + 1; | |
2676 | if (path_length > UINT32_MAX) { | |
2677 | ERR("Closed trace chunk path exceeds the maximal length allowed by the protocol"); | |
2678 | ret = -1; | |
2679 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
2680 | goto end_unlock_session; | |
2681 | } | |
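/*
 * Reply layout built below: a lttcomm_relayd_close_trace_chunk_reply
 * header (generic ret_code followed by path_length) and, on success,
 * path_length bytes containing the null-terminated path of the closed
 * trace chunk.
 */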
bbc4768c | 2682 | |
c35f9726 JG |
2683 | if (session->current_trace_chunk == chunk) { |
2684 | /* | |
2685 | * After a trace chunk close command, no new streams | |
2686 | * referencing the chunk may be created. Hence, on the | |
2687 | * event that no new trace chunk have been created for | |
2688 | * the session, the reference to the current trace chunk | |
2689 | * is released in order to allow it to be reclaimed when | |
2690 | * the last stream releases its reference to it. | |
2691 | */ | |
2692 | lttng_trace_chunk_put(session->current_trace_chunk); | |
2693 | session->current_trace_chunk = NULL; | |
2694 | } | |
62bad3bf JG |
2695 | lttng_trace_chunk_put(session->pending_closure_trace_chunk); |
2696 | session->pending_closure_trace_chunk = NULL; | |
2697 | end_unlock_session: | |
c35f9726 JG |
2698 | pthread_mutex_unlock(&session->lock); |
2699 | ||
bbc4768c | 2700 | end: |
ecd1a12f MD |
2701 | reply.generic.ret_code = htobe32((uint32_t) reply_code); |
2702 | reply.path_length = htobe32((uint32_t) path_length); | |
9898f786 | 2703 | buf_ret = lttng_dynamic_buffer_append( |
ecd1a12f | 2704 | &reply_payload, &reply, sizeof(reply)); |
9898f786 | 2705 | if (buf_ret) { |
ecd1a12f MD |
2706 | ERR("Failed to append \"close trace chunk\" command reply header to payload buffer"); |
2707 | goto end_no_reply; | |
2708 | } | |
2709 | ||
2710 | if (reply_code == LTTNG_OK) { | |
9898f786 | 2711 | buf_ret = lttng_dynamic_buffer_append(&reply_payload, |
ecd1a12f | 2712 | closed_trace_chunk_path, path_length); |
9898f786 | 2713 | if (buf_ret) { |
ecd1a12f MD |
2714 | ERR("Failed to append \"close trace chunk\" command reply path to payload buffer"); |
2715 | goto end_no_reply; | |
2716 | } | |
2717 | } | |
2718 | ||
bbc4768c | 2719 | send_ret = conn->sock->ops->sendmsg(conn->sock, |
ecd1a12f MD |
2720 | reply_payload.data, |
2721 | reply_payload.size, | |
bbc4768c | 2722 | 0); |
ecd1a12f MD |
2723 | if (send_ret < reply_payload.size) { |
2724 | ERR("Failed to send \"close trace chunk\" command reply of %zu bytes (ret = %zd)", | |
2725 | reply_payload.size, send_ret); | |
bbc4768c | 2726 | ret = -1; |
ecd1a12f | 2727 | goto end_no_reply; |
bbc4768c JG |
2728 | } |
2729 | end_no_reply: | |
2730 | lttng_trace_chunk_put(chunk); | |
ecd1a12f | 2731 | lttng_dynamic_buffer_reset(&reply_payload); |
bbc4768c JG |
2732 | return ret; |
2733 | } | |
2734 | ||
c35f9726 JG |
2735 | /* |
2736 | * relay_trace_chunk_exists: check if a trace chunk exists | |
2737 | */ | |
2738 | static int relay_trace_chunk_exists(const struct lttcomm_relayd_hdr *recv_hdr, | |
2739 | struct relay_connection *conn, | |
2740 | const struct lttng_buffer_view *payload) | |
2741 | { | |
2742 | int ret = 0; | |
2743 | ssize_t send_ret; | |
2744 | struct relay_session *session = conn->session; | |
2745 | struct lttcomm_relayd_trace_chunk_exists *msg; | |
2746 | struct lttcomm_relayd_trace_chunk_exists_reply reply = {}; | |
2747 | struct lttng_buffer_view header_view; | |
c35f9726 | 2748 | uint64_t chunk_id; |
6b584c2e | 2749 | bool chunk_exists; |
c35f9726 JG |
2750 | |
2751 | if (!session || !conn->version_check_done) { | |
2752 | ERR("Trying to close a trace chunk before version check"); | |
2753 | ret = -1; | |
2754 | goto end_no_reply; | |
2755 | } | |
2756 | ||
2757 | if (session->major == 2 && session->minor < 11) { | |
2758 | ERR("Chunk close command is unsupported before 2.11"); | |
2759 | ret = -1; | |
2760 | goto end_no_reply; | |
2761 | } | |
2762 | ||
2763 | header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg)); | |
2764 | if (!header_view.data) { | |
2765 | ERR("Failed to receive payload of chunk close command"); | |
2766 | ret = -1; | |
2767 | goto end_no_reply; | |
2768 | } | |
2769 | ||
2770 | /* Convert to host endianness. */ | |
2771 | msg = (typeof(msg)) header_view.data; | |
2772 | chunk_id = be64toh(msg->chunk_id); | |
2773 | ||
6b584c2e | 2774 | ret = sessiond_trace_chunk_registry_chunk_exists( |
c35f9726 JG |
2775 | sessiond_trace_chunk_registry, |
2776 | conn->session->sessiond_uuid, | |
2777 | conn->session->id, | |
6b584c2e JG |
2778 | chunk_id, &chunk_exists); |
2779 | /* | |
2780 | * If ret is not 0, send the reply and report the error to the caller. | |
2781 | * It is a protocol (or internal) error and the session/connection | |
2782 | * should be torn down. | |
2783 | */ | |
2784 | reply = (typeof(reply)){ | |
2785 | .generic.ret_code = htobe32((uint32_t) | |
2786 | (ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID_PROTOCOL)), | |
2787 | .trace_chunk_exists = ret == 0 ? chunk_exists : 0, | |
c35f9726 | 2788 | }; |
6b584c2e JG |
2789 | send_ret = conn->sock->ops->sendmsg( |
2790 | conn->sock, &reply, sizeof(reply), 0); | |
c35f9726 JG |
2791 | if (send_ret < (ssize_t) sizeof(reply)) { |
2792 | ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)", | |
2793 | send_ret); | |
2794 | ret = -1; | |
2795 | } | |
2796 | end_no_reply: | |
c35f9726 JG |
2797 | return ret; |
2798 | } | |
2799 | ||
5312a3ed JG |
2800 | #define DBG_CMD(cmd_name, conn) \ |
2801 | DBG3("Processing \"%s\" command for socket %i", cmd_name, conn->sock->fd); | |
2802 | ||
2803 | static int relay_process_control_command(struct relay_connection *conn, | |
2804 | const struct lttcomm_relayd_hdr *header, | |
2805 | const struct lttng_buffer_view *payload) | |
b8aa1682 JD |
2806 | { |
2807 | int ret = 0; | |
2808 | ||
5312a3ed | 2809 | switch (header->cmd) { |
b8aa1682 | 2810 | case RELAYD_CREATE_SESSION: |
5312a3ed JG |
2811 | DBG_CMD("RELAYD_CREATE_SESSION", conn); |
2812 | ret = relay_create_session(header, conn, payload); | |
b8aa1682 | 2813 | break; |
b8aa1682 | 2814 | case RELAYD_ADD_STREAM: |
5312a3ed JG |
2815 | DBG_CMD("RELAYD_ADD_STREAM", conn); |
2816 | ret = relay_add_stream(header, conn, payload); | |
b8aa1682 JD |
2817 | break; |
2818 | case RELAYD_START_DATA: | |
5312a3ed JG |
2819 | DBG_CMD("RELAYD_START_DATA", conn); |
2820 | ret = relay_start(header, conn, payload); | |
b8aa1682 JD |
2821 | break; |
2822 | case RELAYD_SEND_METADATA: | |
5312a3ed JG |
2823 | DBG_CMD("RELAYD_SEND_METADATA", conn); |
2824 | ret = relay_recv_metadata(header, conn, payload); | |
b8aa1682 JD |
2825 | break; |
2826 | case RELAYD_VERSION: | |
5312a3ed JG |
2827 | DBG_CMD("RELAYD_VERSION", conn); |
2828 | ret = relay_send_version(header, conn, payload); | |
b8aa1682 | 2829 | break; |
173af62f | 2830 | case RELAYD_CLOSE_STREAM: |
5312a3ed JG |
2831 | DBG_CMD("RELAYD_CLOSE_STREAM", conn); |
2832 | ret = relay_close_stream(header, conn, payload); | |
173af62f | 2833 | break; |
6d805429 | 2834 | case RELAYD_DATA_PENDING: |
5312a3ed JG |
2835 | DBG_CMD("RELAYD_DATA_PENDING", conn); |
2836 | ret = relay_data_pending(header, conn, payload); | |
c8f59ee5 DG |
2837 | break; |
2838 | case RELAYD_QUIESCENT_CONTROL: | |
5312a3ed JG |
2839 | DBG_CMD("RELAYD_QUIESCENT_CONTROL", conn); |
2840 | ret = relay_quiescent_control(header, conn, payload); | |
c8f59ee5 | 2841 | break; |
f7079f67 | 2842 | case RELAYD_BEGIN_DATA_PENDING: |
5312a3ed JG |
2843 | DBG_CMD("RELAYD_BEGIN_DATA_PENDING", conn); |
2844 | ret = relay_begin_data_pending(header, conn, payload); | |
f7079f67 DG |
2845 | break; |
2846 | case RELAYD_END_DATA_PENDING: | |
5312a3ed JG |
2847 | DBG_CMD("RELAYD_END_DATA_PENDING", conn); |
2848 | ret = relay_end_data_pending(header, conn, payload); | |
f7079f67 | 2849 | break; |
1c20f0e2 | 2850 | case RELAYD_SEND_INDEX: |
5312a3ed JG |
2851 | DBG_CMD("RELAYD_SEND_INDEX", conn); |
2852 | ret = relay_recv_index(header, conn, payload); | |
1c20f0e2 | 2853 | break; |
a4baae1b | 2854 | case RELAYD_STREAMS_SENT: |
5312a3ed JG |
2855 | DBG_CMD("RELAYD_STREAMS_SENT", conn); |
2856 | ret = relay_streams_sent(header, conn, payload); | |
a4baae1b | 2857 | break; |
93ec662e | 2858 | case RELAYD_RESET_METADATA: |
5312a3ed JG |
2859 | DBG_CMD("RELAYD_RESET_METADATA", conn); |
2860 | ret = relay_reset_metadata(header, conn, payload); | |
93ec662e | 2861 | break; |
c35f9726 JG |
2862 | case RELAYD_ROTATE_STREAMS: |
2863 | DBG_CMD("RELAYD_ROTATE_STREAMS", conn); | |
2864 | ret = relay_rotate_session_streams(header, conn, payload); | |
d3ecc550 | 2865 | break; |
e5add6d0 JG |
2866 | case RELAYD_CREATE_TRACE_CHUNK: |
2867 | DBG_CMD("RELAYD_CREATE_TRACE_CHUNK", conn); | |
2868 | ret = relay_create_trace_chunk(header, conn, payload); | |
2869 | break; | |
bbc4768c JG |
2870 | case RELAYD_CLOSE_TRACE_CHUNK: |
2871 | DBG_CMD("RELAYD_CLOSE_TRACE_CHUNK", conn); | |
2872 | ret = relay_close_trace_chunk(header, conn, payload); | |
2873 | break; | |
c35f9726 JG |
2874 | case RELAYD_TRACE_CHUNK_EXISTS: |
2875 | DBG_CMD("RELAYD_TRACE_CHUNK_EXISTS", conn); | |
2876 | ret = relay_trace_chunk_exists(header, conn, payload); | |
2877 | break; | |
b8aa1682 JD |
2878 | case RELAYD_UPDATE_SYNC_INFO: |
2879 | default: | |
5312a3ed | 2880 | ERR("Received unknown command (%u)", header->cmd); |
58eb9381 | 2881 | relay_unknown_command(conn); |
b8aa1682 JD |
2882 | ret = -1; |
2883 | goto end; | |
2884 | } | |
2885 | ||
2886 | end: | |
2887 | return ret; | |
2888 | } | |
2889 | ||
5569b118 JG |
2890 | static enum relay_connection_status relay_process_control_receive_payload( |
2891 | struct relay_connection *conn) | |
5312a3ed JG |
2892 | { |
2893 | int ret = 0; | |
5569b118 | 2894 | enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK; |
5312a3ed JG |
2895 | struct lttng_dynamic_buffer *reception_buffer = |
2896 | &conn->protocol.ctrl.reception_buffer; | |
2897 | struct ctrl_connection_state_receive_payload *state = | |
2898 | &conn->protocol.ctrl.state.receive_payload; | |
2899 | struct lttng_buffer_view payload_view; | |
2900 | ||
2901 | if (state->left_to_receive == 0) { | |
2902 | /* Short-circuit for payload-less commands. */ | |
2903 | goto reception_complete; | |
2904 | } | |
2905 | ||
2906 | ret = conn->sock->ops->recvmsg(conn->sock, | |
2907 | reception_buffer->data + state->received, | |
2908 | state->left_to_receive, MSG_DONTWAIT); | |
2909 | if (ret < 0) { | |
5569b118 JG |
2910 | if (errno != EAGAIN && errno != EWOULDBLOCK) { |
2911 | PERROR("Unable to receive command payload on sock %d", | |
2912 | conn->sock->fd); | |
2913 | status = RELAY_CONNECTION_STATUS_ERROR; | |
2914 | } | |
5312a3ed JG |
2915 | goto end; |
2916 | } else if (ret == 0) { | |
2917 | DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd); | |
5569b118 | 2918 | status = RELAY_CONNECTION_STATUS_CLOSED; |
5312a3ed JG |
2919 | goto end; |
2920 | } | |
2921 | ||
2922 | assert(ret > 0); | |
2923 | assert(ret <= state->left_to_receive); | |
2924 | ||
2925 | state->left_to_receive -= ret; | |
2926 | state->received += ret; | |
2927 | ||
2928 | if (state->left_to_receive > 0) { | |
2929 | /* | |
2930 | * Can't transition to the protocol's next state, wait to | |
2931 | * receive the rest of the payload. | |
2932 | */ | |
2933 | DBG3("Partial reception of control connection protocol payload (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)", | |
2934 | state->received, state->left_to_receive, | |
2935 | conn->sock->fd); | |
5312a3ed JG |
2936 | goto end; |
2937 | } | |
2938 | ||
2939 | reception_complete: | |
2940 | DBG("Done receiving control command payload: fd = %i, payload size = %" PRIu64 " bytes", | |
2941 | conn->sock->fd, state->received); | |
2942 | /* | |
2943 | * The payload required to process the command has been received. | |
2944 | * A view to the reception buffer is forwarded to the various | |
2945 | * commands and the state of the control connection is reset on success. | |
2946 | * | |
2947 | * Commands are responsible for sending their reply to the peer. | |
2948 | */ | |
2949 | payload_view = lttng_buffer_view_from_dynamic_buffer(reception_buffer, | |
2950 | 0, -1); | |
2951 | ret = relay_process_control_command(conn, | |
2952 | &state->header, &payload_view); | |
2953 | if (ret < 0) { | |
5569b118 | 2954 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed JG |
2955 | goto end; |
2956 | } | |
2957 | ||
2958 | ret = connection_reset_protocol_state(conn); | |
5569b118 JG |
2959 | if (ret) { |
2960 | status = RELAY_CONNECTION_STATUS_ERROR; | |
2961 | } | |
5312a3ed | 2962 | end: |
5569b118 | 2963 | return status; |
5312a3ed JG |
2964 | } |
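/*
 * Editorial sketch, not part of the original file: the reception paths above
 * and below (control header, control payload, data header, data payload) all
 * repeat the same non-blocking bookkeeping shown here. A plain recv() stands
 * in for conn->sock->ops->recvmsg(); the helper name, result enum and
 * caller-owned counters are hypothetical, and <errno.h>, <stdint.h> and
 * <sys/socket.h> are assumed to be available.
 */
enum sketch_recv_result {
	SKETCH_RECV_AGAIN,	/* Nothing available; retry on the next poll wake-up. */
	SKETCH_RECV_CLOSED,	/* Peer performed an orderly shutdown. */
	SKETCH_RECV_ERROR,	/* Unrecoverable socket error. */
	SKETCH_RECV_PROGRESS,	/* Bytes were consumed and the counters were updated. */
};

static enum sketch_recv_result sketch_recv_nonblock(int fd, char *buf,
		uint64_t *received, uint64_t *left_to_receive)
{
	const ssize_t ret = recv(fd, buf + *received, *left_to_receive,
			MSG_DONTWAIT);

	if (ret < 0) {
		return (errno == EAGAIN || errno == EWOULDBLOCK) ?
				SKETCH_RECV_AGAIN : SKETCH_RECV_ERROR;
	} else if (ret == 0) {
		return SKETCH_RECV_CLOSED;
	}
	*received += (uint64_t) ret;
	*left_to_receive -= (uint64_t) ret;
	return SKETCH_RECV_PROGRESS;
}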
2965 | ||
5569b118 JG |
2966 | static enum relay_connection_status relay_process_control_receive_header( |
2967 | struct relay_connection *conn) | |
5312a3ed JG |
2968 | { |
2969 | int ret = 0; | |
5569b118 | 2970 | enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK; |
5312a3ed JG |
2971 | struct lttcomm_relayd_hdr header; |
2972 | struct lttng_dynamic_buffer *reception_buffer = | |
2973 | &conn->protocol.ctrl.reception_buffer; | |
2974 | struct ctrl_connection_state_receive_header *state = | |
2975 | &conn->protocol.ctrl.state.receive_header; | |
2976 | ||
2977 | assert(state->left_to_receive != 0); | |
2978 | ||
2979 | ret = conn->sock->ops->recvmsg(conn->sock, | |
2980 | reception_buffer->data + state->received, | |
2981 | state->left_to_receive, MSG_DONTWAIT); | |
2982 | if (ret < 0) { | |
5569b118 JG |
2983 | if (errno != EAGAIN && errno != EWOULDBLOCK) { |
2984 | PERROR("Unable to receive control command header on sock %d", | |
2985 | conn->sock->fd); | |
2986 | status = RELAY_CONNECTION_STATUS_ERROR; | |
2987 | } | |
5312a3ed JG |
2988 | goto end; |
2989 | } else if (ret == 0) { | |
2990 | DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd); | |
5569b118 | 2991 | status = RELAY_CONNECTION_STATUS_CLOSED; |
5312a3ed JG |
2992 | goto end; |
2993 | } | |
2994 | ||
2995 | assert(ret > 0); | |
2996 | assert(ret <= state->left_to_receive); | |
2997 | ||
2998 | state->left_to_receive -= ret; | |
2999 | state->received += ret; | |
3000 | ||
3001 | if (state->left_to_receive > 0) { | |
3002 | /* | |
3003 | * Can't transition to the protocol's next state, wait to | |
3004 | * receive the rest of the header. | |
3005 | */ | |
3006 | DBG3("Partial reception of control connection protocol header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)", | |
3007 | state->received, state->left_to_receive, | |
3008 | conn->sock->fd); | |
5312a3ed JG |
3009 | goto end; |
3010 | } | |
3011 | ||
3012 | /* Transition to next state: receiving the command's payload. */ | |
3013 | conn->protocol.ctrl.state_id = | |
3014 | CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD; | |
3015 | memcpy(&header, reception_buffer->data, sizeof(header)); | |
3016 | header.circuit_id = be64toh(header.circuit_id); | |
3017 | header.data_size = be64toh(header.data_size); | |
3018 | header.cmd = be32toh(header.cmd); | |
3019 | header.cmd_version = be32toh(header.cmd_version); | |
3020 | memcpy(&conn->protocol.ctrl.state.receive_payload.header, | |
3021 | &header, sizeof(header)); | |
3022 | ||
3023 | DBG("Done receiving control command header: fd = %i, cmd = %" PRIu32 ", cmd_version = %" PRIu32 ", payload size = %" PRIu64 " bytes", | |
3024 | conn->sock->fd, header.cmd, header.cmd_version, | |
3025 | header.data_size); | |
3026 | ||
715e6fb1 | 3027 | if (header.data_size > DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE) { |
5312a3ed JG |
3028 | ERR("Command header indicates a payload (%" PRIu64 " bytes) that exceeds the maximal payload size allowed on a control connection.", |
3029 | header.data_size); | |
5569b118 | 3030 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed JG |
3031 | goto end; |
3032 | } | |
3033 | ||
3034 | conn->protocol.ctrl.state.receive_payload.left_to_receive = | |
3035 | header.data_size; | |
3036 | conn->protocol.ctrl.state.receive_payload.received = 0; | |
3037 | ret = lttng_dynamic_buffer_set_size(reception_buffer, | |
3038 | header.data_size); | |
3039 | if (ret) { | |
5569b118 | 3040 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed JG |
3041 | goto end; |
3042 | } | |
3043 | ||
3044 | if (header.data_size == 0) { | |
3045 | /* | |
3046 | * Manually invoke the next state as the poll loop | |
3047 | * will not wake up to allow us to proceed further. | |
3048 | */ | |
5569b118 | 3049 | status = relay_process_control_receive_payload(conn); |
5312a3ed JG |
3050 | } |
3051 | end: | |
5569b118 | 3052 | return status; |
5312a3ed JG |
3053 | } |
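/*
 * Editorial sketch, not part of the original file: the control command header
 * arrives in network byte order and is converted field by field above. The
 * stand-in struct below only mirrors the four fields that
 * relay_process_control_receive_header() converts; the authoritative
 * definition of struct lttcomm_relayd_hdr lives in the sessiond-comm headers
 * and may differ in packing or field order.
 */
struct sketch_relayd_ctrl_hdr {
	uint64_t circuit_id;
	uint64_t data_size;	/* Number of payload bytes following the header. */
	uint32_t cmd;		/* RELAYD_* command code. */
	uint32_t cmd_version;	/* Protocol version of the command. */
};

static void sketch_relayd_ctrl_hdr_to_host(struct sketch_relayd_ctrl_hdr *hdr)
{
	/* Same conversions as applied to the received header above. */
	hdr->circuit_id = be64toh(hdr->circuit_id);
	hdr->data_size = be64toh(hdr->data_size);
	hdr->cmd = be32toh(hdr->cmd);
	hdr->cmd_version = be32toh(hdr->cmd_version);
}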
3054 | ||
3055 | /* | |
3056 | * Process the commands received on the control socket | |
3057 | */ | |
5569b118 JG |
3058 | static enum relay_connection_status relay_process_control( |
3059 | struct relay_connection *conn) | |
5312a3ed | 3060 | { |
5569b118 | 3061 | enum relay_connection_status status; |
5312a3ed JG |
3062 | |
3063 | switch (conn->protocol.ctrl.state_id) { | |
3064 | case CTRL_CONNECTION_STATE_RECEIVE_HEADER: | |
5569b118 | 3065 | status = relay_process_control_receive_header(conn); |
5312a3ed JG |
3066 | break; |
3067 | case CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD: | |
5569b118 | 3068 | status = relay_process_control_receive_payload(conn); |
5312a3ed JG |
3069 | break; |
3070 | default: | |
3071 | ERR("Unknown control connection protocol state encountered."); | |
3072 | abort(); | |
3073 | } | |
3074 | ||
5569b118 | 3075 | return status; |
5312a3ed JG |
3076 | } |
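/*
 * Editorial sketch, not part of the original file: a simplified illustration
 * of how a caller can act on the status returned by relay_process_control().
 * The real control-connection poll loop elsewhere in this file performs
 * additional bookkeeping; only the meaning of the three status values visible
 * above is shown, and the helper name is hypothetical.
 */
static bool sketch_handle_control_activity(struct relay_connection *conn)
{
	switch (relay_process_control(conn)) {
	case RELAY_CONNECTION_STATUS_OK:
		/* Keep the connection in the poll set. */
		return true;
	case RELAY_CONNECTION_STATUS_CLOSED:
		/* Peer hung up; tear the connection down without an error. */
		return false;
	case RELAY_CONNECTION_STATUS_ERROR:
	default:
		/* Protocol or socket error; tear the connection down. */
		return false;
	}
}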
3077 | ||
5569b118 JG |
3078 | static enum relay_connection_status relay_process_data_receive_header( |
3079 | struct relay_connection *conn) | |
b8aa1682 | 3080 | { |
5312a3ed | 3081 | int ret; |
5569b118 | 3082 | enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK; |
5312a3ed JG |
3083 | struct data_connection_state_receive_header *state = |
3084 | &conn->protocol.data.state.receive_header; | |
3085 | struct lttcomm_relayd_data_hdr header; | |
b8aa1682 | 3086 | struct relay_stream *stream; |
5312a3ed JG |
3087 | |
3088 | assert(state->left_to_receive != 0); | |
3089 | ||
3090 | ret = conn->sock->ops->recvmsg(conn->sock, | |
3091 | state->header_reception_buffer + state->received, | |
3092 | state->left_to_receive, MSG_DONTWAIT); | |
3093 | if (ret < 0) { | |
5569b118 JG |
3094 | if (errno != EAGAIN && errno != EWOULDBLOCK) { |
3095 | PERROR("Unable to receive data header on sock %d", conn->sock->fd); | |
3096 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3097 | } | |
5312a3ed JG |
3098 | goto end; |
3099 | } else if (ret == 0) { | |
3100 | /* Orderly shutdown. Not necessary to print an error. */ | |
3101 | DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd); | |
5569b118 | 3102 | status = RELAY_CONNECTION_STATUS_CLOSED; |
b8aa1682 JD |
3103 | goto end; |
3104 | } | |
3105 | ||
5312a3ed JG |
3106 | assert(ret > 0); |
3107 | assert(ret <= state->left_to_receive); | |
3108 | ||
3109 | state->left_to_receive -= ret; | |
3110 | state->received += ret; | |
3111 | ||
3112 | if (state->left_to_receive > 0) { | |
3113 | /* | |
3114 | * Can't transition to the protocol's next state, wait to | |
3115 | * receive the rest of the header. | |
3116 | */ | |
3117 | DBG3("Partial reception of data connection header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)", | |
3118 | state->received, state->left_to_receive, | |
3119 | conn->sock->fd); | |
7591bab1 | 3120 | goto end; |
b8aa1682 | 3121 | } |
b8aa1682 | 3122 | |
5312a3ed JG |
3123 | /* Transition to next state: receiving the payload. */ |
3124 | conn->protocol.data.state_id = DATA_CONNECTION_STATE_RECEIVE_PAYLOAD; | |
173af62f | 3125 | |
5312a3ed JG |
3126 | memcpy(&header, state->header_reception_buffer, sizeof(header)); |
3127 | header.circuit_id = be64toh(header.circuit_id); | |
3128 | header.stream_id = be64toh(header.stream_id); | |
3129 | header.data_size = be32toh(header.data_size); | |
3130 | header.net_seq_num = be64toh(header.net_seq_num); | |
3131 | header.padding_size = be32toh(header.padding_size); | |
3132 | memcpy(&conn->protocol.data.state.receive_payload.header, &header, sizeof(header)); | |
3133 | ||
3134 | conn->protocol.data.state.receive_payload.left_to_receive = | |
3135 | header.data_size; | |
3136 | conn->protocol.data.state.receive_payload.received = 0; | |
3137 | conn->protocol.data.state.receive_payload.rotate_index = false; | |
3138 | ||
3139 | DBG("Received data connection header on fd %i: circuit_id = %" PRIu64 ", stream_id = %" PRIu64 ", data_size = %" PRIu32 ", net_seq_num = %" PRIu64 ", padding_size = %" PRIu32, | |
3140 | conn->sock->fd, header.circuit_id, | |
3141 | header.stream_id, header.data_size, | |
3142 | header.net_seq_num, header.padding_size); | |
3143 | ||
3144 | stream = stream_get_by_id(header.stream_id); | |
3145 | if (!stream) { | |
3146 | DBG("relay_process_data_receive_payload: Cannot find stream %" PRIu64, | |
3147 | header.stream_id); | |
5569b118 JG |
3148 | /* Protocol error. */ |
3149 | status = RELAY_CONNECTION_STATUS_ERROR; | |
5312a3ed JG |
3150 | goto end; |
3151 | } | |
b8aa1682 | 3152 | |
7591bab1 | 3153 | pthread_mutex_lock(&stream->lock); |
c35f9726 JG |
3154 | /* Prepare stream for the reception of a new packet. */ |
3155 | ret = stream_init_packet(stream, header.data_size, | |
3156 | &conn->protocol.data.state.receive_payload.rotate_index); | |
3157 | pthread_mutex_unlock(&stream->lock); | |
3158 | if (ret) { | |
3159 | ERR("Failed to rotate stream output file"); | |
3160 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3161 | goto end_stream_unlock; | |
1c20f0e2 JD |
3162 | } |
3163 | ||
5312a3ed | 3164 | end_stream_unlock: |
5312a3ed JG |
3165 | stream_put(stream); |
3166 | end: | |
5569b118 | 3167 | return status; |
5312a3ed JG |
3168 | } |
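/*
 * Editorial sketch, not part of the original file: like the control header,
 * the per-packet data header is received in network byte order. The stand-in
 * struct below only mirrors the five fields that
 * relay_process_data_receive_header() converts; the authoritative definition
 * of struct lttcomm_relayd_data_hdr lives in the sessiond-comm headers.
 */
struct sketch_relayd_data_hdr {
	uint64_t circuit_id;
	uint64_t stream_id;	/* Relay stream targeted by the packet. */
	uint64_t net_seq_num;	/* Sequence number of the packet. */
	uint32_t data_size;	/* Number of packet bytes following the header. */
	uint32_t padding_size;
};

static void sketch_relayd_data_hdr_to_host(struct sketch_relayd_data_hdr *hdr)
{
	hdr->circuit_id = be64toh(hdr->circuit_id);
	hdr->stream_id = be64toh(hdr->stream_id);
	hdr->net_seq_num = be64toh(hdr->net_seq_num);
	hdr->data_size = be32toh(hdr->data_size);
	hdr->padding_size = be32toh(hdr->padding_size);
}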
3169 | ||
5569b118 JG |
3170 | static enum relay_connection_status relay_process_data_receive_payload( |
3171 | struct relay_connection *conn) | |
5312a3ed JG |
3172 | { |
3173 | int ret; | |
5569b118 | 3174 | enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK; |
5312a3ed JG |
3175 | struct relay_stream *stream; |
3176 | struct data_connection_state_receive_payload *state = | |
3177 | &conn->protocol.data.state.receive_payload; | |
3178 | const size_t chunk_size = RECV_DATA_BUFFER_SIZE; | |
3179 | char data_buffer[chunk_size]; | |
3180 | bool partial_recv = false; | |
3181 | bool new_stream = false, close_requested = false, index_flushed = false; | |
3182 | uint64_t left_to_receive = state->left_to_receive; | |
3183 | struct relay_session *session; | |
3184 | ||
fd0f1e3e JR |
3185 | DBG3("Receiving data for stream id %" PRIu64 " seqnum %" PRIu64 ", %" PRIu64" bytes received, %" PRIu64 " bytes left to receive", |
3186 | state->header.stream_id, state->header.net_seq_num, | |
3187 | state->received, left_to_receive); | |
3188 | ||
5312a3ed JG |
3189 | stream = stream_get_by_id(state->header.stream_id); |
3190 | if (!stream) { | |
5569b118 | 3191 | /* Protocol error. */ |
fd0f1e3e | 3192 | ERR("relay_process_data_receive_payload: cannot find stream %" PRIu64, |
5312a3ed | 3193 | state->header.stream_id); |
5569b118 | 3194 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed | 3195 | goto end; |
1c20f0e2 JD |
3196 | } |
3197 | ||
5312a3ed JG |
3198 | pthread_mutex_lock(&stream->lock); |
3199 | session = stream->trace->session; | |
fd0f1e3e JR |
3200 | if (!conn->session) { |
3201 | ret = connection_set_session(conn, session); | |
3202 | if (ret) { | |
3203 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3204 | goto end_stream_unlock; | |
3205 | } | |
3206 | } | |
5312a3ed JG |
3207 | |
3208 | /* | |
3209 | * The size of the "chunk" received on any iteration is bounded by: | |
3210 | * - the data left to receive, | |
3211 | * - the data immediately available on the socket, | |
3212 | * - the on-stack data buffer | |
3213 | */ | |
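	/*
	 * Editorial worked example with illustrative values: assuming
	 * RECV_DATA_BUFFER_SIZE is 64 KiB, a packet with data_size = 70000
	 * bytes is consumed in at most two iterations of the loop below:
	 * min(70000, 65536) = 65536 bytes on the first pass, then
	 * min(4464, 65536) = 4464 bytes on the second, or fewer bytes per
	 * pass when the socket currently holds less data.
	 */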
3214 | while (left_to_receive > 0 && !partial_recv) { | |
5312a3ed | 3215 | size_t recv_size = min(left_to_receive, chunk_size); |
c35f9726 | 3216 | struct lttng_buffer_view packet_chunk; |
5312a3ed JG |
3217 | |
3218 | ret = conn->sock->ops->recvmsg(conn->sock, data_buffer, | |
3219 | recv_size, MSG_DONTWAIT); | |
3220 | if (ret < 0) { | |
5569b118 JG |
3221 | if (errno != EAGAIN && errno != EWOULDBLOCK) { |
3222 | PERROR("Socket %d error", conn->sock->fd); | |
3223 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3224 | } | |
0848dba7 | 3225 | goto end_stream_unlock; |
5312a3ed JG |
3226 | } else if (ret == 0) { |
3227 | /* No more data ready to be consumed on socket. */ | |
3228 | DBG3("No more data ready for consumption on data socket of stream id %" PRIu64, | |
3229 | state->header.stream_id); | |
5569b118 | 3230 | status = RELAY_CONNECTION_STATUS_CLOSED; |
5312a3ed JG |
3231 | break; |
3232 |