src/plugins/ctf/lttng-live/viewer-connection.cpp (babeltrace.git)
1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright 2019 Francis Deslauriers <francis.deslauriers@efficios.com>
5 * Copyright 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 */
7
8 #include <glib.h>
9 #include <stdint.h>
10 #include <stdio.h>
11
12 #include <babeltrace2/babeltrace.h>
13
14 #include "common/common.h"
15 #include "compat/endian.h" /* IWYU pragma: keep */
16 #include "cpp-common/bt2s/make-unique.hpp"
17
18 #include "data-stream.hpp"
19 #include "lttng-live.hpp"
20 #include "lttng-viewer-abi.hpp"
21 #include "metadata.hpp"
22 #include "viewer-connection.hpp"
23
24 #define viewer_handle_send_recv_status(_status, _action, _msg_str) \
25 do { \
26 switch (_status) { \
27 case LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED: \
28 break; \
29 case LTTNG_LIVE_VIEWER_STATUS_ERROR: \
30 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, \
31 "Error " _action " " _msg_str); \
32 break; \
33 default: \
34 bt_common_abort(); \
35 } \
36 } while (0)
37
38 #define viewer_handle_send_status(_status, _msg_str) \
39 viewer_handle_send_recv_status(_status, "sending", _msg_str)
40
41 #define viewer_handle_recv_status(_status, _msg_str) \
42 viewer_handle_send_recv_status(_status, "receiving", _msg_str)
43
44 #define LTTNG_LIVE_CPPLOGE_APPEND_CAUSE_ERRNO(_msg, _fmt, ...) \
45 do { \
46 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, _msg ": {}" _fmt, \
47 bt_socket_errormsg(), ##__VA_ARGS__); \
48 } while (0)
49
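/*
 * Convert a viewer connection status to the equivalent live iterator
 * status (OK -> OK, INTERRUPTED -> AGAIN, ERROR -> ERROR).
 */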
50 static inline enum lttng_live_iterator_status
51 viewer_status_to_live_iterator_status(enum lttng_live_viewer_status viewer_status)
52 {
53 switch (viewer_status) {
54 case LTTNG_LIVE_VIEWER_STATUS_OK:
55 return LTTNG_LIVE_ITERATOR_STATUS_OK;
56 case LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED:
57 return LTTNG_LIVE_ITERATOR_STATUS_AGAIN;
58 case LTTNG_LIVE_VIEWER_STATUS_ERROR:
59 return LTTNG_LIVE_ITERATOR_STATUS_ERROR;
60 }
61
62 bt_common_abort();
63 }
64
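/*
 * Convert a viewer connection status to the equivalent CTF message
 * iterator medium status (OK -> OK, INTERRUPTED -> AGAIN, ERROR -> ERROR).
 */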
65 static inline enum ctf_msg_iter_medium_status
66 viewer_status_to_ctf_msg_iter_medium_status(enum lttng_live_viewer_status viewer_status)
67 {
68 switch (viewer_status) {
69 case LTTNG_LIVE_VIEWER_STATUS_OK:
70 return CTF_MSG_ITER_MEDIUM_STATUS_OK;
71 case LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED:
72 return CTF_MSG_ITER_MEDIUM_STATUS_AGAIN;
73 case LTTNG_LIVE_VIEWER_STATUS_ERROR:
74 return CTF_MSG_ITER_MEDIUM_STATUS_ERROR;
75 }
76
77 bt_common_abort();
78 }
79
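/*
 * Close the control socket of the viewer connection, if it's still open,
 * and mark it as invalid.
 */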
80 static inline void viewer_connection_close_socket(struct live_viewer_connection *viewer_connection)
81 {
82 if (viewer_connection->control_sock == BT_INVALID_SOCKET) {
83 return;
84 }
85
86 int ret = bt_socket_close(viewer_connection->control_sock);
87 if (ret == -1) {
88 BT_CPPLOGW_ERRNO_SPEC(viewer_connection->logger,
89 "Error closing viewer connection socket: ", ".");
90 }
91
92 viewer_connection->control_sock = BT_INVALID_SOCKET;
93 }
94
95 /*
96 * This function receives a message from the Relay daemon.
97 * It returns _OK if the entire message was received,
98 * _INTERRUPTED if the operation was interrupted,
99 * and _ERROR otherwise.
100 */
101 static enum lttng_live_viewer_status
102 lttng_live_recv(struct live_viewer_connection *viewer_connection, void *buf, size_t len)
103 {
104 ssize_t received;
105 size_t total_received = 0, to_receive = len;
106 struct lttng_live_msg_iter *lttng_live_msg_iter = viewer_connection->lttng_live_msg_iter;
107 BT_SOCKET sock = viewer_connection->control_sock;
108
109 /*
110 * Receive a message from the Relay.
111 */
112 do {
113 received = bt_socket_recv(sock, (char *) buf + total_received, to_receive, 0);
114 if (received == BT_SOCKET_ERROR) {
115 if (bt_socket_interrupted()) {
116 if (lttng_live_graph_is_canceled(lttng_live_msg_iter)) {
117 /*
118 * This interruption was due to a
119 * SIGINT and the graph is being torn
120 * down.
121 */
122 lttng_live_msg_iter->was_interrupted = true;
123 return LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED;
124 } else {
125 /*
126 * A signal was received, but the graph
127 * is not being torn down. Carry on.
128 */
129 continue;
130 }
131 } else {
132 /*
133 * For any other types of socket error, close
134 * the socket and return an error.
135 */
136 LTTNG_LIVE_CPPLOGE_APPEND_CAUSE_ERRNO("Error receiving from Relay", ".");
137
138 viewer_connection_close_socket(viewer_connection);
139 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
140 }
141 } else if (received == 0) {
142 /*
143 * The recv() call returned 0. This means the
144 * connection was orderly shutdown from the other peer.
145 * If that happens when we are trying to receive
146 * a message from it, it means something when wrong.
147 * Close the socket and return an error.
148 */
149 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
150 "Remote side has closed connection");
151 viewer_connection_close_socket(viewer_connection);
152 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
153 }
154
155 BT_ASSERT(received <= to_receive);
156 total_received += received;
157 to_receive -= received;
158
159 } while (to_receive > 0);
160
161 BT_ASSERT(total_received == len);
162 return LTTNG_LIVE_VIEWER_STATUS_OK;
163 }
164
165 /*
166 * This function sends a message to the Relay daemon.
167 * It returns _OK if the entire message was sent,
168 * _INTERRUPTED if the operation was interrupted,
169 * and _ERROR otherwise.
170 */
171 static enum lttng_live_viewer_status
172 lttng_live_send(struct live_viewer_connection *viewer_connection, const void *buf, size_t len)
173 {
174 struct lttng_live_msg_iter *lttng_live_msg_iter = viewer_connection->lttng_live_msg_iter;
175 BT_SOCKET sock = viewer_connection->control_sock;
176 size_t to_send = len;
177 ssize_t total_sent = 0;
178
179 do {
180 ssize_t sent = bt_socket_send_nosigpipe(sock, (char *) buf + total_sent, to_send);
181 if (sent == BT_SOCKET_ERROR) {
182 if (bt_socket_interrupted()) {
183 if (lttng_live_graph_is_canceled(lttng_live_msg_iter)) {
184 /*
185 * This interruption was a SIGINT and
186 * the graph is being torn down.
187 */
188 lttng_live_msg_iter->was_interrupted = true;
189 return LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED;
190 } else {
191 /*
192 * A signal was received, but the graph
193 * is not being torn down. Carry on.
194 */
195 continue;
196 }
197 } else {
198 /*
199 * For any other types of socket error, close
200 * the socket and return an error.
201 */
202 LTTNG_LIVE_CPPLOGE_APPEND_CAUSE_ERRNO("Error sending to Relay", ".");
203
204 viewer_connection_close_socket(viewer_connection);
205 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
206 }
207 }
208
209 BT_ASSERT(sent <= to_send);
210 total_sent += sent;
211 to_send -= sent;
212
213 } while (to_send > 0);
214
215 BT_ASSERT(total_sent == len);
216 return LTTNG_LIVE_VIEWER_STATUS_OK;
217 }
218
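/*
 * Parse the connection URL into its parts (protocol, relay hostname, port,
 * target hostname and session name) and store them in the viewer
 * connection. Returns 0 on success, -1 on error.
 */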
219 static int parse_url(struct live_viewer_connection *viewer_connection)
220 {
221 char error_buf[256] = {0};
222 struct bt_common_lttng_live_url_parts lttng_live_url_parts = {};
223 bt_common_lttng_live_url_parts_deleter partsDeleter {lttng_live_url_parts};
224
225 if (viewer_connection->url.empty()) {
226 return -1;
227 }
228
229 lttng_live_url_parts = bt_common_parse_lttng_live_url(viewer_connection->url.c_str(), error_buf,
230 sizeof(error_buf));
231 if (!lttng_live_url_parts.proto) {
232 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Invalid LTTng live URL format: {}",
233 error_buf);
234 return -1;
235 }
236 viewer_connection->proto.reset(lttng_live_url_parts.proto);
237 lttng_live_url_parts.proto = NULL;
238
239 viewer_connection->relay_hostname.reset(lttng_live_url_parts.hostname);
240 lttng_live_url_parts.hostname = NULL;
241
242 if (lttng_live_url_parts.port >= 0) {
243 viewer_connection->port = lttng_live_url_parts.port;
244 } else {
245 viewer_connection->port = LTTNG_DEFAULT_NETWORK_VIEWER_PORT;
246 }
247
248 viewer_connection->target_hostname.reset(lttng_live_url_parts.target_hostname);
249 lttng_live_url_parts.target_hostname = NULL;
250
251 if (lttng_live_url_parts.session_name) {
252 viewer_connection->session_name.reset(lttng_live_url_parts.session_name);
253 lttng_live_url_parts.session_name = NULL;
254 }
255
256 return 0;
257 }
258
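/*
 * Exchange the LTTNG_VIEWER_CONNECT command with the relay daemon and
 * negotiate the protocol version: the major versions must match and the
 * smaller of the two minor versions is used.
 */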
259 static enum lttng_live_viewer_status
260 lttng_live_handshake(struct live_viewer_connection *viewer_connection)
261 {
262 struct lttng_viewer_cmd cmd;
263 struct lttng_viewer_connect connect;
264 enum lttng_live_viewer_status status;
265 const size_t cmd_buf_len = sizeof(cmd) + sizeof(connect);
266 char cmd_buf[cmd_buf_len];
267
268 BT_CPPLOGD_SPEC(viewer_connection->logger,
269 "Handshaking with the relay daemon: cmd={}, major-version={}, minor-version={}",
270 LTTNG_VIEWER_CONNECT, LTTNG_LIVE_MAJOR, LTTNG_LIVE_MINOR);
271
272 cmd.cmd = htobe32(LTTNG_VIEWER_CONNECT);
273 cmd.data_size = htobe64((uint64_t) sizeof(connect));
274 cmd.cmd_version = htobe32(0);
275
276 connect.viewer_session_id = -1ULL; /* will be set on recv */
277 connect.major = htobe32(LTTNG_LIVE_MAJOR);
278 connect.minor = htobe32(LTTNG_LIVE_MINOR);
279 connect.type = htobe32(LTTNG_VIEWER_CLIENT_COMMAND);
280
281 /*
282 * Merge the cmd and connection request to prevent a write-write
283 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
284 * second write from being performed quickly in the presence of Nagle's algorithm.
285 */
286 memcpy(cmd_buf, &cmd, sizeof(cmd));
287 memcpy(cmd_buf + sizeof(cmd), &connect, sizeof(connect));
288
289 status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
290 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
291 viewer_handle_send_status(status, "viewer connect command");
292 return status;
293 }
294
295 status = lttng_live_recv(viewer_connection, &connect, sizeof(connect));
296 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
297 viewer_handle_recv_status(status, "viewer connect reply");
298 return status;
299 }
300
301 BT_CPPLOGI_SPEC(viewer_connection->logger, "Received viewer session ID : {}",
302 (uint64_t) be64toh(connect.viewer_session_id));
303 BT_CPPLOGI_SPEC(viewer_connection->logger, "Relayd version : {}.{}", be32toh(connect.major),
304 be32toh(connect.minor));
305
306 if (LTTNG_LIVE_MAJOR != be32toh(connect.major)) {
307 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
308 "Incompatible lttng-relayd protocol");
309 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
310 }
311 /* Use the smallest protocol version implemented. */
312 if (LTTNG_LIVE_MINOR > be32toh(connect.minor)) {
313 viewer_connection->minor = be32toh(connect.minor);
314 } else {
315 viewer_connection->minor = LTTNG_LIVE_MINOR;
316 }
317 viewer_connection->major = LTTNG_LIVE_MAJOR;
318
319 return LTTNG_LIVE_VIEWER_STATUS_OK;
320 }
321
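/*
 * Parse the URL, resolve the relay hostname, connect the control socket to
 * the relay daemon and perform the initial handshake.
 */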
322 static enum lttng_live_viewer_status
323 lttng_live_connect_viewer(struct live_viewer_connection *viewer_connection)
324 {
325 struct hostent *host;
326 struct sockaddr_in server_addr;
327 enum lttng_live_viewer_status status;
328
329 if (parse_url(viewer_connection)) {
330 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Failed to parse URL");
331 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
332 }
333
334 BT_CPPLOGD_SPEC(
335 viewer_connection->logger,
336 "Connecting to hostname : {}, port : {}, target hostname : {}, session name : {}, proto : {}",
337 viewer_connection->relay_hostname->str, viewer_connection->port,
338 !viewer_connection->target_hostname ? "<none>" : viewer_connection->target_hostname->str,
339 !viewer_connection->session_name ? "<none>" : viewer_connection->session_name->str,
340 viewer_connection->proto->str);
341
342 host = gethostbyname(viewer_connection->relay_hostname->str);
343 if (!host) {
344 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
345 "Cannot lookup hostname: hostname=\"{}\"",
346 viewer_connection->relay_hostname->str);
347 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
348 }
349
350 if ((viewer_connection->control_sock = socket(AF_INET, SOCK_STREAM, 0)) == BT_INVALID_SOCKET) {
351 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Socket creation failed: {}",
352 bt_socket_errormsg());
353 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
354 }
355
356 server_addr.sin_family = AF_INET;
357 server_addr.sin_port = htons(viewer_connection->port);
358 server_addr.sin_addr = *((struct in_addr *) host->h_addr);
359 memset(&(server_addr.sin_zero), 0, 8);
360
361 if (connect(viewer_connection->control_sock, (struct sockaddr *) &server_addr,
362 sizeof(struct sockaddr)) == BT_SOCKET_ERROR) {
363 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Connection failed: {}",
364 bt_socket_errormsg());
365 viewer_connection_close_socket(viewer_connection);
366 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
367 }
368
369 status = lttng_live_handshake(viewer_connection);
370
371 /*
372 * Only print an error and append a cause in case of error, not in case of
373 * interruption.
374 */
375 if (status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
376 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Viewer handshake failed");
377 viewer_connection_close_socket(viewer_connection);
378 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
379 } else if (status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
380 return LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED;
381 }
382
383 return LTTNG_LIVE_VIEWER_STATUS_OK;
384 }
385
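/*
 * If a session with the same hostname and session name already exists in
 * `results`, update its "stream-count" (sum) and "client-count" (max)
 * entries and set `*_found` to true. Returns 0 on success, -1 on error.
 */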
386 static int list_update_session(const bt2::ArrayValue results,
387 const struct lttng_viewer_session *session, bool *_found,
388 struct live_viewer_connection *viewer_connection)
389 {
390 bool found = false;
391
392 for (const auto value : results) {
393 const auto map = value.asMap();
394 const auto hostnameVal = map["target-hostname"];
395
396 if (!hostnameVal) {
397 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
398 "Error borrowing \"target-hostname\" entry.");
399 return -1;
400 }
401
402 const auto sessionNameVal = map["session-name"];
403
404 if (!sessionNameVal) {
405 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
406 "Error borrowing \"session-name\" entry.");
407 return -1;
408 }
409
410 const auto hostname_str = hostnameVal->asString().value();
411 const auto session_name_str = sessionNameVal->asString().value();
412
413 if (strcmp(session->hostname, hostname_str) == 0 &&
414 strcmp(session->session_name, session_name_str) == 0) {
415 uint32_t streams = be32toh(session->streams);
416 uint32_t clients = be32toh(session->clients);
417
418 found = true;
419
420 const auto streamCountVal = map["stream-count"];
421
422 if (!streamCountVal) {
423 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
424 "Error borrowing \"stream-count\" entry.");
425 return -1;
426 }
427
428 auto val = streamCountVal->asUnsignedInteger().value();
429
430 /* sum */
431 val += streams;
432 streamCountVal->asUnsignedInteger().value(val);
433
434 const auto clientCountVal = map["client-count"];
435
436 if (!clientCountVal) {
437 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
438 "Error borrowing \"client-count\" entry.");
439 return -1;
440 }
441
442 val = clientCountVal->asUnsignedInteger().value();
443
444 /* max */
445 val = std::max<uint64_t>(clients, val);
446 clientCountVal->asUnsignedInteger().value(val);
447 }
448
449 if (found) {
450 break;
451 }
452 }
453
454 *_found = found;
455 return 0;
456 }
457
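/*
 * Append a new map entry describing `session` to `results`, unless an entry
 * for the same hostname/session name already exists, in which case only its
 * counters are updated (see list_update_session()).
 */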
458 static int list_append_session(const bt2::ArrayValue results, const std::string& base_url,
459 const struct lttng_viewer_session *session,
460 struct live_viewer_connection *viewer_connection)
461 {
462 int ret = 0;
463 bool found = false;
464
465 /*
466 * If the session already exists, add the stream count to it,
467 * and do max of client counts.
468 */
469 ret = list_update_session(results, session, &found, viewer_connection);
470 if (ret || found) {
471 return ret;
472 }
473
474 const auto map = bt2::MapValue::create();
475
476 if (base_url.empty()) {
477 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error: base_url empty.");
478 return -1;
479 }
480
481 /*
482 * key = "url",
483 * value = <string>,
484 */
485 map->insert("url",
486 fmt::format("{}/host/{}/{}", base_url, session->hostname, session->session_name));
487
488 /*
489 * key = "target-hostname",
490 * value = <string>,
491 */
492 map->insert("target-hostname", session->hostname);
493
494 /*
495 * key = "session-name",
496 * value = <string>,
497 */
498 map->insert("session-name", session->session_name);
499
500 /*
501 * key = "timer-us",
502 * value = <integer>,
503 */
504 {
505 uint32_t live_timer = be32toh(session->live_timer);
506
507 map->insert("timer-us", (uint64_t) live_timer);
508 }
509
510 /*
511 * key = "stream-count",
512 * value = <integer>,
513 */
514 {
515 uint32_t streams = be32toh(session->streams);
516
517 map->insert("stream-count", (uint64_t) streams);
518 }
519
520 /*
521 * key = "client-count",
522 * value = <integer>,
523 */
524 {
525 uint32_t clients = be32toh(session->clients);
526
527 map->insert("client-count", (uint64_t) clients);
528 }
529
530 results.append(*map);
531 return 0;
532 }
533
534 /*
535 * Data structure returned:
536 *
537 * {
538 * <array> = {
539 * [n] = {
540 * <map> = {
541 * {
542 * key = "url",
543 * value = <string>,
544 * },
545 * {
546 * key = "target-hostname",
547 * value = <string>,
548 * },
549 * {
550 * key = "session-name",
551 * value = <string>,
552 * },
553 * {
554 * key = "timer-us",
555 * value = <integer>,
556 * },
557 * {
558 * key = "stream-count",
559 * value = <integer>,
560 * },
561 * {
562 * key = "client-count",
563 * value = <integer>,
564 * },
565 * },
566 * }
567 * }
568 */
569
570 bt2::Value::Shared
571 live_viewer_connection_list_sessions(struct live_viewer_connection *viewer_connection)
572 {
573 enum lttng_live_viewer_status viewer_status;
574 struct lttng_viewer_cmd cmd;
575 struct lttng_viewer_list_sessions list;
576 uint32_t i, sessions_count;
577 auto result = bt2::ArrayValue::create();
578
579 BT_CPPLOGD_SPEC(viewer_connection->logger, "Requesting list of sessions: cmd={}",
580 LTTNG_VIEWER_LIST_SESSIONS);
581
582 cmd.cmd = htobe32(LTTNG_VIEWER_LIST_SESSIONS);
583 cmd.data_size = htobe64((uint64_t) 0);
584 cmd.cmd_version = htobe32(0);
585
586 viewer_status = lttng_live_send(viewer_connection, &cmd, sizeof(cmd));
587 if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
588 BT_CPPLOGE_APPEND_CAUSE_AND_THROW_SPEC(viewer_connection->logger, bt2::Error,
589 "Error sending list sessions command");
590 } else if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
591 throw bt2c::TryAgain {};
592 }
593
594 viewer_status = lttng_live_recv(viewer_connection, &list, sizeof(list));
595 if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
596 BT_CPPLOGE_APPEND_CAUSE_AND_THROW_SPEC(viewer_connection->logger, bt2::Error,
597 "Error receiving session list");
598 } else if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
599 throw bt2c::TryAgain {};
600 }
601
602 sessions_count = be32toh(list.sessions_count);
603 for (i = 0; i < sessions_count; i++) {
604 struct lttng_viewer_session lsession;
605
606 viewer_status = lttng_live_recv(viewer_connection, &lsession, sizeof(lsession));
607 if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
608 BT_CPPLOGE_APPEND_CAUSE_AND_THROW_SPEC(viewer_connection->logger, bt2::Error,
609 "Error receiving session:");
610 } else if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
611 throw bt2c::TryAgain {};
612 }
613
614 lsession.hostname[LTTNG_VIEWER_HOST_NAME_MAX - 1] = '\0';
615 lsession.session_name[LTTNG_VIEWER_NAME_MAX - 1] = '\0';
616 if (list_append_session(*result, viewer_connection->url, &lsession, viewer_connection)) {
617 BT_CPPLOGE_APPEND_CAUSE_AND_THROW_SPEC(viewer_connection->logger, bt2::Error,
618 "Error appending session");
619 }
620 }
621
622 return result;
623 }
624
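/*
 * Ask the relay daemon for the list of sessions and add every session
 * matching the requested target hostname and session name to the message
 * iterator's internal list.
 */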
625 static enum lttng_live_viewer_status
626 lttng_live_query_session_ids(struct lttng_live_msg_iter *lttng_live_msg_iter)
627 {
628 struct lttng_viewer_cmd cmd;
629 struct lttng_viewer_list_sessions list;
630 struct lttng_viewer_session lsession;
631 uint32_t i, sessions_count;
632 uint64_t session_id;
633 enum lttng_live_viewer_status status;
634 live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection.get();
635
636 BT_CPPLOGD_SPEC(viewer_connection->logger,
637 "Asking the relay daemon for the list of sessions: cmd={}",
638 LTTNG_VIEWER_LIST_SESSIONS);
639
640 cmd.cmd = htobe32(LTTNG_VIEWER_LIST_SESSIONS);
641 cmd.data_size = htobe64((uint64_t) 0);
642 cmd.cmd_version = htobe32(0);
643
644 status = lttng_live_send(viewer_connection, &cmd, sizeof(cmd));
645 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
646 viewer_handle_send_status(status, "list sessions command");
647 return status;
648 }
649
650 status = lttng_live_recv(viewer_connection, &list, sizeof(list));
651 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
652 viewer_handle_recv_status(status, "session list reply");
653 return status;
654 }
655
656 sessions_count = be32toh(list.sessions_count);
657 for (i = 0; i < sessions_count; i++) {
658 status = lttng_live_recv(viewer_connection, &lsession, sizeof(lsession));
659 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
660 viewer_handle_recv_status(status, "session reply");
661 return status;
662 }
663 lsession.hostname[LTTNG_VIEWER_HOST_NAME_MAX - 1] = '\0';
664 lsession.session_name[LTTNG_VIEWER_NAME_MAX - 1] = '\0';
665 session_id = be64toh(lsession.id);
666
667 BT_CPPLOGI_SPEC(viewer_connection->logger,
668 "Adding session to internal list: "
669 "session-id={}, hostname=\"{}\", session-name=\"{}\"",
670 session_id, lsession.hostname, lsession.session_name);
671
672 if ((strncmp(lsession.session_name, viewer_connection->session_name->str,
673 LTTNG_VIEWER_NAME_MAX) == 0) &&
674 (strncmp(lsession.hostname, viewer_connection->target_hostname->str,
675 LTTNG_VIEWER_HOST_NAME_MAX) == 0)) {
676 if (lttng_live_add_session(lttng_live_msg_iter, session_id, lsession.hostname,
677 lsession.session_name)) {
678 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
679 "Failed to add live session");
680 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
681 }
682 }
683 }
684
685 return LTTNG_LIVE_VIEWER_STATUS_OK;
686 }
687
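/*
 * Send the LTTNG_VIEWER_CREATE_SESSION command to the relay daemon, then
 * query the IDs of the sessions matching the requested hostname and
 * session name.
 */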
688 enum lttng_live_viewer_status
689 lttng_live_create_viewer_session(struct lttng_live_msg_iter *lttng_live_msg_iter)
690 {
691 struct lttng_viewer_cmd cmd;
692 struct lttng_viewer_create_session_response resp;
693 enum lttng_live_viewer_status status;
694 live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection.get();
695
696 BT_CPPLOGD_SPEC(viewer_connection->logger, "Creating a viewer session: cmd={}",
697 LTTNG_VIEWER_CREATE_SESSION);
698
699 cmd.cmd = htobe32(LTTNG_VIEWER_CREATE_SESSION);
700 cmd.data_size = htobe64((uint64_t) 0);
701 cmd.cmd_version = htobe32(0);
702
703 status = lttng_live_send(viewer_connection, &cmd, sizeof(cmd));
704 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
705 viewer_handle_send_status(status, "create session command");
706 return status;
707 }
708
709 status = lttng_live_recv(viewer_connection, &resp, sizeof(resp));
710 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
711 viewer_handle_recv_status(status, "create session reply");
712 return status;
713 }
714
715 if (be32toh(resp.status) != LTTNG_VIEWER_CREATE_SESSION_OK) {
716 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error creating viewer session");
717 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
718 }
719
720 status = lttng_live_query_session_ids(lttng_live_msg_iter);
721 if (status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
722 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
723 "Failed to query live viewer session ids");
724 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
725 } else if (status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
726 return LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED;
727 }
728
729 return LTTNG_LIVE_VIEWER_STATUS_OK;
730 }
731
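/*
 * Receive `stream_count` stream descriptions from the relay daemon and
 * create the corresponding metadata or data stream objects within
 * `session`.
 */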
732 static enum lttng_live_viewer_status receive_streams(struct lttng_live_session *session,
733 uint32_t stream_count,
734 bt_self_message_iterator *self_msg_iter)
735 {
736 uint32_t i;
737 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
738 enum lttng_live_viewer_status status;
739 live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection.get();
740
741 BT_CPPLOGI_SPEC(viewer_connection->logger, "Getting {} new streams", stream_count);
742 for (i = 0; i < stream_count; i++) {
743 struct lttng_viewer_stream stream;
744 struct lttng_live_stream_iterator *live_stream;
745 uint64_t stream_id;
746 uint64_t ctf_trace_id;
747
748 status = lttng_live_recv(viewer_connection, &stream, sizeof(stream));
749 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
750 viewer_handle_recv_status(status, "stream reply");
751 return status;
752 }
753 stream.path_name[LTTNG_VIEWER_PATH_MAX - 1] = '\0';
754 stream.channel_name[LTTNG_VIEWER_NAME_MAX - 1] = '\0';
755 stream_id = be64toh(stream.id);
756 ctf_trace_id = be64toh(stream.ctf_trace_id);
757
758 if (stream.metadata_flag) {
759 BT_CPPLOGI_SPEC(viewer_connection->logger, " metadata stream {} : {}/{}", stream_id,
760 stream.path_name, stream.channel_name);
761 if (lttng_live_metadata_create_stream(session, ctf_trace_id, stream_id)) {
762 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
763 "Error creating metadata stream");
764 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
765 }
766 session->lazy_stream_msg_init = true;
767 } else {
768 BT_CPPLOGI_SPEC(viewer_connection->logger, " stream {} : {}/{}", stream_id,
769 stream.path_name, stream.channel_name);
770 live_stream =
771 lttng_live_stream_iterator_create(session, ctf_trace_id, stream_id, self_msg_iter);
772 if (!live_stream) {
773 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error creating stream");
774 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
775 }
776 }
777 }
778
779 return LTTNG_LIVE_VIEWER_STATUS_OK;
780 }
781
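/*
 * Attach to the given live session (LTTNG_VIEWER_ATTACH_SESSION, seeking to
 * the last position) and receive its initial list of streams.
 */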
782 enum lttng_live_viewer_status lttng_live_session_attach(struct lttng_live_session *session,
783 bt_self_message_iterator *self_msg_iter)
784 {
785 struct lttng_viewer_cmd cmd;
786 enum lttng_live_viewer_status status;
787 struct lttng_viewer_attach_session_request rq;
788 struct lttng_viewer_attach_session_response rp;
789 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
790 live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection.get();
791 uint64_t session_id = session->id;
792 uint32_t streams_count;
793 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
794 char cmd_buf[cmd_buf_len];
795
796 BT_CPPLOGD_SPEC(viewer_connection->logger,
797 "Attaching to session: cmd={}, session-id={}, seek={}",
798 LTTNG_VIEWER_ATTACH_SESSION, session_id, LTTNG_VIEWER_SEEK_LAST);
799
800 cmd.cmd = htobe32(LTTNG_VIEWER_ATTACH_SESSION);
801 cmd.data_size = htobe64((uint64_t) sizeof(rq));
802 cmd.cmd_version = htobe32(0);
803
804 memset(&rq, 0, sizeof(rq));
805 rq.session_id = htobe64(session_id);
806 // TODO: add cmd line parameter to select seek beginning
807 // rq.seek = htobe32(LTTNG_VIEWER_SEEK_BEGINNING);
808 rq.seek = htobe32(LTTNG_VIEWER_SEEK_LAST);
809
810 /*
811 * Merge the cmd and connection request to prevent a write-write
812 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
813 * second write from being performed quickly in the presence of Nagle's algorithm.
814 */
815 memcpy(cmd_buf, &cmd, sizeof(cmd));
816 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
817 status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
818 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
819 viewer_handle_send_status(status, "attach session command");
820 return status;
821 }
822
823 status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
824 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
825 viewer_handle_recv_status(status, "attach session reply");
826 return status;
827 }
828
829 streams_count = be32toh(rp.streams_count);
830 switch (be32toh(rp.status)) {
831 case LTTNG_VIEWER_ATTACH_OK:
832 break;
833 case LTTNG_VIEWER_ATTACH_UNK:
834 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Session id {} is unknown",
835 session_id);
836 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
837 case LTTNG_VIEWER_ATTACH_ALREADY:
838 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
839 "There is already a viewer attached to this session");
840 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
841 case LTTNG_VIEWER_ATTACH_NOT_LIVE:
842 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Not a live session");
843 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
844 case LTTNG_VIEWER_ATTACH_SEEK_ERR:
845 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Wrong seek parameter");
846 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
847 default:
848 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Unknown attach return code {}",
849 be32toh(rp.status));
850 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
851 }
852
853 /* We receive the initial list of streams. */
854 status = receive_streams(session, streams_count, self_msg_iter);
855 switch (status) {
856 case LTTNG_LIVE_VIEWER_STATUS_OK:
857 break;
858 case LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED:
859 return status;
860 case LTTNG_LIVE_VIEWER_STATUS_ERROR:
861 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error receiving streams");
862 return status;
863 default:
864 bt_common_abort();
865 }
866
867 session->attached = true;
868 session->new_streams_needed = false;
869
870 return LTTNG_LIVE_VIEWER_STATUS_OK;
871 }
872
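/*
 * Detach from a live session (LTTNG_VIEWER_DETACH_SESSION). This is a no-op
 * if the session is not attached or if the control socket is already
 * closed.
 */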
873 enum lttng_live_viewer_status lttng_live_session_detach(struct lttng_live_session *session)
874 {
875 struct lttng_viewer_cmd cmd;
876 enum lttng_live_viewer_status status;
877 struct lttng_viewer_detach_session_request rq;
878 struct lttng_viewer_detach_session_response rp;
879 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
880 live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection.get();
881 uint64_t session_id = session->id;
882 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
883 char cmd_buf[cmd_buf_len];
884
885 /*
886 * The session might already be detached and the viewer socket might
887 * already have been closed. This happens when this function is called
888 * while tearing down the graph after an error.
889 */
890 if (!session->attached || viewer_connection->control_sock == BT_INVALID_SOCKET) {
891 return LTTNG_LIVE_VIEWER_STATUS_OK;
892 }
893
894 BT_CPPLOGD_SPEC(viewer_connection->logger, "Detaching from session: cmd={}, session-id={}",
895 LTTNG_VIEWER_DETACH_SESSION, session_id);
896
897 cmd.cmd = htobe32(LTTNG_VIEWER_DETACH_SESSION);
898 cmd.data_size = htobe64((uint64_t) sizeof(rq));
899 cmd.cmd_version = htobe32(0);
900
901 memset(&rq, 0, sizeof(rq));
902 rq.session_id = htobe64(session_id);
903
904 /*
905 * Merge the cmd and connection request to prevent a write-write
906 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
907 * second write from being performed quickly in the presence of Nagle's algorithm.
908 */
909 memcpy(cmd_buf, &cmd, sizeof(cmd));
910 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
911 status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
912 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
913 viewer_handle_send_status(status, "detach session command");
914 return status;
915 }
916
917 status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
918 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
919 viewer_handle_recv_status(status, "detach session reply");
920 return status;
921 }
922
923 switch (be32toh(rp.status)) {
924 case LTTNG_VIEWER_DETACH_SESSION_OK:
925 break;
926 case LTTNG_VIEWER_DETACH_SESSION_UNK:
927 BT_CPPLOGW_SPEC(viewer_connection->logger, "Session id {} is unknown", session_id);
928 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
929 case LTTNG_VIEWER_DETACH_SESSION_ERR:
930 BT_CPPLOGW_SPEC(viewer_connection->logger, "Error detaching session id {}", session_id);
931 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
932 default:
933 BT_CPPLOGE_SPEC(viewer_connection->logger, "Unknown detach return code {}",
934 be32toh(rp.status));
935 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
936 }
937
938 session->attached = false;
939
940 return LTTNG_LIVE_VIEWER_STATUS_OK;
941 }
942
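/*
 * Request one metadata packet for `trace` from the relay daemon
 * (LTTNG_VIEWER_GET_METADATA) and append its payload to `buf`.
 */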
943 enum lttng_live_get_one_metadata_status
944 lttng_live_get_one_metadata_packet(struct lttng_live_trace *trace, std::vector<char>& buf)
945 {
946 uint64_t len = 0;
947 enum lttng_live_viewer_status viewer_status;
948 struct lttng_viewer_cmd cmd;
949 struct lttng_viewer_get_metadata rq;
950 struct lttng_viewer_metadata_packet rp;
951 std::vector<char> data;
952 struct lttng_live_session *session = trace->session;
953 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
954 struct lttng_live_metadata *metadata = trace->metadata.get();
955 live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection.get();
956 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
957 char cmd_buf[cmd_buf_len];
958
959 BT_CPPLOGD_SPEC(viewer_connection->logger,
960 "Requesting new metadata for trace:"
961 "cmd={}, trace-id={}, metadata-stream-id={}",
962 LTTNG_VIEWER_GET_METADATA, trace->id, metadata->stream_id);
963
964 rq.stream_id = htobe64(metadata->stream_id);
965 cmd.cmd = htobe32(LTTNG_VIEWER_GET_METADATA);
966 cmd.data_size = htobe64((uint64_t) sizeof(rq));
967 cmd.cmd_version = htobe32(0);
968
969 /*
970 * Merge the cmd and connection request to prevent a write-write
971 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
972 * second write from being performed quickly in the presence of Nagle's algorithm.
973 */
974 memcpy(cmd_buf, &cmd, sizeof(cmd));
975 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
976 viewer_status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
977 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
978 viewer_handle_send_status(viewer_status, "get metadata command");
979 return (lttng_live_get_one_metadata_status) viewer_status;
980 }
981
982 viewer_status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
983 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
984 viewer_handle_recv_status(viewer_status, "get metadata reply");
985 return (lttng_live_get_one_metadata_status) viewer_status;
986 }
987
988 switch (be32toh(rp.status)) {
989 case LTTNG_VIEWER_METADATA_OK:
990 BT_CPPLOGD_SPEC(viewer_connection->logger, "Received get_metadata response: ok");
991 break;
992 case LTTNG_VIEWER_NO_NEW_METADATA:
993 BT_CPPLOGD_SPEC(viewer_connection->logger, "Received get_metadata response: no new");
994 return LTTNG_LIVE_GET_ONE_METADATA_STATUS_END;
995 case LTTNG_VIEWER_METADATA_ERR:
996 /*
997 * The relay daemon cannot find this stream id. Maybe it's
998 * gone already. This can happen with short-lived UST apps
999 * in a per-PID session.
1000 */
1001 BT_CPPLOGD_SPEC(viewer_connection->logger, "Received get_metadata response: error");
1002 return LTTNG_LIVE_GET_ONE_METADATA_STATUS_CLOSED;
1003 default:
1004 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1005 "Received get_metadata response: unknown");
1006 return LTTNG_LIVE_GET_ONE_METADATA_STATUS_ERROR;
1007 }
1008
1009 len = be64toh(rp.len);
1010 if (len == 0) {
1011 /*
1012 * We received a `LTTNG_VIEWER_METADATA_OK` with a packet
1013 * length of 0. This means we must try again. This scenario
1014 * arises when a clear command is performed on an lttng session.
1015 */
1016 BT_CPPLOGD_SPEC(
1017 viewer_connection->logger,
1018 "Expecting a metadata packet of size 0. Retry to get a packet from the relay.");
1019 return LTTNG_LIVE_GET_ONE_METADATA_STATUS_OK;
1020 }
1021
1022 BT_CPPLOGD_SPEC(viewer_connection->logger, "Writing {} bytes to metadata", len);
1023 if (len <= 0) {
1024 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Erroneous response length");
1025 return LTTNG_LIVE_GET_ONE_METADATA_STATUS_ERROR;
1026 }
1027
1028 data.resize(len);
1029
1030 viewer_status = lttng_live_recv(viewer_connection, data.data(), len);
1031 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1032 viewer_handle_recv_status(viewer_status, "get metadata packet");
1033 return (lttng_live_get_one_metadata_status) viewer_status;
1034 }
1035
1036 /*
1037 * Write the metadata to the file handle.
1038 */
1039 buf.insert(buf.end(), data.begin(), data.end());
1040
1041 return LTTNG_LIVE_GET_ONE_METADATA_STATUS_OK;
1042 }
1043
1044 /*
1045 * Assign the fields from a lttng_viewer_index to a packet_index.
1046 */
1047 static void lttng_index_to_packet_index(struct lttng_viewer_index *lindex,
1048 struct packet_index *pindex)
1049 {
1050 BT_ASSERT(lindex);
1051 BT_ASSERT(pindex);
1052
1053 pindex->offset = be64toh(lindex->offset);
1054 pindex->packet_size = be64toh(lindex->packet_size);
1055 pindex->content_size = be64toh(lindex->content_size);
1056 pindex->ts_cycles.timestamp_begin = be64toh(lindex->timestamp_begin);
1057 pindex->ts_cycles.timestamp_end = be64toh(lindex->timestamp_end);
1058 pindex->events_discarded = be64toh(lindex->events_discarded);
1059 }
1060
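/*
 * Mark every known session as needing new streams.
 */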
1061 static void lttng_live_need_new_streams(struct lttng_live_msg_iter *lttng_live_msg_iter)
1062 {
1063 for (const auto& session : lttng_live_msg_iter->sessions) {
1064 BT_CPPLOGD_SPEC(lttng_live_msg_iter->logger,
1065 "Marking session as needing new streams: "
1066 "session-id={}",
1067 session->id);
1068 session->new_streams_needed = true;
1069 }
1070 }
1071
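/*
 * Request the next packet index for `stream` from the relay daemon
 * (LTTNG_VIEWER_GET_NEXT_INDEX), fill `index` accordingly and update the
 * stream state based on the reply (active, quiescent, end of stream, ...).
 */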
1072 enum lttng_live_iterator_status
1073 lttng_live_get_next_index(struct lttng_live_msg_iter *lttng_live_msg_iter,
1074 struct lttng_live_stream_iterator *stream, struct packet_index *index)
1075 {
1076 struct lttng_viewer_cmd cmd;
1077 struct lttng_viewer_get_next_index rq;
1078 enum lttng_live_viewer_status viewer_status;
1079 struct lttng_viewer_index rp;
1080 live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection.get();
1081 struct lttng_live_trace *trace = stream->trace;
1082 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
1083 char cmd_buf[cmd_buf_len];
1084 uint32_t flags, rp_status;
1085
1086 BT_CPPLOGD_SPEC(viewer_connection->logger,
1087 "Requesting next index for stream: cmd={}, "
1088 "viewer-stream-id={}",
1089 LTTNG_VIEWER_GET_NEXT_INDEX, stream->viewer_stream_id);
1090 cmd.cmd = htobe32(LTTNG_VIEWER_GET_NEXT_INDEX);
1091 cmd.data_size = htobe64((uint64_t) sizeof(rq));
1092 cmd.cmd_version = htobe32(0);
1093
1094 memset(&rq, 0, sizeof(rq));
1095 rq.stream_id = htobe64(stream->viewer_stream_id);
1096
1097 /*
1098 * Merge the cmd and connection request to prevent a write-write
1099 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
1100 * second write from being performed quickly in the presence of Nagle's algorithm.
1101 */
1102 memcpy(cmd_buf, &cmd, sizeof(cmd));
1103 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
1104
1105 viewer_status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
1106 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1107 viewer_handle_send_status(viewer_status, "get next index command");
1108 return viewer_status_to_live_iterator_status(viewer_status);
1109 }
1110
1111 viewer_status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
1112 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1113 viewer_handle_recv_status(viewer_status, "get next index reply");
1114 return viewer_status_to_live_iterator_status(viewer_status);
1115 }
1116
1117 flags = be32toh(rp.flags);
1118 rp_status = be32toh(rp.status);
1119
1120 BT_CPPLOGD_SPEC(
1121         viewer_connection->logger, "Received response from relay daemon: cmd={}, response={}",
1122 LTTNG_VIEWER_GET_NEXT_INDEX, static_cast<lttng_viewer_next_index_return_code>(rp_status));
1123
1124 if (flags & LTTNG_VIEWER_FLAG_NEW_STREAM) {
1125 BT_CPPLOGD_SPEC(viewer_connection->logger,
1126 "Marking all sessions as possibly needing new streams: "
1127 "response={}, response-flag=NEW_STREAM",
1128 static_cast<lttng_viewer_next_index_return_code>(rp_status));
1129 lttng_live_need_new_streams(lttng_live_msg_iter);
1130 }
1131
1132 switch (rp_status) {
1133 case LTTNG_VIEWER_INDEX_INACTIVE:
1134 {
1135 uint64_t ctf_stream_class_id;
1136
1137 memset(index, 0, sizeof(struct packet_index));
1138 index->ts_cycles.timestamp_end = be64toh(rp.timestamp_end);
1139 stream->current_inactivity_ts = index->ts_cycles.timestamp_end;
1140 ctf_stream_class_id = be64toh(rp.stream_id);
1141 if (stream->ctf_stream_class_id.is_set) {
1142 BT_ASSERT(stream->ctf_stream_class_id.value == ctf_stream_class_id);
1143 } else {
1144 stream->ctf_stream_class_id.value = ctf_stream_class_id;
1145 stream->ctf_stream_class_id.is_set = true;
1146 }
1147 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_QUIESCENT);
1148 return LTTNG_LIVE_ITERATOR_STATUS_OK;
1149 }
1150 case LTTNG_VIEWER_INDEX_OK:
1151 {
1152 uint64_t ctf_stream_class_id;
1153
1154 lttng_index_to_packet_index(&rp, index);
1155 ctf_stream_class_id = be64toh(rp.stream_id);
1156 if (stream->ctf_stream_class_id.is_set) {
1157 BT_ASSERT(stream->ctf_stream_class_id.value == ctf_stream_class_id);
1158 } else {
1159 stream->ctf_stream_class_id.value = ctf_stream_class_id;
1160 stream->ctf_stream_class_id.is_set = true;
1161 }
1162 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_ACTIVE_DATA);
1163
1164 if (flags & LTTNG_VIEWER_FLAG_NEW_METADATA) {
1165 BT_CPPLOGD_SPEC(viewer_connection->logger,
1166 "Marking trace as needing new metadata: "
1167 "response={}, response-flag=NEW_METADATA, trace-id={}",
1168 static_cast<lttng_viewer_next_index_return_code>(rp_status), trace->id);
1169 trace->metadata_stream_state = LTTNG_LIVE_METADATA_STREAM_STATE_NEEDED;
1170 }
1171 return LTTNG_LIVE_ITERATOR_STATUS_OK;
1173 }
1174 case LTTNG_VIEWER_INDEX_RETRY:
1175 memset(index, 0, sizeof(struct packet_index));
1176 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_ACTIVE_NO_DATA);
1177 return LTTNG_LIVE_ITERATOR_STATUS_AGAIN;
1178 case LTTNG_VIEWER_INDEX_HUP:
1179 memset(index, 0, sizeof(struct packet_index));
1180 index->offset = EOF;
1181 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_EOF);
1182 stream->has_stream_hung_up = true;
1183 return LTTNG_LIVE_ITERATOR_STATUS_END;
1184 case LTTNG_VIEWER_INDEX_ERR:
1185 memset(index, 0, sizeof(struct packet_index));
1186 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_ACTIVE_NO_DATA);
1187 return LTTNG_LIVE_ITERATOR_STATUS_ERROR;
1188 default:
1189 BT_CPPLOGD_SPEC(viewer_connection->logger,
1190 "Received get_next_index response: unknown value");
1191 memset(index, 0, sizeof(struct packet_index));
1192 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_ACTIVE_NO_DATA);
1193 return LTTNG_LIVE_ITERATOR_STATUS_ERROR;
1194 }
1195 }
1196
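/*
 * Request `req_len` bytes of packet data at `offset` for `stream` from the
 * relay daemon (LTTNG_VIEWER_GET_PACKET) and copy the returned payload into
 * `buf`. On success, `*recv_len` is set to the number of bytes actually
 * received.
 */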
1197 enum ctf_msg_iter_medium_status
1198 lttng_live_get_stream_bytes(struct lttng_live_msg_iter *lttng_live_msg_iter,
1199 struct lttng_live_stream_iterator *stream, uint8_t *buf,
1200 uint64_t offset, uint64_t req_len, uint64_t *recv_len)
1201 {
1202 enum lttng_live_viewer_status viewer_status;
1203 struct lttng_viewer_trace_packet rp;
1204 struct lttng_viewer_cmd cmd;
1205 struct lttng_viewer_get_packet rq;
1206 live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection.get();
1207 struct lttng_live_trace *trace = stream->trace;
1208 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
1209 char cmd_buf[cmd_buf_len];
1210 uint32_t flags, rp_status;
1211
1212 BT_CPPLOGD_SPEC(viewer_connection->logger,
1213 "Requesting data from stream: cmd={}, "
1214 "offset={}, request-len={}",
1215 LTTNG_VIEWER_GET_PACKET, offset, req_len);
1216
1217 cmd.cmd = htobe32(LTTNG_VIEWER_GET_PACKET);
1218 cmd.data_size = htobe64((uint64_t) sizeof(rq));
1219 cmd.cmd_version = htobe32(0);
1220
1221 memset(&rq, 0, sizeof(rq));
1222 rq.stream_id = htobe64(stream->viewer_stream_id);
1223 rq.offset = htobe64(offset);
1224 rq.len = htobe32(req_len);
1225
1226 /*
1227 * Merge the cmd and connection request to prevent a write-write
1228 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
1229 * second write from being performed quickly in the presence of Nagle's algorithm.
1230 */
1231 memcpy(cmd_buf, &cmd, sizeof(cmd));
1232 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
1233
1234 viewer_status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
1235 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1236 viewer_handle_send_status(viewer_status, "get data packet command");
1237 return viewer_status_to_ctf_msg_iter_medium_status(viewer_status);
1238 }
1239
1240 viewer_status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
1241 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1242 viewer_handle_recv_status(viewer_status, "get data packet reply");
1243 return viewer_status_to_ctf_msg_iter_medium_status(viewer_status);
1244 }
1245
1246 flags = be32toh(rp.flags);
1247 rp_status = be32toh(rp.status);
1248
1249 BT_CPPLOGD_SPEC(
1250 viewer_connection->logger, "Received response from relay daemon: cmd={}, response={}",
1251 LTTNG_VIEWER_GET_PACKET, static_cast<lttng_viewer_get_packet_return_code>(rp_status));
1252 switch (rp_status) {
1253 case LTTNG_VIEWER_GET_PACKET_OK:
1254 req_len = be32toh(rp.len);
1255 BT_CPPLOGD_SPEC(viewer_connection->logger,
1256 "Got packet from relay daemon: response={}, packet-len={}",
1257 static_cast<lttng_viewer_get_packet_return_code>(rp_status), req_len);
1258 break;
1259 case LTTNG_VIEWER_GET_PACKET_RETRY:
1260 /* Unimplemented by relay daemon */
1261 return CTF_MSG_ITER_MEDIUM_STATUS_AGAIN;
1262 case LTTNG_VIEWER_GET_PACKET_ERR:
1263 if (flags & LTTNG_VIEWER_FLAG_NEW_METADATA) {
1264 BT_CPPLOGD_SPEC(viewer_connection->logger,
1265 "Marking trace as needing new metadata: "
1266 "response={}, response-flag=NEW_METADATA, trace-id={}",
1267 static_cast<lttng_viewer_get_packet_return_code>(rp_status), trace->id);
1268 trace->metadata_stream_state = LTTNG_LIVE_METADATA_STREAM_STATE_NEEDED;
1269 }
1270 if (flags & LTTNG_VIEWER_FLAG_NEW_STREAM) {
1271 BT_CPPLOGD_SPEC(viewer_connection->logger,
1272 "Marking all sessions as possibly needing new streams: "
1273 "response={}, response-flag=NEW_STREAM",
1274 static_cast<lttng_viewer_get_packet_return_code>(rp_status));
1275 lttng_live_need_new_streams(lttng_live_msg_iter);
1276 }
1277 if (flags & (LTTNG_VIEWER_FLAG_NEW_METADATA | LTTNG_VIEWER_FLAG_NEW_STREAM)) {
1278 BT_CPPLOGD_SPEC(viewer_connection->logger,
1279 "Reply with any one flags set means we should retry: response={}",
1280 static_cast<lttng_viewer_get_packet_return_code>(rp_status));
1281 return CTF_MSG_ITER_MEDIUM_STATUS_AGAIN;
1282 }
1283 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1284 "Received get_data_packet response: error");
1285 return CTF_MSG_ITER_MEDIUM_STATUS_ERROR;
1286 case LTTNG_VIEWER_GET_PACKET_EOF:
1287 return CTF_MSG_ITER_MEDIUM_STATUS_EOF;
1288 default:
1289 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1290 "Received get_data_packet response: unknown ({})", rp_status);
1291 return CTF_MSG_ITER_MEDIUM_STATUS_ERROR;
1292 }
1293
1294 if (req_len == 0) {
1295 return CTF_MSG_ITER_MEDIUM_STATUS_ERROR;
1296 }
1297
1298 viewer_status = lttng_live_recv(viewer_connection, buf, req_len);
1299 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1300 viewer_handle_recv_status(viewer_status, "get data packet");
1301 return viewer_status_to_ctf_msg_iter_medium_status(viewer_status);
1302 }
1303 *recv_len = req_len;
1304
1305 return CTF_MSG_ITER_MEDIUM_STATUS_OK;
1306 }
1307
1308 /*
1309 * Request new streams for a session.
1310 */
1311 enum lttng_live_iterator_status
1312 lttng_live_session_get_new_streams(struct lttng_live_session *session,
1313 bt_self_message_iterator *self_msg_iter)
1314 {
1315 struct lttng_viewer_cmd cmd;
1316 struct lttng_viewer_new_streams_request rq;
1317 struct lttng_viewer_new_streams_response rp;
1318 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
1319 enum lttng_live_viewer_status viewer_status;
1320 live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection.get();
1321 uint32_t streams_count;
1322 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
1323 char cmd_buf[cmd_buf_len];
1324
1325 if (!session->new_streams_needed) {
1326 return LTTNG_LIVE_ITERATOR_STATUS_OK;
1327 }
1328
1329 BT_CPPLOGD_SPEC(viewer_connection->logger,
1330 "Requesting new streams for session: cmd={}, session-id={}",
1331 LTTNG_VIEWER_GET_NEW_STREAMS, session->id);
1332
1333 cmd.cmd = htobe32(LTTNG_VIEWER_GET_NEW_STREAMS);
1334 cmd.data_size = htobe64((uint64_t) sizeof(rq));
1335 cmd.cmd_version = htobe32(0);
1336
1337 memset(&rq, 0, sizeof(rq));
1338 rq.session_id = htobe64(session->id);
1339
1340 /*
1341 * Merge the cmd and connection request to prevent a write-write
1342 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
1343 * second write from being performed quickly in the presence of Nagle's algorithm.
1344 */
1345 memcpy(cmd_buf, &cmd, sizeof(cmd));
1346 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
1347
1348 viewer_status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
1349 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1350 viewer_handle_send_status(viewer_status, "get new streams command");
1351 return viewer_status_to_live_iterator_status(viewer_status);
1352 }
1353
1354 viewer_status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
1355 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1356 viewer_handle_recv_status(viewer_status, "get new streams reply");
1357 return viewer_status_to_live_iterator_status(viewer_status);
1358 }
1359
1360 streams_count = be32toh(rp.streams_count);
1361
1362 switch (be32toh(rp.status)) {
1363 case LTTNG_VIEWER_NEW_STREAMS_OK:
1364 session->new_streams_needed = false;
1365 break;
1366 case LTTNG_VIEWER_NEW_STREAMS_NO_NEW:
1367 session->new_streams_needed = false;
1368 return LTTNG_LIVE_ITERATOR_STATUS_OK;
1369 case LTTNG_VIEWER_NEW_STREAMS_HUP:
1370 session->new_streams_needed = false;
1371 session->closed = true;
1372 return LTTNG_LIVE_ITERATOR_STATUS_END;
1373 case LTTNG_VIEWER_NEW_STREAMS_ERR:
1374 BT_CPPLOGD_SPEC(viewer_connection->logger, "Received get_new_streams response: error");
1375 return LTTNG_LIVE_ITERATOR_STATUS_ERROR;
1376 default:
1377 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1378 "Received get_new_streams response: Unknown return code {}",
1379 be32toh(rp.status));
1380 return LTTNG_LIVE_ITERATOR_STATUS_ERROR;
1381 }
1382
1383 viewer_status = receive_streams(session, streams_count, self_msg_iter);
1384 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1385 viewer_handle_recv_status(viewer_status, "new streams");
1386 return viewer_status_to_live_iterator_status(viewer_status);
1387 }
1388
1389 return LTTNG_LIVE_ITERATOR_STATUS_OK;
1390 }
1391
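/*
 * Create a live viewer connection for `url`, connect to the relay daemon
 * and perform the handshake. On success, ownership of the connection is
 * transferred to `viewer`.
 */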
1392 enum lttng_live_viewer_status
1393 live_viewer_connection_create(const char *url, bool in_query,
1394 struct lttng_live_msg_iter *lttng_live_msg_iter,
1395 const bt2c::Logger& parentLogger, live_viewer_connection::UP& viewer)
1396 {
1397 auto viewer_connection = bt2s::make_unique<live_viewer_connection>(parentLogger);
1398
1399 if (bt_socket_init(viewer_connection->logger) != 0) {
1400 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Failed to init socket");
1401 return LTTNG_LIVE_VIEWER_STATUS_ERROR;
1402 }
1403
1404 viewer_connection->control_sock = BT_INVALID_SOCKET;
1405 viewer_connection->port = -1;
1406 viewer_connection->in_query = in_query;
1407 viewer_connection->lttng_live_msg_iter = lttng_live_msg_iter;
1408 viewer_connection->url = url;
1409
1410 BT_CPPLOGD_SPEC(viewer_connection->logger, "Establishing connection to url \"{}\"...", url);
1411 const auto status = lttng_live_connect_viewer(viewer_connection.get());
1412
1413 /*
1414      * Only print an error and append a cause in case of error, not in case of
1415 * interruption.
1416 */
1417 if (status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
1418 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1419 "Failed to establish connection: "
1420 "url=\"{}\"",
1421 url);
1422 return status;
1423 } else if (status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
1424 return status;
1425 }
1426 BT_CPPLOGD_SPEC(viewer_connection->logger, "Connection to url \"{}\" is established", url);
1427
1428 viewer = std::move(viewer_connection);
1429 return LTTNG_LIVE_VIEWER_STATUS_OK;
1430 }
1431
1432 live_viewer_connection::~live_viewer_connection()
1433 {
1434 BT_CPPLOGD_SPEC(this->logger, "Closing connection to relay: relay-url=\"{}\"", this->url);
1435
1436 viewer_connection_close_socket(this);
1437
1438 bt_socket_fini();
1439 }