src.ctf.lttng-live: use std::string in list_append_session
babeltrace.git: src/plugins/ctf/lttng-live/viewer-connection.cpp
1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright 2019 Francis Deslauriers <francis.deslauriers@efficios.com>
5 * Copyright 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 */
7
8 #include <glib.h>
9 #include <stdint.h>
10 #include <stdio.h>
11
12 #include <babeltrace2/babeltrace.h>
13
14 #include "common/common.h"
15 #include "compat/endian.h" /* IWYU pragma: keep */
16
17 #include "data-stream.hpp"
18 #include "lttng-live.hpp"
19 #include "lttng-viewer-abi.hpp"
20 #include "metadata.hpp"
21 #include "viewer-connection.hpp"
22
23 #define viewer_handle_send_recv_status(_status, _action, _msg_str) \
24 do { \
25 switch (_status) { \
26 case LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED: \
27 break; \
28 case LTTNG_LIVE_VIEWER_STATUS_ERROR: \
29 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, \
30 "Error " _action " " _msg_str); \
31 break; \
32 default: \
33 bt_common_abort(); \
34 } \
35 } while (0)
36
37 #define viewer_handle_send_status(_status, _msg_str) \
38 viewer_handle_send_recv_status(_status, "sending", _msg_str)
39
40 #define viewer_handle_recv_status(_status, _msg_str) \
41 viewer_handle_send_recv_status(_status, "receiving", _msg_str)
42
43 #define LTTNG_LIVE_CPPLOGE_APPEND_CAUSE_ERRNO(_msg, _fmt, ...) \
44 do { \
45 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, _msg ": {}" _fmt, \
46 bt_socket_errormsg(), ##__VA_ARGS__); \
47 } while (0)
48
49 static inline enum lttng_live_iterator_status
50 viewer_status_to_live_iterator_status(enum lttng_live_viewer_status viewer_status)
51 {
52 switch (viewer_status) {
53 case LTTNG_LIVE_VIEWER_STATUS_OK:
54 return LTTNG_LIVE_ITERATOR_STATUS_OK;
55 case LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED:
56 return LTTNG_LIVE_ITERATOR_STATUS_AGAIN;
57 case LTTNG_LIVE_VIEWER_STATUS_ERROR:
58 return LTTNG_LIVE_ITERATOR_STATUS_ERROR;
59 }
60
61 bt_common_abort();
62 }
63
64 static inline enum ctf_msg_iter_medium_status
65 viewer_status_to_ctf_msg_iter_medium_status(enum lttng_live_viewer_status viewer_status)
66 {
67 switch (viewer_status) {
68 case LTTNG_LIVE_VIEWER_STATUS_OK:
69 return CTF_MSG_ITER_MEDIUM_STATUS_OK;
70 case LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED:
71 return CTF_MSG_ITER_MEDIUM_STATUS_AGAIN;
72 case LTTNG_LIVE_VIEWER_STATUS_ERROR:
73 return CTF_MSG_ITER_MEDIUM_STATUS_ERROR;
74 }
75
76 bt_common_abort();
77 }
78
79 static inline void viewer_connection_close_socket(struct live_viewer_connection *viewer_connection)
80 {
81 int ret = bt_socket_close(viewer_connection->control_sock);
82 if (ret == -1) {
83 BT_CPPLOGW_ERRNO_SPEC(viewer_connection->logger,
84 "Error closing viewer connection socket: ", ".");
85 }
86
87 viewer_connection->control_sock = BT_INVALID_SOCKET;
88 }
89
90 /*
91 * This function receives a message from the Relay daemon.
92  * If it receives the entire message, it returns _OK;
93  * if it is interrupted, it returns _INTERRUPTED;
94  * otherwise, it returns _ERROR.
95 */
96 static enum lttng_live_viewer_status
97 lttng_live_recv(struct live_viewer_connection *viewer_connection, void *buf, size_t len)
98 {
99 ssize_t received;
100 size_t total_received = 0, to_receive = len;
101 struct lttng_live_msg_iter *lttng_live_msg_iter = viewer_connection->lttng_live_msg_iter;
102 enum lttng_live_viewer_status status;
103 BT_SOCKET sock = viewer_connection->control_sock;
104
105 /*
106 * Receive a message from the Relay.
107 */
108 do {
109 received = bt_socket_recv(sock, (char *) buf + total_received, to_receive, 0);
110 if (received == BT_SOCKET_ERROR) {
111 if (bt_socket_interrupted()) {
112 if (lttng_live_graph_is_canceled(lttng_live_msg_iter)) {
113 /*
114 * This interruption was due to a
115 * SIGINT and the graph is being torn
116 * down.
117 */
118 status = LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED;
119 lttng_live_msg_iter->was_interrupted = true;
120 goto end;
121 } else {
122 /*
123 * A signal was received, but the graph
124 * is not being torn down. Carry on.
125 */
126 continue;
127 }
128 } else {
129 /*
130 * For any other types of socket error, close
131 * the socket and return an error.
132 */
133 LTTNG_LIVE_CPPLOGE_APPEND_CAUSE_ERRNO("Error receiving from Relay", ".");
134
135 viewer_connection_close_socket(viewer_connection);
136 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
137 goto end;
138 }
139 } else if (received == 0) {
140 /*
141 * The recv() call returned 0. This means the
142  * connection was orderly shut down by the other peer.
143  * If that happens while we are trying to receive
144  * a message from it, it means something went wrong.
145 * Close the socket and return an error.
146 */
147 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
148 "Remote side has closed connection");
149 viewer_connection_close_socket(viewer_connection);
150 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
151 goto end;
152 }
153
154 BT_ASSERT(received <= to_receive);
155 total_received += received;
156 to_receive -= received;
157
158 } while (to_receive > 0);
159
160 BT_ASSERT(total_received == len);
161 status = LTTNG_LIVE_VIEWER_STATUS_OK;
162
163 end:
164 return status;
165 }
166
167 /*
168 * This function sends a message to the Relay daemon.
169  * If it sends the entire message, it returns _OK;
170  * if it is interrupted, it returns _INTERRUPTED;
171  * otherwise, it returns _ERROR.
172 */
173 static enum lttng_live_viewer_status
174 lttng_live_send(struct live_viewer_connection *viewer_connection, const void *buf, size_t len)
175 {
176 enum lttng_live_viewer_status status;
177 struct lttng_live_msg_iter *lttng_live_msg_iter = viewer_connection->lttng_live_msg_iter;
178 BT_SOCKET sock = viewer_connection->control_sock;
179 size_t to_send = len;
180 ssize_t total_sent = 0;
181
182 do {
183 ssize_t sent = bt_socket_send_nosigpipe(sock, (char *) buf + total_sent, to_send);
184 if (sent == BT_SOCKET_ERROR) {
185 if (bt_socket_interrupted()) {
186 if (lttng_live_graph_is_canceled(lttng_live_msg_iter)) {
187 /*
188  * This interruption was due to a SIGINT and
189  * the graph is being torn down.
190 */
191 status = LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED;
192 lttng_live_msg_iter->was_interrupted = true;
193 goto end;
194 } else {
195 /*
196 * A signal was received, but the graph
197  * is not being torn down. Carry on.
198 */
199 continue;
200 }
201 } else {
202 /*
203 * For any other types of socket error, close
204 * the socket and return an error.
205 */
206 LTTNG_LIVE_CPPLOGE_APPEND_CAUSE_ERRNO("Error sending to Relay", ".");
207
208 viewer_connection_close_socket(viewer_connection);
209 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
210 goto end;
211 }
212 }
213
214 BT_ASSERT(sent <= to_send);
215 total_sent += sent;
216 to_send -= sent;
217
218 } while (to_send > 0);
219
220 BT_ASSERT(total_sent == len);
221 status = LTTNG_LIVE_VIEWER_STATUS_OK;
222
223 end:
224 return status;
225 }
226
227 static int parse_url(struct live_viewer_connection *viewer_connection)
228 {
229 char error_buf[256] = {0};
230 struct bt_common_lttng_live_url_parts lttng_live_url_parts = {};
231 int ret = -1;
232 const char *path = viewer_connection->url->str;
233
234 if (!path) {
235 goto end;
236 }
237
238 lttng_live_url_parts = bt_common_parse_lttng_live_url(path, error_buf, sizeof(error_buf));
239 if (!lttng_live_url_parts.proto) {
240 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Invalid LTTng live URL format: {}",
241 error_buf);
242 goto end;
243 }
244 viewer_connection->proto = lttng_live_url_parts.proto;
245 lttng_live_url_parts.proto = NULL;
246
247 viewer_connection->relay_hostname = lttng_live_url_parts.hostname;
248 lttng_live_url_parts.hostname = NULL;
249
250 if (lttng_live_url_parts.port >= 0) {
251 viewer_connection->port = lttng_live_url_parts.port;
252 } else {
253 viewer_connection->port = LTTNG_DEFAULT_NETWORK_VIEWER_PORT;
254 }
255
256 viewer_connection->target_hostname = lttng_live_url_parts.target_hostname;
257 lttng_live_url_parts.target_hostname = NULL;
258
259 if (lttng_live_url_parts.session_name) {
260 viewer_connection->session_name = lttng_live_url_parts.session_name;
261 lttng_live_url_parts.session_name = NULL;
262 }
263
264 ret = 0;
265
266 end:
267 bt_common_destroy_lttng_live_url_parts(&lttng_live_url_parts);
268 return ret;
269 }
270
271 static enum lttng_live_viewer_status
272 lttng_live_handshake(struct live_viewer_connection *viewer_connection)
273 {
274 struct lttng_viewer_cmd cmd;
275 struct lttng_viewer_connect connect;
276 enum lttng_live_viewer_status status;
277 const size_t cmd_buf_len = sizeof(cmd) + sizeof(connect);
278 char cmd_buf[cmd_buf_len];
279
280 BT_CPPLOGD_SPEC(viewer_connection->logger,
281 "Handshaking with the relay daemon: cmd={}, major-version={}, minor-version={}",
282 LTTNG_VIEWER_CONNECT, LTTNG_LIVE_MAJOR, LTTNG_LIVE_MINOR);
283
284 cmd.cmd = htobe32(LTTNG_VIEWER_CONNECT);
285 cmd.data_size = htobe64((uint64_t) sizeof(connect));
286 cmd.cmd_version = htobe32(0);
287
288 connect.viewer_session_id = -1ULL; /* will be set on recv */
289 connect.major = htobe32(LTTNG_LIVE_MAJOR);
290 connect.minor = htobe32(LTTNG_LIVE_MINOR);
291 connect.type = htobe32(LTTNG_VIEWER_CLIENT_COMMAND);
292
293 /*
294 * Merge the cmd and connection request to prevent a write-write
295 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
296  * second write from being performed quickly in the presence of Nagle's algorithm.
297 */
298 memcpy(cmd_buf, &cmd, sizeof(cmd));
299 memcpy(cmd_buf + sizeof(cmd), &connect, sizeof(connect));
300
301 status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
302 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
303 viewer_handle_send_status(status, "viewer connect command");
304 goto end;
305 }
306
307 status = lttng_live_recv(viewer_connection, &connect, sizeof(connect));
308 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
309 viewer_handle_recv_status(status, "viewer connect reply");
310 goto end;
311 }
312
313 BT_CPPLOGI_SPEC(viewer_connection->logger, "Received viewer session ID : {}",
314 (uint64_t) be64toh(connect.viewer_session_id));
315 BT_CPPLOGI_SPEC(viewer_connection->logger, "Relayd version : {}.{}", be32toh(connect.major),
316 be32toh(connect.minor));
317
318 if (LTTNG_LIVE_MAJOR != be32toh(connect.major)) {
319 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
320 "Incompatible lttng-relayd protocol");
321 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
322 goto end;
323 }
324 /* Use the smallest protocol version implemented. */
325 if (LTTNG_LIVE_MINOR > be32toh(connect.minor)) {
326 viewer_connection->minor = be32toh(connect.minor);
327 } else {
328 viewer_connection->minor = LTTNG_LIVE_MINOR;
329 }
330 viewer_connection->major = LTTNG_LIVE_MAJOR;
331
332 status = LTTNG_LIVE_VIEWER_STATUS_OK;
333
334 goto end;
335
336 end:
337 return status;
338 }
339
340 static enum lttng_live_viewer_status
341 lttng_live_connect_viewer(struct live_viewer_connection *viewer_connection)
342 {
343 struct hostent *host;
344 struct sockaddr_in server_addr;
345 enum lttng_live_viewer_status status;
346
347 if (parse_url(viewer_connection)) {
348 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Failed to parse URL");
349 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
350 goto error;
351 }
352
353 BT_CPPLOGD_SPEC(
354 viewer_connection->logger,
355 "Connecting to hostname : {}, port : {}, target hostname : {}, session name : {}, proto : {}",
356 viewer_connection->relay_hostname->str, viewer_connection->port,
357 !viewer_connection->target_hostname ? "<none>" : viewer_connection->target_hostname->str,
358 !viewer_connection->session_name ? "<none>" : viewer_connection->session_name->str,
359 viewer_connection->proto->str);
360
361 host = gethostbyname(viewer_connection->relay_hostname->str);
362 if (!host) {
363 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
364 "Cannot lookup hostname: hostname=\"{}\"",
365 viewer_connection->relay_hostname->str);
366 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
367 goto error;
368 }
369
370 if ((viewer_connection->control_sock = socket(AF_INET, SOCK_STREAM, 0)) == BT_INVALID_SOCKET) {
371 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Socket creation failed: {}",
372 bt_socket_errormsg());
373 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
374 goto error;
375 }
376
377 server_addr.sin_family = AF_INET;
378 server_addr.sin_port = htons(viewer_connection->port);
379 server_addr.sin_addr = *((struct in_addr *) host->h_addr);
380 memset(&(server_addr.sin_zero), 0, 8);
381
382 if (connect(viewer_connection->control_sock, (struct sockaddr *) &server_addr,
383 sizeof(struct sockaddr)) == BT_SOCKET_ERROR) {
384 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Connection failed: {}",
385 bt_socket_errormsg());
386 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
387 goto error;
388 }
389
390 status = lttng_live_handshake(viewer_connection);
391
392 /*
393      * Only print the error and append the error cause in case of error,
394      * not in case of interruption.
395 */
396 if (status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
397 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Viewer handshake failed");
398 goto error;
399 } else if (status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
400 goto end;
401 }
402
403 goto end;
404
405 error:
406 if (viewer_connection->control_sock != BT_INVALID_SOCKET) {
407 if (bt_socket_close(viewer_connection->control_sock) == BT_SOCKET_ERROR) {
408 BT_CPPLOGW_SPEC(viewer_connection->logger, "Error closing socket: {}.",
409 bt_socket_errormsg());
410 }
411 }
412 viewer_connection->control_sock = BT_INVALID_SOCKET;
413 end:
414 return status;
415 }
416
417 static void lttng_live_disconnect_viewer(struct live_viewer_connection *viewer_connection)
418 {
419 if (viewer_connection->control_sock == BT_INVALID_SOCKET) {
420 return;
421 }
422 if (bt_socket_close(viewer_connection->control_sock) == BT_SOCKET_ERROR) {
423 BT_CPPLOGW_SPEC(viewer_connection->logger, "Error closing socket: {}",
424 bt_socket_errormsg());
425 viewer_connection->control_sock = BT_INVALID_SOCKET;
426 }
427 }
428
429 static int list_update_session(bt_value *results, const struct lttng_viewer_session *session,
430 bool *_found, struct live_viewer_connection *viewer_connection)
431 {
432 int ret = 0;
433 uint64_t i, len;
434 bt_value *map = NULL;
435 bt_value *hostname = NULL;
436 bt_value *session_name = NULL;
437 bt_value *btval = NULL;
438 bool found = false;
439
440 len = bt_value_array_get_length(results);
441 for (i = 0; i < len; i++) {
442 const char *hostname_str = NULL;
443 const char *session_name_str = NULL;
444
445 map = bt_value_array_borrow_element_by_index(results, i);
446 hostname = bt_value_map_borrow_entry_value(map, "target-hostname");
447 if (!hostname) {
448 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
449 "Error borrowing \"target-hostname\" entry.");
450 ret = -1;
451 goto end;
452 }
453 session_name = bt_value_map_borrow_entry_value(map, "session-name");
454 if (!session_name) {
455 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
456 "Error borrowing \"session-name\" entry.");
457 ret = -1;
458 goto end;
459 }
460 hostname_str = bt_value_string_get(hostname);
461 session_name_str = bt_value_string_get(session_name);
462
463 if (strcmp(session->hostname, hostname_str) == 0 &&
464 strcmp(session->session_name, session_name_str) == 0) {
465 int64_t val;
466 uint32_t streams = be32toh(session->streams);
467 uint32_t clients = be32toh(session->clients);
468
469 found = true;
470
471 btval = bt_value_map_borrow_entry_value(map, "stream-count");
472 if (!btval) {
473 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
474 "Error borrowing \"stream-count\" entry.");
475 ret = -1;
476 goto end;
477 }
478 val = bt_value_integer_unsigned_get(btval);
479 /* sum */
480 val += streams;
481 bt_value_integer_unsigned_set(btval, val);
482
483 btval = bt_value_map_borrow_entry_value(map, "client-count");
484 if (!btval) {
485 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
486 "Error borrowing \"client-count\" entry.");
487 ret = -1;
488 goto end;
489 }
490 val = bt_value_integer_unsigned_get(btval);
491 /* max */
492 val = bt_max_t(int64_t, clients, val);
493 bt_value_integer_unsigned_set(btval, val);
494 }
495
496 if (found) {
497 break;
498 }
499 }
500 end:
501 *_found = found;
502 return ret;
503 }
504
505 static int list_append_session(bt_value *results, GString *base_url,
506 const struct lttng_viewer_session *session,
507 struct live_viewer_connection *viewer_connection)
508 {
509 int ret = 0;
510 bt_value_map_insert_entry_status insert_status;
511 bt_value_array_append_element_status append_status;
512 bt_value *map = NULL;
513 bool found = false;
514
515 /*
516      * If the session already exists, add its stream count to the existing
517      * entry and keep the maximum of the client counts.
518 */
519 ret = list_update_session(results, session, &found, viewer_connection);
520 if (ret || found) {
521 goto end;
522 }
523
524 map = bt_value_map_create();
525 if (!map) {
526 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error creating map value.");
527 ret = -1;
528 goto end;
529 }
530
531 if (base_url->len < 1) {
532 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
533 "Error: base_url length smaller than 1.");
534 ret = -1;
535 goto end;
536 }
537 /*
538 * key = "url",
539 * value = <string>,
540 */
541 insert_status = bt_value_map_insert_string_entry(
542 map, "url",
543 fmt::format("{}/host/{}/{}", base_url->str, session->hostname, session->session_name)
544 .c_str());
545 if (insert_status != BT_VALUE_MAP_INSERT_ENTRY_STATUS_OK) {
546 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error inserting \"url\" entry.");
547 ret = -1;
548 goto end;
549 }
550
551 /*
552 * key = "target-hostname",
553 * value = <string>,
554 */
555 insert_status = bt_value_map_insert_string_entry(map, "target-hostname", session->hostname);
556 if (insert_status != BT_VALUE_MAP_INSERT_ENTRY_STATUS_OK) {
557 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
558 "Error inserting \"target-hostname\" entry.");
559 ret = -1;
560 goto end;
561 }
562
563 /*
564 * key = "session-name",
565 * value = <string>,
566 */
567 insert_status = bt_value_map_insert_string_entry(map, "session-name", session->session_name);
568 if (insert_status != BT_VALUE_MAP_INSERT_ENTRY_STATUS_OK) {
569 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
570 "Error inserting \"session-name\" entry.");
571 ret = -1;
572 goto end;
573 }
574
575 /*
576 * key = "timer-us",
577 * value = <integer>,
578 */
579 {
580 uint32_t live_timer = be32toh(session->live_timer);
581
582 insert_status = bt_value_map_insert_unsigned_integer_entry(map, "timer-us", live_timer);
583 if (insert_status != BT_VALUE_MAP_INSERT_ENTRY_STATUS_OK) {
584 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
585 "Error inserting \"timer-us\" entry.");
586 ret = -1;
587 goto end;
588 }
589 }
590
591 /*
592 * key = "stream-count",
593 * value = <integer>,
594 */
595 {
596 uint32_t streams = be32toh(session->streams);
597
598 insert_status = bt_value_map_insert_unsigned_integer_entry(map, "stream-count", streams);
599 if (insert_status != BT_VALUE_MAP_INSERT_ENTRY_STATUS_OK) {
600 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
601 "Error inserting \"stream-count\" entry.");
602 ret = -1;
603 goto end;
604 }
605 }
606
607 /*
608 * key = "client-count",
609 * value = <integer>,
610 */
611 {
612 uint32_t clients = be32toh(session->clients);
613
614 insert_status = bt_value_map_insert_unsigned_integer_entry(map, "client-count", clients);
615 if (insert_status != BT_VALUE_MAP_INSERT_ENTRY_STATUS_OK) {
616 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
617 "Error inserting \"client-count\" entry.");
618 ret = -1;
619 goto end;
620 }
621 }
622
623 append_status = bt_value_array_append_element(results, map);
624 if (append_status != BT_VALUE_ARRAY_APPEND_ELEMENT_STATUS_OK) {
625 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error appending map to results.");
626 ret = -1;
627 }
628
629 end:
630 BT_VALUE_PUT_REF_AND_RESET(map);
631 return ret;
632 }
633
634 /*
635 * Data structure returned:
636 *
637 * {
638 * <array> = {
639 * [n] = {
640 * <map> = {
641 * {
642 * key = "url",
643 * value = <string>,
644 * },
645 * {
646 * key = "target-hostname",
647 * value = <string>,
648 * },
649 * {
650 * key = "session-name",
651 * value = <string>,
652 * },
653 * {
654 * key = "timer-us",
655 * value = <integer>,
656 * },
657 * {
658 * key = "stream-count",
659 * value = <integer>,
660 * },
661 * {
662 * key = "client-count",
663 * value = <integer>,
664 * },
665 * },
666 * }
667 * }
668  */
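/*
 * For instance (illustrative values only, not taken from a real trace), a
 * single session could be reported as:
 *
 *     [
 *       {
 *         "url" = "net://relayd-host/host/target-host/my-session",
 *         "target-hostname" = "target-host",
 *         "session-name" = "my-session",
 *         "timer-us" = 1000000,
 *         "stream-count" = 4,
 *         "client-count" = 1,
 *       },
 *     ]
 */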
669
670 bt_component_class_query_method_status
671 live_viewer_connection_list_sessions(struct live_viewer_connection *viewer_connection,
672 const bt_value **user_result)
673 {
674 bt_component_class_query_method_status status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_OK;
675 bt_value *result = NULL;
676 enum lttng_live_viewer_status viewer_status;
677 struct lttng_viewer_cmd cmd;
678 struct lttng_viewer_list_sessions list;
679 uint32_t i, sessions_count;
680
681 result = bt_value_array_create();
682 if (!result) {
683 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error creating array");
684 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_MEMORY_ERROR;
685 goto error;
686 }
687
688 BT_CPPLOGD_SPEC(viewer_connection->logger, "Requesting list of sessions: cmd={}",
689 LTTNG_VIEWER_LIST_SESSIONS);
690
691 cmd.cmd = htobe32(LTTNG_VIEWER_LIST_SESSIONS);
692 cmd.data_size = htobe64((uint64_t) 0);
693 cmd.cmd_version = htobe32(0);
694
695 viewer_status = lttng_live_send(viewer_connection, &cmd, sizeof(cmd));
696 if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
697 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
698 "Error sending list sessions command");
699 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_ERROR;
700 goto error;
701 } else if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
702 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_AGAIN;
703 goto error;
704 }
705
706 viewer_status = lttng_live_recv(viewer_connection, &list, sizeof(list));
707 if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
708 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error receiving session list");
709 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_ERROR;
710 goto error;
711 } else if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
712 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_AGAIN;
713 goto error;
714 }
715
716 sessions_count = be32toh(list.sessions_count);
717 for (i = 0; i < sessions_count; i++) {
718 struct lttng_viewer_session lsession;
719
720 viewer_status = lttng_live_recv(viewer_connection, &lsession, sizeof(lsession));
721 if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
722 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error receiving session:");
723 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_ERROR;
724 goto error;
725 } else if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
726 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_AGAIN;
727 goto error;
728 }
729
730 lsession.hostname[LTTNG_VIEWER_HOST_NAME_MAX - 1] = '\0';
731 lsession.session_name[LTTNG_VIEWER_NAME_MAX - 1] = '\0';
732 if (list_append_session(result, viewer_connection->url, &lsession, viewer_connection)) {
733 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error appending session");
734 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_ERROR;
735 goto error;
736 }
737 }
738
739 *user_result = result;
740 goto end;
741 error:
742 BT_VALUE_PUT_REF_AND_RESET(result);
743 end:
744 return status;
745 }
746
747 static enum lttng_live_viewer_status
748 lttng_live_query_session_ids(struct lttng_live_msg_iter *lttng_live_msg_iter)
749 {
750 struct lttng_viewer_cmd cmd;
751 struct lttng_viewer_list_sessions list;
752 struct lttng_viewer_session lsession;
753 uint32_t i, sessions_count;
754 uint64_t session_id;
755 enum lttng_live_viewer_status status;
756 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
757
758 BT_CPPLOGD_SPEC(viewer_connection->logger,
759 "Asking the relay daemon for the list of sessions: cmd={}",
760 LTTNG_VIEWER_LIST_SESSIONS);
761
762 cmd.cmd = htobe32(LTTNG_VIEWER_LIST_SESSIONS);
763 cmd.data_size = htobe64((uint64_t) 0);
764 cmd.cmd_version = htobe32(0);
765
766 status = lttng_live_send(viewer_connection, &cmd, sizeof(cmd));
767 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
768 viewer_handle_send_status(status, "list sessions command");
769 goto end;
770 }
771
772 status = lttng_live_recv(viewer_connection, &list, sizeof(list));
773 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
774 viewer_handle_recv_status(status, "session list reply");
775 goto end;
776 }
777
778 sessions_count = be32toh(list.sessions_count);
779 for (i = 0; i < sessions_count; i++) {
780 status = lttng_live_recv(viewer_connection, &lsession, sizeof(lsession));
781 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
782 viewer_handle_recv_status(status, "session reply");
783 goto end;
784 }
785 lsession.hostname[LTTNG_VIEWER_HOST_NAME_MAX - 1] = '\0';
786 lsession.session_name[LTTNG_VIEWER_NAME_MAX - 1] = '\0';
787 session_id = be64toh(lsession.id);
788
789 BT_CPPLOGI_SPEC(viewer_connection->logger,
790 "Adding session to internal list: "
791 "session-id={}, hostname=\"{}\", session-name=\"{}\"",
792 session_id, lsession.hostname, lsession.session_name);
793
794 if ((strncmp(lsession.session_name, viewer_connection->session_name->str,
795 LTTNG_VIEWER_NAME_MAX) == 0) &&
796 (strncmp(lsession.hostname, viewer_connection->target_hostname->str,
797 LTTNG_VIEWER_HOST_NAME_MAX) == 0)) {
798 if (lttng_live_add_session(lttng_live_msg_iter, session_id, lsession.hostname,
799 lsession.session_name)) {
800 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
801 "Failed to add live session");
802 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
803 goto end;
804 }
805 }
806 }
807
808 status = LTTNG_LIVE_VIEWER_STATUS_OK;
809
810 end:
811 return status;
812 }
813
814 enum lttng_live_viewer_status
815 lttng_live_create_viewer_session(struct lttng_live_msg_iter *lttng_live_msg_iter)
816 {
817 struct lttng_viewer_cmd cmd;
818 struct lttng_viewer_create_session_response resp;
819 enum lttng_live_viewer_status status;
820 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
821
822 BT_CPPLOGD_SPEC(viewer_connection->logger, "Creating a viewer session: cmd={}",
823 LTTNG_VIEWER_CREATE_SESSION);
824
825 cmd.cmd = htobe32(LTTNG_VIEWER_CREATE_SESSION);
826 cmd.data_size = htobe64((uint64_t) 0);
827 cmd.cmd_version = htobe32(0);
828
829 status = lttng_live_send(viewer_connection, &cmd, sizeof(cmd));
830 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
831 viewer_handle_send_status(status, "create session command");
832 goto end;
833 }
834
835 status = lttng_live_recv(viewer_connection, &resp, sizeof(resp));
836 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
837 viewer_handle_recv_status(status, "create session reply");
838 goto end;
839 }
840
841 if (be32toh(resp.status) != LTTNG_VIEWER_CREATE_SESSION_OK) {
842 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error creating viewer session");
843 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
844 goto end;
845 }
846
847 status = lttng_live_query_session_ids(lttng_live_msg_iter);
848 if (status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
849 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
850 "Failed to query live viewer session ids");
851 goto end;
852 } else if (status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
853 goto end;
854 }
855
856 end:
857 return status;
858 }
859
860 static enum lttng_live_viewer_status receive_streams(struct lttng_live_session *session,
861 uint32_t stream_count,
862 bt_self_message_iterator *self_msg_iter)
863 {
864 uint32_t i;
865 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
866 enum lttng_live_viewer_status status;
867 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
868
869 BT_CPPLOGI_SPEC(viewer_connection->logger, "Getting {} new streams", stream_count);
870 for (i = 0; i < stream_count; i++) {
871 struct lttng_viewer_stream stream;
872 struct lttng_live_stream_iterator *live_stream;
873 uint64_t stream_id;
874 uint64_t ctf_trace_id;
875
876 status = lttng_live_recv(viewer_connection, &stream, sizeof(stream));
877 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
878 viewer_handle_recv_status(status, "stream reply");
879 goto end;
880 }
881 stream.path_name[LTTNG_VIEWER_PATH_MAX - 1] = '\0';
882 stream.channel_name[LTTNG_VIEWER_NAME_MAX - 1] = '\0';
883 stream_id = be64toh(stream.id);
884 ctf_trace_id = be64toh(stream.ctf_trace_id);
885
886 if (stream.metadata_flag) {
887 BT_CPPLOGI_SPEC(viewer_connection->logger, " metadata stream {} : {}/{}", stream_id,
888 stream.path_name, stream.channel_name);
889 if (lttng_live_metadata_create_stream(session, ctf_trace_id, stream_id)) {
890 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
891 "Error creating metadata stream");
892 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
893 goto end;
894 }
895 session->lazy_stream_msg_init = true;
896 } else {
897 BT_CPPLOGI_SPEC(viewer_connection->logger, " stream {} : {}/{}", stream_id,
898 stream.path_name, stream.channel_name);
899 live_stream =
900 lttng_live_stream_iterator_create(session, ctf_trace_id, stream_id, self_msg_iter);
901 if (!live_stream) {
902 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error creating stream");
903 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
904 goto end;
905 }
906 }
907 }
908 status = LTTNG_LIVE_VIEWER_STATUS_OK;
909
910 end:
911 return status;
912 }
913
914 enum lttng_live_viewer_status lttng_live_session_attach(struct lttng_live_session *session,
915 bt_self_message_iterator *self_msg_iter)
916 {
917 struct lttng_viewer_cmd cmd;
918 enum lttng_live_viewer_status status;
919 struct lttng_viewer_attach_session_request rq;
920 struct lttng_viewer_attach_session_response rp;
921 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
922 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
923 uint64_t session_id = session->id;
924 uint32_t streams_count;
925 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
926 char cmd_buf[cmd_buf_len];
927
928 BT_CPPLOGD_SPEC(viewer_connection->logger,
929 "Attaching to session: cmd={}, session-id={}, seek={}",
930 LTTNG_VIEWER_ATTACH_SESSION, session_id, LTTNG_VIEWER_SEEK_LAST);
931
932 cmd.cmd = htobe32(LTTNG_VIEWER_ATTACH_SESSION);
933 cmd.data_size = htobe64((uint64_t) sizeof(rq));
934 cmd.cmd_version = htobe32(0);
935
936 memset(&rq, 0, sizeof(rq));
937 rq.session_id = htobe64(session_id);
938 // TODO: add cmd line parameter to select seek beginning
939 // rq.seek = htobe32(LTTNG_VIEWER_SEEK_BEGINNING);
940 rq.seek = htobe32(LTTNG_VIEWER_SEEK_LAST);
941
942 /*
943 * Merge the cmd and connection request to prevent a write-write
944 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
945      * second write from being performed quickly in the presence of Nagle's algorithm.
946 */
947 memcpy(cmd_buf, &cmd, sizeof(cmd));
948 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
949 status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
950 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
951 viewer_handle_send_status(status, "attach session command");
952 goto end;
953 }
954
955 status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
956 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
957 viewer_handle_recv_status(status, "attach session reply");
958 goto end;
959 }
960
961 streams_count = be32toh(rp.streams_count);
962 switch (be32toh(rp.status)) {
963 case LTTNG_VIEWER_ATTACH_OK:
964 break;
965 case LTTNG_VIEWER_ATTACH_UNK:
966 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Session id {} is unknown",
967 session_id);
968 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
969 goto end;
970 case LTTNG_VIEWER_ATTACH_ALREADY:
971 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
972 "There is already a viewer attached to this session");
973 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
974 goto end;
975 case LTTNG_VIEWER_ATTACH_NOT_LIVE:
976 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Not a live session");
977 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
978 goto end;
979 case LTTNG_VIEWER_ATTACH_SEEK_ERR:
980 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Wrong seek parameter");
981 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
982 goto end;
983 default:
984 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Unknown attach return code {}",
985 be32toh(rp.status));
986 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
987 goto end;
988 }
989
990 /* We receive the initial list of streams. */
991 status = receive_streams(session, streams_count, self_msg_iter);
992 switch (status) {
993 case LTTNG_LIVE_VIEWER_STATUS_OK:
994 break;
995 case LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED:
996 goto end;
997 case LTTNG_LIVE_VIEWER_STATUS_ERROR:
998 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error receiving streams");
999 goto end;
1000 default:
1001 bt_common_abort();
1002 }
1003
1004 session->attached = true;
1005 session->new_streams_needed = false;
1006
1007 end:
1008 return status;
1009 }
1010
1011 enum lttng_live_viewer_status lttng_live_session_detach(struct lttng_live_session *session)
1012 {
1013 struct lttng_viewer_cmd cmd;
1014 enum lttng_live_viewer_status status;
1015 struct lttng_viewer_detach_session_request rq;
1016 struct lttng_viewer_detach_session_response rp;
1017 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
1018 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
1019 uint64_t session_id = session->id;
1020 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
1021 char cmd_buf[cmd_buf_len];
1022
1023 /*
1024      * The session might already be detached and the viewer socket might
1025      * already have been closed. This happens when this function is called
1026      * while tearing down the graph after an error.
1027 */
1028 if (!session->attached || viewer_connection->control_sock == BT_INVALID_SOCKET) {
1029 return LTTNG_LIVE_VIEWER_STATUS_OK;
1030 }
1031
1032 BT_CPPLOGD_SPEC(viewer_connection->logger, "Detaching from session: cmd={}, session-id={}",
1033 LTTNG_VIEWER_DETACH_SESSION, session_id);
1034
1035 cmd.cmd = htobe32(LTTNG_VIEWER_DETACH_SESSION);
1036 cmd.data_size = htobe64((uint64_t) sizeof(rq));
1037 cmd.cmd_version = htobe32(0);
1038
1039 memset(&rq, 0, sizeof(rq));
1040 rq.session_id = htobe64(session_id);
1041
1042 /*
1043 * Merge the cmd and connection request to prevent a write-write
1044 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
1045      * second write from being performed quickly in the presence of Nagle's algorithm.
1046 */
1047 memcpy(cmd_buf, &cmd, sizeof(cmd));
1048 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
1049 status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
1050 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1051 viewer_handle_send_status(status, "detach session command");
1052 goto end;
1053 }
1054
1055 status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
1056 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1057 viewer_handle_recv_status(status, "detach session reply");
1058 goto end;
1059 }
1060
1061 switch (be32toh(rp.status)) {
1062 case LTTNG_VIEWER_DETACH_SESSION_OK:
1063 break;
1064 case LTTNG_VIEWER_DETACH_SESSION_UNK:
1065 BT_CPPLOGW_SPEC(viewer_connection->logger, "Session id {} is unknown", session_id);
1066 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
1067 goto end;
1068 case LTTNG_VIEWER_DETACH_SESSION_ERR:
1069 BT_CPPLOGW_SPEC(viewer_connection->logger, "Error detaching session id {}", session_id);
1070 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
1071 goto end;
1072 default:
1073 BT_CPPLOGE_SPEC(viewer_connection->logger, "Unknown detach return code {}",
1074 be32toh(rp.status));
1075 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
1076 goto end;
1077 }
1078
1079 session->attached = false;
1080
1081 status = LTTNG_LIVE_VIEWER_STATUS_OK;
1082
1083 end:
1084 return status;
1085 }
1086
1087 enum lttng_live_get_one_metadata_status
1088 lttng_live_get_one_metadata_packet(struct lttng_live_trace *trace, std::vector<char>& buf)
1089 {
1090 uint64_t len = 0;
1091 enum lttng_live_get_one_metadata_status status;
1092 enum lttng_live_viewer_status viewer_status;
1093 struct lttng_viewer_cmd cmd;
1094 struct lttng_viewer_get_metadata rq;
1095 struct lttng_viewer_metadata_packet rp;
1096 std::vector<char> data;
1097 struct lttng_live_session *session = trace->session;
1098 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
1099 struct lttng_live_metadata *metadata = trace->metadata;
1100 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
1101 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
1102 char cmd_buf[cmd_buf_len];
1103
1104 BT_CPPLOGD_SPEC(viewer_connection->logger,
1105 "Requesting new metadata for trace:"
1106 "cmd={}, trace-id={}, metadata-stream-id={}",
1107 LTTNG_VIEWER_GET_METADATA, trace->id, metadata->stream_id);
1108
1109 rq.stream_id = htobe64(metadata->stream_id);
1110 cmd.cmd = htobe32(LTTNG_VIEWER_GET_METADATA);
1111 cmd.data_size = htobe64((uint64_t) sizeof(rq));
1112 cmd.cmd_version = htobe32(0);
1113
1114 /*
1115 * Merge the cmd and connection request to prevent a write-write
1116 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
1117      * second write from being performed quickly in the presence of Nagle's algorithm.
1118 */
1119 memcpy(cmd_buf, &cmd, sizeof(cmd));
1120 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
1121 viewer_status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
1122 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1123 viewer_handle_send_status(viewer_status, "get metadata command");
1124 status = (enum lttng_live_get_one_metadata_status) viewer_status;
1125 goto end;
1126 }
1127
1128 viewer_status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
1129 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1130 viewer_handle_recv_status(viewer_status, "get metadata reply");
1131 status = (enum lttng_live_get_one_metadata_status) viewer_status;
1132 goto end;
1133 }
1134
1135 switch (be32toh(rp.status)) {
1136 case LTTNG_VIEWER_METADATA_OK:
1137 BT_CPPLOGD_SPEC(viewer_connection->logger, "Received get_metadata response: ok");
1138 break;
1139 case LTTNG_VIEWER_NO_NEW_METADATA:
1140 BT_CPPLOGD_SPEC(viewer_connection->logger, "Received get_metadata response: no new");
1141 status = LTTNG_LIVE_GET_ONE_METADATA_STATUS_END;
1142 goto end;
1143 case LTTNG_VIEWER_METADATA_ERR:
1144 /*
1145          * The relay daemon cannot find this stream id. Maybe it is
1146          * already gone. This can happen with a short-lived UST
1147          * application in a per-PID session.
1148 */
1149 BT_CPPLOGD_SPEC(viewer_connection->logger, "Received get_metadata response: error");
1150 status = LTTNG_LIVE_GET_ONE_METADATA_STATUS_CLOSED;
1151 goto end;
1152 default:
1153 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1154 "Received get_metadata response: unknown");
1155 status = LTTNG_LIVE_GET_ONE_METADATA_STATUS_ERROR;
1156 goto end;
1157 }
1158
1159 len = be64toh(rp.len);
1160 if (len == 0) {
1161 /*
1162 * We received a `LTTNG_VIEWER_METADATA_OK` with a packet
1163 * length of 0. This means we must try again. This scenario
1164 * arises when a clear command is performed on an lttng session.
1165 */
1166 BT_CPPLOGD_SPEC(
1167 viewer_connection->logger,
1168 "Expecting a metadata packet of size 0. Retry to get a packet from the relay.");
1169 goto empty_metadata_packet_retry;
1170 }
1171
1172 BT_CPPLOGD_SPEC(viewer_connection->logger, "Writing {} bytes to metadata", len);
1173 if (len <= 0) {
1174 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Erroneous response length");
1175 status = LTTNG_LIVE_GET_ONE_METADATA_STATUS_ERROR;
1176 goto end;
1177 }
1178
1179 data.resize(len);
1180
1181 viewer_status = lttng_live_recv(viewer_connection, data.data(), len);
1182 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1183 viewer_handle_recv_status(viewer_status, "get metadata packet");
1184 status = (enum lttng_live_get_one_metadata_status) viewer_status;
1185 goto end;
1186 }
1187
1188 /*
1189      * Append the received metadata to the output buffer.
1190 */
1191 buf.insert(buf.end(), data.begin(), data.end());
1192
1193 empty_metadata_packet_retry:
1194 status = LTTNG_LIVE_GET_ONE_METADATA_STATUS_OK;
1195
1196 end:
1197 return status;
1198 }
1199
1200 /*
1201 * Assign the fields from a lttng_viewer_index to a packet_index.
1202 */
1203 static void lttng_index_to_packet_index(struct lttng_viewer_index *lindex,
1204 struct packet_index *pindex)
1205 {
1206 BT_ASSERT(lindex);
1207 BT_ASSERT(pindex);
1208
1209 pindex->offset = be64toh(lindex->offset);
1210 pindex->packet_size = be64toh(lindex->packet_size);
1211 pindex->content_size = be64toh(lindex->content_size);
1212 pindex->ts_cycles.timestamp_begin = be64toh(lindex->timestamp_begin);
1213 pindex->ts_cycles.timestamp_end = be64toh(lindex->timestamp_end);
1214 pindex->events_discarded = be64toh(lindex->events_discarded);
1215 }
1216
1217 static void lttng_live_need_new_streams(struct lttng_live_msg_iter *lttng_live_msg_iter)
1218 {
1219 uint64_t session_idx;
1220
1221 for (session_idx = 0; session_idx < lttng_live_msg_iter->sessions->len; session_idx++) {
1222 struct lttng_live_session *session =
1223 (lttng_live_session *) g_ptr_array_index(lttng_live_msg_iter->sessions, session_idx);
1224 BT_CPPLOGD_SPEC(lttng_live_msg_iter->logger,
1225 "Marking session as needing new streams: "
1226 "session-id={}",
1227 session->id);
1228 session->new_streams_needed = true;
1229 }
1230 }
1231
1232 enum lttng_live_iterator_status
1233 lttng_live_get_next_index(struct lttng_live_msg_iter *lttng_live_msg_iter,
1234 struct lttng_live_stream_iterator *stream, struct packet_index *index)
1235 {
1236 struct lttng_viewer_cmd cmd;
1237 struct lttng_viewer_get_next_index rq;
1238 enum lttng_live_viewer_status viewer_status;
1239 struct lttng_viewer_index rp;
1240 enum lttng_live_iterator_status status;
1241 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
1242 struct lttng_live_trace *trace = stream->trace;
1243 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
1244 char cmd_buf[cmd_buf_len];
1245 uint32_t flags, rp_status;
1246
1247 BT_CPPLOGD_SPEC(viewer_connection->logger,
1248 "Requesting next index for stream: cmd={}, "
1249 "viewer-stream-id={}",
1250 LTTNG_VIEWER_GET_NEXT_INDEX, stream->viewer_stream_id);
1251 cmd.cmd = htobe32(LTTNG_VIEWER_GET_NEXT_INDEX);
1252 cmd.data_size = htobe64((uint64_t) sizeof(rq));
1253 cmd.cmd_version = htobe32(0);
1254
1255 memset(&rq, 0, sizeof(rq));
1256 rq.stream_id = htobe64(stream->viewer_stream_id);
1257
1258 /*
1259 * Merge the cmd and connection request to prevent a write-write
1260 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
1261      * second write from being performed quickly in the presence of Nagle's algorithm.
1262 */
1263 memcpy(cmd_buf, &cmd, sizeof(cmd));
1264 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
1265
1266 viewer_status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
1267 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1268 viewer_handle_send_status(viewer_status, "get next index command");
1269 goto error;
1270 }
1271
1272 viewer_status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
1273 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1274 viewer_handle_recv_status(viewer_status, "get next index reply");
1275 goto error;
1276 }
1277
1278 flags = be32toh(rp.flags);
1279 rp_status = be32toh(rp.status);
1280
1281 BT_CPPLOGD_SPEC(
1282 viewer_connection->logger, "Received response from relay daemon: cmd=%s, response={}",
1283 LTTNG_VIEWER_GET_NEXT_INDEX, static_cast<lttng_viewer_next_index_return_code>(rp_status));
1284
1285 if (flags & LTTNG_VIEWER_FLAG_NEW_STREAM) {
1286 BT_CPPLOGD_SPEC(viewer_connection->logger,
1287 "Marking all sessions as possibly needing new streams: "
1288 "response={}, response-flag=NEW_STREAM",
1289 static_cast<lttng_viewer_next_index_return_code>(rp_status));
1290 lttng_live_need_new_streams(lttng_live_msg_iter);
1291 }
1292
1293 switch (rp_status) {
1294 case LTTNG_VIEWER_INDEX_INACTIVE:
1295 {
1296 uint64_t ctf_stream_class_id;
1297
1298 memset(index, 0, sizeof(struct packet_index));
1299 index->ts_cycles.timestamp_end = be64toh(rp.timestamp_end);
1300 stream->current_inactivity_ts = index->ts_cycles.timestamp_end;
1301 ctf_stream_class_id = be64toh(rp.stream_id);
1302 if (stream->ctf_stream_class_id.is_set) {
1303 BT_ASSERT(stream->ctf_stream_class_id.value == ctf_stream_class_id);
1304 } else {
1305 stream->ctf_stream_class_id.value = ctf_stream_class_id;
1306 stream->ctf_stream_class_id.is_set = true;
1307 }
1308 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_QUIESCENT);
1309 status = LTTNG_LIVE_ITERATOR_STATUS_OK;
1310 break;
1311 }
1312 case LTTNG_VIEWER_INDEX_OK:
1313 {
1314 uint64_t ctf_stream_class_id;
1315
1316 lttng_index_to_packet_index(&rp, index);
1317 ctf_stream_class_id = be64toh(rp.stream_id);
1318 if (stream->ctf_stream_class_id.is_set) {
1319 BT_ASSERT(stream->ctf_stream_class_id.value == ctf_stream_class_id);
1320 } else {
1321 stream->ctf_stream_class_id.value = ctf_stream_class_id;
1322 stream->ctf_stream_class_id.is_set = true;
1323 }
1324 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_ACTIVE_DATA);
1325
1326 if (flags & LTTNG_VIEWER_FLAG_NEW_METADATA) {
1327 BT_CPPLOGD_SPEC(viewer_connection->logger,
1328 "Marking trace as needing new metadata: "
1329 "response={}, response-flag=NEW_METADATA, trace-id={}",
1330 static_cast<lttng_viewer_next_index_return_code>(rp_status), trace->id);
1331 trace->metadata_stream_state = LTTNG_LIVE_METADATA_STREAM_STATE_NEEDED;
1332 }
1333 status = LTTNG_LIVE_ITERATOR_STATUS_OK;
1334 break;
1335 }
1336 case LTTNG_VIEWER_INDEX_RETRY:
1337 memset(index, 0, sizeof(struct packet_index));
1338 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_ACTIVE_NO_DATA);
1339 status = LTTNG_LIVE_ITERATOR_STATUS_AGAIN;
1340 goto end;
1341 case LTTNG_VIEWER_INDEX_HUP:
1342 memset(index, 0, sizeof(struct packet_index));
1343 index->offset = EOF;
1344 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_EOF);
1345 stream->has_stream_hung_up = true;
1346 status = LTTNG_LIVE_ITERATOR_STATUS_END;
1347 break;
1348 case LTTNG_VIEWER_INDEX_ERR:
1349 memset(index, 0, sizeof(struct packet_index));
1350 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_ACTIVE_NO_DATA);
1351 status = LTTNG_LIVE_ITERATOR_STATUS_ERROR;
1352 goto end;
1353 default:
1354 BT_CPPLOGD_SPEC(viewer_connection->logger,
1355 "Received get_next_index response: unknown value");
1356 memset(index, 0, sizeof(struct packet_index));
1357 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_ACTIVE_NO_DATA);
1358 status = LTTNG_LIVE_ITERATOR_STATUS_ERROR;
1359 goto end;
1360 }
1361
1362 goto end;
1363
1364 error:
1365 status = viewer_status_to_live_iterator_status(viewer_status);
1366 end:
1367 return status;
1368 }
1369
1370 enum ctf_msg_iter_medium_status
1371 lttng_live_get_stream_bytes(struct lttng_live_msg_iter *lttng_live_msg_iter,
1372 struct lttng_live_stream_iterator *stream, uint8_t *buf,
1373 uint64_t offset, uint64_t req_len, uint64_t *recv_len)
1374 {
1375 enum ctf_msg_iter_medium_status status;
1376 enum lttng_live_viewer_status viewer_status;
1377 struct lttng_viewer_trace_packet rp;
1378 struct lttng_viewer_cmd cmd;
1379 struct lttng_viewer_get_packet rq;
1380 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
1381 struct lttng_live_trace *trace = stream->trace;
1382 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
1383 char cmd_buf[cmd_buf_len];
1384 uint32_t flags, rp_status;
1385
1386 BT_CPPLOGD_SPEC(viewer_connection->logger,
1387 "Requesting data from stream: cmd={}, "
1388 "offset={}, request-len={}",
1389 LTTNG_VIEWER_GET_PACKET, offset, req_len);
1390
1391 cmd.cmd = htobe32(LTTNG_VIEWER_GET_PACKET);
1392 cmd.data_size = htobe64((uint64_t) sizeof(rq));
1393 cmd.cmd_version = htobe32(0);
1394
1395 memset(&rq, 0, sizeof(rq));
1396 rq.stream_id = htobe64(stream->viewer_stream_id);
1397 rq.offset = htobe64(offset);
1398 rq.len = htobe32(req_len);
1399
1400 /*
1401 * Merge the cmd and connection request to prevent a write-write
1402 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
1403      * second write from being performed quickly in the presence of Nagle's algorithm.
1404 */
1405 memcpy(cmd_buf, &cmd, sizeof(cmd));
1406 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
1407
1408 viewer_status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
1409 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1410 viewer_handle_send_status(viewer_status, "get data packet command");
1411 goto error_convert_status;
1412 }
1413
1414 viewer_status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
1415 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1416 viewer_handle_recv_status(viewer_status, "get data packet reply");
1417 goto error_convert_status;
1418 }
1419
1420 flags = be32toh(rp.flags);
1421 rp_status = be32toh(rp.status);
1422
1423 BT_CPPLOGD_SPEC(
1424 viewer_connection->logger, "Received response from relay daemon: cmd={}, response={}",
1425 LTTNG_VIEWER_GET_PACKET, static_cast<lttng_viewer_get_packet_return_code>(rp_status));
1426 switch (rp_status) {
1427 case LTTNG_VIEWER_GET_PACKET_OK:
1428 req_len = be32toh(rp.len);
1429 BT_CPPLOGD_SPEC(viewer_connection->logger,
1430 "Got packet from relay daemon: response={}, packet-len={}",
1431 static_cast<lttng_viewer_get_packet_return_code>(rp_status), req_len);
1432 break;
1433 case LTTNG_VIEWER_GET_PACKET_RETRY:
1434 /* Unimplemented by relay daemon */
1435 status = CTF_MSG_ITER_MEDIUM_STATUS_AGAIN;
1436 goto end;
1437 case LTTNG_VIEWER_GET_PACKET_ERR:
1438 if (flags & LTTNG_VIEWER_FLAG_NEW_METADATA) {
1439 BT_CPPLOGD_SPEC(viewer_connection->logger,
1440 "Marking trace as needing new metadata: "
1441 "response={}, response-flag=NEW_METADATA, trace-id={}",
1442 static_cast<lttng_viewer_get_packet_return_code>(rp_status), trace->id);
1443 trace->metadata_stream_state = LTTNG_LIVE_METADATA_STREAM_STATE_NEEDED;
1444 }
1445 if (flags & LTTNG_VIEWER_FLAG_NEW_STREAM) {
1446 BT_CPPLOGD_SPEC(viewer_connection->logger,
1447 "Marking all sessions as possibly needing new streams: "
1448 "response={}, response-flag=NEW_STREAM",
1449 static_cast<lttng_viewer_get_packet_return_code>(rp_status));
1450 lttng_live_need_new_streams(lttng_live_msg_iter);
1451 }
1452 if (flags & (LTTNG_VIEWER_FLAG_NEW_METADATA | LTTNG_VIEWER_FLAG_NEW_STREAM)) {
1453 status = CTF_MSG_ITER_MEDIUM_STATUS_AGAIN;
1454 BT_CPPLOGD_SPEC(viewer_connection->logger,
1455 "Reply with any one flags set means we should retry: response={}",
1456 static_cast<lttng_viewer_get_packet_return_code>(rp_status));
1457 goto end;
1458 }
1459 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1460 "Received get_data_packet response: error");
1461 status = CTF_MSG_ITER_MEDIUM_STATUS_ERROR;
1462 goto end;
1463 case LTTNG_VIEWER_GET_PACKET_EOF:
1464 status = CTF_MSG_ITER_MEDIUM_STATUS_EOF;
1465 goto end;
1466 default:
1467 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1468 "Received get_data_packet response: unknown ({})", rp_status);
1469 status = CTF_MSG_ITER_MEDIUM_STATUS_ERROR;
1470 goto end;
1471 }
1472
1473 if (req_len == 0) {
1474 status = CTF_MSG_ITER_MEDIUM_STATUS_ERROR;
1475 goto end;
1476 }
1477
1478 viewer_status = lttng_live_recv(viewer_connection, buf, req_len);
1479 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1480 viewer_handle_recv_status(viewer_status, "get data packet");
1481 goto error_convert_status;
1482 }
1483 *recv_len = req_len;
1484
1485 status = CTF_MSG_ITER_MEDIUM_STATUS_OK;
1486 goto end;
1487
1488 error_convert_status:
1489 status = viewer_status_to_ctf_msg_iter_medium_status(viewer_status);
1490 end:
1491 return status;
1492 }
1493
1494 /*
1495 * Request new streams for a session.
1496 */
1497 enum lttng_live_iterator_status
1498 lttng_live_session_get_new_streams(struct lttng_live_session *session,
1499 bt_self_message_iterator *self_msg_iter)
1500 {
1501 enum lttng_live_iterator_status status = LTTNG_LIVE_ITERATOR_STATUS_OK;
1502 struct lttng_viewer_cmd cmd;
1503 struct lttng_viewer_new_streams_request rq;
1504 struct lttng_viewer_new_streams_response rp;
1505 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
1506 enum lttng_live_viewer_status viewer_status;
1507 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
1508 uint32_t streams_count;
1509 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
1510 char cmd_buf[cmd_buf_len];
1511
1512 if (!session->new_streams_needed) {
1513 status = LTTNG_LIVE_ITERATOR_STATUS_OK;
1514 goto end;
1515 }
1516
1517 BT_CPPLOGD_SPEC(viewer_connection->logger,
1518 "Requesting new streams for session: cmd={}, session-id={}",
1519 LTTNG_VIEWER_GET_NEW_STREAMS, session->id);
1520
1521 cmd.cmd = htobe32(LTTNG_VIEWER_GET_NEW_STREAMS);
1522 cmd.data_size = htobe64((uint64_t) sizeof(rq));
1523 cmd.cmd_version = htobe32(0);
1524
1525 memset(&rq, 0, sizeof(rq));
1526 rq.session_id = htobe64(session->id);
1527
1528 /*
1529 * Merge the cmd and connection request to prevent a write-write
1530 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
1531      * second write from being performed quickly in the presence of Nagle's algorithm.
1532 */
1533 memcpy(cmd_buf, &cmd, sizeof(cmd));
1534 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
1535
1536 viewer_status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
1537 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1538 viewer_handle_send_status(viewer_status, "get new streams command");
1539 status = viewer_status_to_live_iterator_status(viewer_status);
1540 goto end;
1541 }
1542
1543 viewer_status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
1544 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1545 viewer_handle_recv_status(viewer_status, "get new streams reply");
1546 status = viewer_status_to_live_iterator_status(viewer_status);
1547 goto end;
1548 }
1549
1550 streams_count = be32toh(rp.streams_count);
1551
1552 switch (be32toh(rp.status)) {
1553 case LTTNG_VIEWER_NEW_STREAMS_OK:
1554 session->new_streams_needed = false;
1555 break;
1556 case LTTNG_VIEWER_NEW_STREAMS_NO_NEW:
1557 session->new_streams_needed = false;
1558 goto end;
1559 case LTTNG_VIEWER_NEW_STREAMS_HUP:
1560 session->new_streams_needed = false;
1561 session->closed = true;
1562 status = LTTNG_LIVE_ITERATOR_STATUS_END;
1563 goto end;
1564 case LTTNG_VIEWER_NEW_STREAMS_ERR:
1565 BT_CPPLOGD_SPEC(viewer_connection->logger, "Received get_new_streams response: error");
1566 status = LTTNG_LIVE_ITERATOR_STATUS_ERROR;
1567 goto end;
1568 default:
1569 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1570 "Received get_new_streams response: Unknown:"
1571 "return code {}",
1572 be32toh(rp.status));
1573 status = LTTNG_LIVE_ITERATOR_STATUS_ERROR;
1574 goto end;
1575 }
1576
1577 viewer_status = receive_streams(session, streams_count, self_msg_iter);
1578 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1579 viewer_handle_recv_status(viewer_status, "new streams");
1580 status = viewer_status_to_live_iterator_status(viewer_status);
1581 goto end;
1582 }
1583
1584 status = LTTNG_LIVE_ITERATOR_STATUS_OK;
1585 end:
1586 return status;
1587 }
1588
1589 enum lttng_live_viewer_status live_viewer_connection_create(
1590 const char *url, bool in_query, struct lttng_live_msg_iter *lttng_live_msg_iter,
1591 const bt2c::Logger& parentLogger, struct live_viewer_connection **viewer)
1592 {
1593 enum lttng_live_viewer_status status;
1594
1595 live_viewer_connection *viewer_connection = new live_viewer_connection {parentLogger};
1596
1597 if (bt_socket_init(viewer_connection->logger) != 0) {
1598 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Failed to init socket");
1599 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
1600 goto error;
1601 }
1602
1603 viewer_connection->control_sock = BT_INVALID_SOCKET;
1604 viewer_connection->port = -1;
1605 viewer_connection->in_query = in_query;
1606 viewer_connection->lttng_live_msg_iter = lttng_live_msg_iter;
1607 viewer_connection->url = g_string_new(url);
1608 if (!viewer_connection->url) {
1609 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Failed to allocate URL buffer");
1610 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
1611 goto error;
1612 }
1613
1614 BT_CPPLOGD_SPEC(viewer_connection->logger, "Establishing connection to url \"{}\"...", url);
1615 status = lttng_live_connect_viewer(viewer_connection);
1616 /*
1617      * Only print the error and append the error cause in case of error,
1618      * not in case of interruption.
1619 */
1620 if (status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
1621 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1622 "Failed to establish connection: "
1623 "url=\"{}\"",
1624 url);
1625 goto error;
1626 } else if (status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
1627 goto error;
1628 }
1629 BT_CPPLOGD_SPEC(viewer_connection->logger, "Connection to url \"{}\" is established", url);
1630
1631 *viewer = viewer_connection;
1632 status = LTTNG_LIVE_VIEWER_STATUS_OK;
1633 goto end;
1634
1635 error:
1636 if (viewer_connection) {
1637 live_viewer_connection_destroy(viewer_connection);
1638 }
1639 end:
1640 return status;
1641 }
1642
1643 void live_viewer_connection_destroy(struct live_viewer_connection *viewer_connection)
1644 {
1645 if (!viewer_connection) {
1646 goto end;
1647 }
1648
1649 BT_CPPLOGD_SPEC(viewer_connection->logger, "Closing connection to relay: relay-url=\"{}\"",
1650 viewer_connection->url->str);
1651
1652 lttng_live_disconnect_viewer(viewer_connection);
1653
1654 if (viewer_connection->url) {
1655 g_string_free(viewer_connection->url, true);
1656 }
1657
1658 if (viewer_connection->relay_hostname) {
1659 g_string_free(viewer_connection->relay_hostname, true);
1660 }
1661
1662 if (viewer_connection->target_hostname) {
1663 g_string_free(viewer_connection->target_hostname, true);
1664 }
1665
1666 if (viewer_connection->session_name) {
1667 g_string_free(viewer_connection->session_name, true);
1668 }
1669
1670 if (viewer_connection->proto) {
1671 g_string_free(viewer_connection->proto, true);
1672 }
1673
1674 delete viewer_connection;
1675
1676 bt_socket_fini();
1677
1678 end:
1679 return;
1680 }