src.ctf.lttng-live: lttng_live_metadata_update: use std::vector as metadata buffer
babeltrace.git: src/plugins/ctf/lttng-live/viewer-connection.cpp
1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright 2019 Francis Deslauriers <francis.deslauriers@efficios.com>
5 * Copyright 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 */
7
8 #include <glib.h>
9 #include <stdint.h>
10 #include <stdio.h>
11
12 #include <babeltrace2/babeltrace.h>
13
14 #include "common/common.h"
15 #include "compat/endian.h" /* IWYU pragma: keep */
16
17 #include "data-stream.hpp"
18 #include "lttng-live.hpp"
19 #include "lttng-viewer-abi.hpp"
20 #include "metadata.hpp"
21 #include "viewer-connection.hpp"
22
23 #define viewer_handle_send_recv_status(_status, _action, _msg_str) \
24 do { \
25 switch (_status) { \
26 case LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED: \
27 break; \
28 case LTTNG_LIVE_VIEWER_STATUS_ERROR: \
29 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, \
30 "Error " _action " " _msg_str); \
31 break; \
32 default: \
33 bt_common_abort(); \
34 } \
35 } while (0)
36
37 #define viewer_handle_send_status(_status, _msg_str) \
38 viewer_handle_send_recv_status(_status, "sending", _msg_str)
39
40 #define viewer_handle_recv_status(_status, _msg_str) \
41 viewer_handle_send_recv_status(_status, "receiving", _msg_str)
42
43 #define LTTNG_LIVE_CPPLOGE_APPEND_CAUSE_ERRNO(_msg, _fmt, ...) \
44 do { \
45 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, _msg ": {}" _fmt, \
46 bt_socket_errormsg(), ##__VA_ARGS__); \
47 } while (0)
48
49 static inline enum lttng_live_iterator_status
50 viewer_status_to_live_iterator_status(enum lttng_live_viewer_status viewer_status)
51 {
52 switch (viewer_status) {
53 case LTTNG_LIVE_VIEWER_STATUS_OK:
54 return LTTNG_LIVE_ITERATOR_STATUS_OK;
55 case LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED:
56 return LTTNG_LIVE_ITERATOR_STATUS_AGAIN;
57 case LTTNG_LIVE_VIEWER_STATUS_ERROR:
58 return LTTNG_LIVE_ITERATOR_STATUS_ERROR;
59 }
60
61 bt_common_abort();
62 }
63
64 static inline enum ctf_msg_iter_medium_status
65 viewer_status_to_ctf_msg_iter_medium_status(enum lttng_live_viewer_status viewer_status)
66 {
67 switch (viewer_status) {
68 case LTTNG_LIVE_VIEWER_STATUS_OK:
69 return CTF_MSG_ITER_MEDIUM_STATUS_OK;
70 case LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED:
71 return CTF_MSG_ITER_MEDIUM_STATUS_AGAIN;
72 case LTTNG_LIVE_VIEWER_STATUS_ERROR:
73 return CTF_MSG_ITER_MEDIUM_STATUS_ERROR;
74 }
75
76 bt_common_abort();
77 }
78
79 static inline void viewer_connection_close_socket(struct live_viewer_connection *viewer_connection)
80 {
81 int ret = bt_socket_close(viewer_connection->control_sock);
82 if (ret == -1) {
83 BT_CPPLOGW_ERRNO_SPEC(viewer_connection->logger,
84 "Error closing viewer connection socket: ", ".");
85 }
86
87 viewer_connection->control_sock = BT_INVALID_SOCKET;
88 }
89
90 /*
91 * This function receives a message from the Relay daemon.
92 * If it receives the entire message, it returns _OK;
93 * if it's interrupted, it returns _INTERRUPTED;
94 * otherwise, it returns _ERROR.
95 */
96 static enum lttng_live_viewer_status
97 lttng_live_recv(struct live_viewer_connection *viewer_connection, void *buf, size_t len)
98 {
99 ssize_t received;
100 size_t total_received = 0, to_receive = len;
101 struct lttng_live_msg_iter *lttng_live_msg_iter = viewer_connection->lttng_live_msg_iter;
102 enum lttng_live_viewer_status status;
103 BT_SOCKET sock = viewer_connection->control_sock;
104
105 /*
106 * Receive a message from the Relay.
107 */
108 do {
109 received = bt_socket_recv(sock, (char *) buf + total_received, to_receive, 0);
110 if (received == BT_SOCKET_ERROR) {
111 if (bt_socket_interrupted()) {
112 if (lttng_live_graph_is_canceled(lttng_live_msg_iter)) {
113 /*
114 * This interruption was due to a
115 * SIGINT and the graph is being torn
116 * down.
117 */
118 status = LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED;
119 lttng_live_msg_iter->was_interrupted = true;
120 goto end;
121 } else {
122 /*
123 * A signal was received, but the graph
124 * is not being torn down. Carry on.
125 */
126 continue;
127 }
128 } else {
129 /*
130 * For any other type of socket error, close
131 * the socket and return an error.
132 */
133 LTTNG_LIVE_CPPLOGE_APPEND_CAUSE_ERRNO("Error receiving from Relay", ".");
134
135 viewer_connection_close_socket(viewer_connection);
136 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
137 goto end;
138 }
139 } else if (received == 0) {
140 /*
141 * The recv() call returned 0. This means the
142 * connection was shut down in an orderly manner by the other peer.
143 * If that happens while we are trying to receive
144 * a message from it, it means something went wrong.
145 * Close the socket and return an error.
146 */
147 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
148 "Remote side has closed connection");
149 viewer_connection_close_socket(viewer_connection);
150 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
151 goto end;
152 }
153
154 BT_ASSERT(received <= to_receive);
155 total_received += received;
156 to_receive -= received;
157
158 } while (to_receive > 0);
159
160 BT_ASSERT(total_received == len);
161 status = LTTNG_LIVE_VIEWER_STATUS_OK;
162
163 end:
164 return status;
165 }
166
167 /*
168 * This function sends a message to the Relay daemon.
169 * If it sends the entire message, it returns _OK;
170 * if it's interrupted, it returns _INTERRUPTED;
171 * otherwise, it returns _ERROR.
172 */
173 static enum lttng_live_viewer_status
174 lttng_live_send(struct live_viewer_connection *viewer_connection, const void *buf, size_t len)
175 {
176 enum lttng_live_viewer_status status;
177 struct lttng_live_msg_iter *lttng_live_msg_iter = viewer_connection->lttng_live_msg_iter;
178 BT_SOCKET sock = viewer_connection->control_sock;
179 size_t to_send = len;
180 ssize_t total_sent = 0;
181
182 do {
183 ssize_t sent = bt_socket_send_nosigpipe(sock, (char *) buf + total_sent, to_send);
184 if (sent == BT_SOCKET_ERROR) {
185 if (bt_socket_interrupted()) {
186 if (lttng_live_graph_is_canceled(lttng_live_msg_iter)) {
187 /*
188 * This interruption was due to a SIGINT and
189 * the graph is being torn down.
190 */
191 status = LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED;
192 lttng_live_msg_iter->was_interrupted = true;
193 goto end;
194 } else {
195 /*
196 * A signal was received, but the graph
197 * is not being torn down. Carry on.
198 */
199 continue;
200 }
201 } else {
202 /*
203 * For any other type of socket error, close
204 * the socket and return an error.
205 */
206 LTTNG_LIVE_CPPLOGE_APPEND_CAUSE_ERRNO("Error sending to Relay", ".");
207
208 viewer_connection_close_socket(viewer_connection);
209 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
210 goto end;
211 }
212 }
213
214 BT_ASSERT(sent <= to_send);
215 total_sent += sent;
216 to_send -= sent;
217
218 } while (to_send > 0);
219
220 BT_ASSERT(total_sent == len);
221 status = LTTNG_LIVE_VIEWER_STATUS_OK;
222
223 end:
224 return status;
225 }
226
227 static int parse_url(struct live_viewer_connection *viewer_connection)
228 {
229 char error_buf[256] = {0};
230 struct bt_common_lttng_live_url_parts lttng_live_url_parts = {};
231 int ret = -1;
232 const char *path = viewer_connection->url->str;
233
234 if (!path) {
235 goto end;
236 }
237
238 lttng_live_url_parts = bt_common_parse_lttng_live_url(path, error_buf, sizeof(error_buf));
239 if (!lttng_live_url_parts.proto) {
240 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Invalid LTTng live URL format: {}",
241 error_buf);
242 goto end;
243 }
244 viewer_connection->proto = lttng_live_url_parts.proto;
245 lttng_live_url_parts.proto = NULL;
246
247 viewer_connection->relay_hostname = lttng_live_url_parts.hostname;
248 lttng_live_url_parts.hostname = NULL;
249
250 if (lttng_live_url_parts.port >= 0) {
251 viewer_connection->port = lttng_live_url_parts.port;
252 } else {
253 viewer_connection->port = LTTNG_DEFAULT_NETWORK_VIEWER_PORT;
254 }
255
256 viewer_connection->target_hostname = lttng_live_url_parts.target_hostname;
257 lttng_live_url_parts.target_hostname = NULL;
258
259 if (lttng_live_url_parts.session_name) {
260 viewer_connection->session_name = lttng_live_url_parts.session_name;
261 lttng_live_url_parts.session_name = NULL;
262 }
263
264 ret = 0;
265
266 end:
267 bt_common_destroy_lttng_live_url_parts(&lttng_live_url_parts);
268 return ret;
269 }
270
271 static enum lttng_live_viewer_status
272 lttng_live_handshake(struct live_viewer_connection *viewer_connection)
273 {
274 struct lttng_viewer_cmd cmd;
275 struct lttng_viewer_connect connect;
276 enum lttng_live_viewer_status status;
277 const size_t cmd_buf_len = sizeof(cmd) + sizeof(connect);
278 char cmd_buf[cmd_buf_len];
279
280 BT_CPPLOGD_SPEC(viewer_connection->logger,
281 "Handshaking with the relay daemon: cmd={}, major-version={}, minor-version={}",
282 LTTNG_VIEWER_CONNECT, LTTNG_LIVE_MAJOR, LTTNG_LIVE_MINOR);
283
284 cmd.cmd = htobe32(LTTNG_VIEWER_CONNECT);
285 cmd.data_size = htobe64((uint64_t) sizeof(connect));
286 cmd.cmd_version = htobe32(0);
287
288 connect.viewer_session_id = -1ULL; /* will be set on recv */
289 connect.major = htobe32(LTTNG_LIVE_MAJOR);
290 connect.minor = htobe32(LTTNG_LIVE_MINOR);
291 connect.type = htobe32(LTTNG_VIEWER_CLIENT_COMMAND);
292
293 /*
294 * Merge the cmd and connection request to prevent a write-write
295 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
296 * second write from being performed quickly in the presence of Nagle's algorithm.
297 */
298 memcpy(cmd_buf, &cmd, sizeof(cmd));
299 memcpy(cmd_buf + sizeof(cmd), &connect, sizeof(connect));
300
301 status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
302 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
303 viewer_handle_send_status(status, "viewer connect command");
304 goto end;
305 }
306
307 status = lttng_live_recv(viewer_connection, &connect, sizeof(connect));
308 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
309 viewer_handle_recv_status(status, "viewer connect reply");
310 goto end;
311 }
312
313 BT_CPPLOGI_SPEC(viewer_connection->logger, "Received viewer session ID : {}",
314 (uint64_t) be64toh(connect.viewer_session_id));
315 BT_CPPLOGI_SPEC(viewer_connection->logger, "Relayd version : {}.{}", be32toh(connect.major),
316 be32toh(connect.minor));
317
318 if (LTTNG_LIVE_MAJOR != be32toh(connect.major)) {
319 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
320 "Incompatible lttng-relayd protocol");
321 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
322 goto end;
323 }
324 /* Use the smallest protocol version implemented. */
325 if (LTTNG_LIVE_MINOR > be32toh(connect.minor)) {
326 viewer_connection->minor = be32toh(connect.minor);
327 } else {
328 viewer_connection->minor = LTTNG_LIVE_MINOR;
329 }
330 viewer_connection->major = LTTNG_LIVE_MAJOR;
331
332 status = LTTNG_LIVE_VIEWER_STATUS_OK;
333
334 goto end;
335
336 end:
337 return status;
338 }
339
340 static enum lttng_live_viewer_status
341 lttng_live_connect_viewer(struct live_viewer_connection *viewer_connection)
342 {
343 struct hostent *host;
344 struct sockaddr_in server_addr;
345 enum lttng_live_viewer_status status;
346
347 if (parse_url(viewer_connection)) {
348 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Failed to parse URL");
349 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
350 goto error;
351 }
352
353 BT_CPPLOGD_SPEC(
354 viewer_connection->logger,
355 "Connecting to hostname : {}, port : {}, target hostname : {}, session name : {}, proto : {}",
356 viewer_connection->relay_hostname->str, viewer_connection->port,
357 !viewer_connection->target_hostname ? "<none>" : viewer_connection->target_hostname->str,
358 !viewer_connection->session_name ? "<none>" : viewer_connection->session_name->str,
359 viewer_connection->proto->str);
360
361 host = gethostbyname(viewer_connection->relay_hostname->str);
362 if (!host) {
363 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
364 "Cannot lookup hostname: hostname=\"{}\"",
365 viewer_connection->relay_hostname->str);
366 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
367 goto error;
368 }
369
370 if ((viewer_connection->control_sock = socket(AF_INET, SOCK_STREAM, 0)) == BT_INVALID_SOCKET) {
371 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Socket creation failed: {}",
372 bt_socket_errormsg());
373 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
374 goto error;
375 }
376
377 server_addr.sin_family = AF_INET;
378 server_addr.sin_port = htons(viewer_connection->port);
379 server_addr.sin_addr = *((struct in_addr *) host->h_addr);
380 memset(&(server_addr.sin_zero), 0, 8);
381
382 if (connect(viewer_connection->control_sock, (struct sockaddr *) &server_addr,
383 sizeof(struct sockaddr)) == BT_SOCKET_ERROR) {
384 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Connection failed: {}",
385 bt_socket_errormsg());
386 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
387 goto error;
388 }
389
390 status = lttng_live_handshake(viewer_connection);
391
392 /*
393 * Only print the error and append the cause in case of error, not in case of
394 * interruption.
395 */
396 if (status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
397 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Viewer handshake failed");
398 goto error;
399 } else if (status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
400 goto end;
401 }
402
403 goto end;
404
405 error:
406 if (viewer_connection->control_sock != BT_INVALID_SOCKET) {
407 if (bt_socket_close(viewer_connection->control_sock) == BT_SOCKET_ERROR) {
408 BT_CPPLOGW_SPEC(viewer_connection->logger, "Error closing socket: {}.",
409 bt_socket_errormsg());
410 }
411 }
412 viewer_connection->control_sock = BT_INVALID_SOCKET;
413 end:
414 return status;
415 }
416
417 static void lttng_live_disconnect_viewer(struct live_viewer_connection *viewer_connection)
418 {
419 if (viewer_connection->control_sock == BT_INVALID_SOCKET) {
420 return;
421 }
422 if (bt_socket_close(viewer_connection->control_sock) == BT_SOCKET_ERROR) {
423 BT_CPPLOGW_SPEC(viewer_connection->logger, "Error closing socket: {}",
424 bt_socket_errormsg());
425 viewer_connection->control_sock = BT_INVALID_SOCKET;
426 }
427 }
428
429 static int list_update_session(bt_value *results, const struct lttng_viewer_session *session,
430 bool *_found, struct live_viewer_connection *viewer_connection)
431 {
432 int ret = 0;
433 uint64_t i, len;
434 bt_value *map = NULL;
435 bt_value *hostname = NULL;
436 bt_value *session_name = NULL;
437 bt_value *btval = NULL;
438 bool found = false;
439
440 len = bt_value_array_get_length(results);
441 for (i = 0; i < len; i++) {
442 const char *hostname_str = NULL;
443 const char *session_name_str = NULL;
444
445 map = bt_value_array_borrow_element_by_index(results, i);
446 hostname = bt_value_map_borrow_entry_value(map, "target-hostname");
447 if (!hostname) {
448 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
449 "Error borrowing \"target-hostname\" entry.");
450 ret = -1;
451 goto end;
452 }
453 session_name = bt_value_map_borrow_entry_value(map, "session-name");
454 if (!session_name) {
455 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
456 "Error borrowing \"session-name\" entry.");
457 ret = -1;
458 goto end;
459 }
460 hostname_str = bt_value_string_get(hostname);
461 session_name_str = bt_value_string_get(session_name);
462
463 if (strcmp(session->hostname, hostname_str) == 0 &&
464 strcmp(session->session_name, session_name_str) == 0) {
465 int64_t val;
466 uint32_t streams = be32toh(session->streams);
467 uint32_t clients = be32toh(session->clients);
468
469 found = true;
470
471 btval = bt_value_map_borrow_entry_value(map, "stream-count");
472 if (!btval) {
473 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
474 "Error borrowing \"stream-count\" entry.");
475 ret = -1;
476 goto end;
477 }
478 val = bt_value_integer_unsigned_get(btval);
479 /* sum */
480 val += streams;
481 bt_value_integer_unsigned_set(btval, val);
482
483 btval = bt_value_map_borrow_entry_value(map, "client-count");
484 if (!btval) {
485 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
486 "Error borrowing \"client-count\" entry.");
487 ret = -1;
488 goto end;
489 }
490 val = bt_value_integer_unsigned_get(btval);
491 /* max */
492 val = bt_max_t(int64_t, clients, val);
493 bt_value_integer_unsigned_set(btval, val);
494 }
495
496 if (found) {
497 break;
498 }
499 }
500 end:
501 *_found = found;
502 return ret;
503 }
504
505 static int list_append_session(bt_value *results, GString *base_url,
506 const struct lttng_viewer_session *session,
507 struct live_viewer_connection *viewer_connection)
508 {
509 int ret = 0;
510 bt_value_map_insert_entry_status insert_status;
511 bt_value_array_append_element_status append_status;
512 bt_value *map = NULL;
513 GString *url = NULL;
514 bool found = false;
515
516 /*
517 * If the session already exists, add the stream count to it,
518 * and take the maximum of the client counts.
519 */
520 ret = list_update_session(results, session, &found, viewer_connection);
521 if (ret || found) {
522 goto end;
523 }
524
525 map = bt_value_map_create();
526 if (!map) {
527 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error creating map value.");
528 ret = -1;
529 goto end;
530 }
531
532 if (base_url->len < 1) {
533 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
534 "Error: base_url length smaller than 1.");
535 ret = -1;
536 goto end;
537 }
538 /*
539 * key = "url",
540 * value = <string>,
541 */
542 url = g_string_new(base_url->str);
543 g_string_append(url, "/host/");
544 g_string_append(url, session->hostname);
545 g_string_append_c(url, '/');
546 g_string_append(url, session->session_name);
547
548 insert_status = bt_value_map_insert_string_entry(map, "url", url->str);
549 if (insert_status != BT_VALUE_MAP_INSERT_ENTRY_STATUS_OK) {
550 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error inserting \"url\" entry.");
551 ret = -1;
552 goto end;
553 }
554
555 /*
556 * key = "target-hostname",
557 * value = <string>,
558 */
559 insert_status = bt_value_map_insert_string_entry(map, "target-hostname", session->hostname);
560 if (insert_status != BT_VALUE_MAP_INSERT_ENTRY_STATUS_OK) {
561 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
562 "Error inserting \"target-hostname\" entry.");
563 ret = -1;
564 goto end;
565 }
566
567 /*
568 * key = "session-name",
569 * value = <string>,
570 */
571 insert_status = bt_value_map_insert_string_entry(map, "session-name", session->session_name);
572 if (insert_status != BT_VALUE_MAP_INSERT_ENTRY_STATUS_OK) {
573 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
574 "Error inserting \"session-name\" entry.");
575 ret = -1;
576 goto end;
577 }
578
579 /*
580 * key = "timer-us",
581 * value = <integer>,
582 */
583 {
584 uint32_t live_timer = be32toh(session->live_timer);
585
586 insert_status = bt_value_map_insert_unsigned_integer_entry(map, "timer-us", live_timer);
587 if (insert_status != BT_VALUE_MAP_INSERT_ENTRY_STATUS_OK) {
588 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
589 "Error inserting \"timer-us\" entry.");
590 ret = -1;
591 goto end;
592 }
593 }
594
595 /*
596 * key = "stream-count",
597 * value = <integer>,
598 */
599 {
600 uint32_t streams = be32toh(session->streams);
601
602 insert_status = bt_value_map_insert_unsigned_integer_entry(map, "stream-count", streams);
603 if (insert_status != BT_VALUE_MAP_INSERT_ENTRY_STATUS_OK) {
604 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
605 "Error inserting \"stream-count\" entry.");
606 ret = -1;
607 goto end;
608 }
609 }
610
611 /*
612 * key = "client-count",
613 * value = <integer>,
614 */
615 {
616 uint32_t clients = be32toh(session->clients);
617
618 insert_status = bt_value_map_insert_unsigned_integer_entry(map, "client-count", clients);
619 if (insert_status != BT_VALUE_MAP_INSERT_ENTRY_STATUS_OK) {
620 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
621 "Error inserting \"client-count\" entry.");
622 ret = -1;
623 goto end;
624 }
625 }
626
627 append_status = bt_value_array_append_element(results, map);
628 if (append_status != BT_VALUE_ARRAY_APPEND_ELEMENT_STATUS_OK) {
629 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error appending map to results.");
630 ret = -1;
631 }
632
633 end:
634 if (url) {
635 g_string_free(url, true);
636 }
637 BT_VALUE_PUT_REF_AND_RESET(map);
638 return ret;
639 }
640
641 /*
642 * Data structure returned:
643 *
644 * {
645 * <array> = {
646 * [n] = {
647 * <map> = {
648 * {
649 * key = "url",
650 * value = <string>,
651 * },
652 * {
653 * key = "target-hostname",
654 * value = <string>,
655 * },
656 * {
657 * key = "session-name",
658 * value = <string>,
659 * },
660 * {
661 * key = "timer-us",
662 * value = <integer>,
663 * },
664 * {
665 * key = "stream-count",
666 * value = <integer>,
667 * },
668 * {
669 * key = "client-count",
670 * value = <integer>,
671 * },
672 * },
673 * }
674 * }
675 */
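/*
 * Illustrative sketch only (not part of this plug-in's API): how a caller of
 * live_viewer_connection_list_sessions() might walk the array described above.
 * The `print_session_entry` helper name is hypothetical; the bt_value accessors
 * are the usual libbabeltrace2 ones (const variants of the accessors used
 * elsewhere in this file).
 */
#if 0
static void print_session_entry(const bt_value *map)
{
    /* Each array element is a map with the keys documented above. */
    const bt_value *url = bt_value_map_borrow_entry_value_const(map, "url");
    const bt_value *streams = bt_value_map_borrow_entry_value_const(map, "stream-count");

    printf("url=%s, stream-count=%llu\n", bt_value_string_get(url),
           (unsigned long long) bt_value_integer_unsigned_get(streams));
}
#endif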
676
677 bt_component_class_query_method_status
678 live_viewer_connection_list_sessions(struct live_viewer_connection *viewer_connection,
679 const bt_value **user_result)
680 {
681 bt_component_class_query_method_status status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_OK;
682 bt_value *result = NULL;
683 enum lttng_live_viewer_status viewer_status;
684 struct lttng_viewer_cmd cmd;
685 struct lttng_viewer_list_sessions list;
686 uint32_t i, sessions_count;
687
688 result = bt_value_array_create();
689 if (!result) {
690 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error creating array");
691 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_MEMORY_ERROR;
692 goto error;
693 }
694
695 BT_CPPLOGD_SPEC(viewer_connection->logger, "Requesting list of sessions: cmd={}",
696 LTTNG_VIEWER_LIST_SESSIONS);
697
698 cmd.cmd = htobe32(LTTNG_VIEWER_LIST_SESSIONS);
699 cmd.data_size = htobe64((uint64_t) 0);
700 cmd.cmd_version = htobe32(0);
701
702 viewer_status = lttng_live_send(viewer_connection, &cmd, sizeof(cmd));
703 if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
704 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
705 "Error sending list sessions command");
706 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_ERROR;
707 goto error;
708 } else if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
709 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_AGAIN;
710 goto error;
711 }
712
713 viewer_status = lttng_live_recv(viewer_connection, &list, sizeof(list));
714 if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
715 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error receiving session list");
716 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_ERROR;
717 goto error;
718 } else if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
719 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_AGAIN;
720 goto error;
721 }
722
723 sessions_count = be32toh(list.sessions_count);
724 for (i = 0; i < sessions_count; i++) {
725 struct lttng_viewer_session lsession;
726
727 viewer_status = lttng_live_recv(viewer_connection, &lsession, sizeof(lsession));
728 if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
729 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error receiving session:");
730 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_ERROR;
731 goto error;
732 } else if (viewer_status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
733 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_AGAIN;
734 goto error;
735 }
736
737 lsession.hostname[LTTNG_VIEWER_HOST_NAME_MAX - 1] = '\0';
738 lsession.session_name[LTTNG_VIEWER_NAME_MAX - 1] = '\0';
739 if (list_append_session(result, viewer_connection->url, &lsession, viewer_connection)) {
740 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error appending session");
741 status = BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_ERROR;
742 goto error;
743 }
744 }
745
746 *user_result = result;
747 goto end;
748 error:
749 BT_VALUE_PUT_REF_AND_RESET(result);
750 end:
751 return status;
752 }
753
754 static enum lttng_live_viewer_status
755 lttng_live_query_session_ids(struct lttng_live_msg_iter *lttng_live_msg_iter)
756 {
757 struct lttng_viewer_cmd cmd;
758 struct lttng_viewer_list_sessions list;
759 struct lttng_viewer_session lsession;
760 uint32_t i, sessions_count;
761 uint64_t session_id;
762 enum lttng_live_viewer_status status;
763 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
764
765 BT_CPPLOGD_SPEC(viewer_connection->logger,
766 "Asking the relay daemon for the list of sessions: cmd={}",
767 LTTNG_VIEWER_LIST_SESSIONS);
768
769 cmd.cmd = htobe32(LTTNG_VIEWER_LIST_SESSIONS);
770 cmd.data_size = htobe64((uint64_t) 0);
771 cmd.cmd_version = htobe32(0);
772
773 status = lttng_live_send(viewer_connection, &cmd, sizeof(cmd));
774 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
775 viewer_handle_send_status(status, "list sessions command");
776 goto end;
777 }
778
779 status = lttng_live_recv(viewer_connection, &list, sizeof(list));
780 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
781 viewer_handle_recv_status(status, "session list reply");
782 goto end;
783 }
784
785 sessions_count = be32toh(list.sessions_count);
786 for (i = 0; i < sessions_count; i++) {
787 status = lttng_live_recv(viewer_connection, &lsession, sizeof(lsession));
788 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
789 viewer_handle_recv_status(status, "session reply");
790 goto end;
791 }
792 lsession.hostname[LTTNG_VIEWER_HOST_NAME_MAX - 1] = '\0';
793 lsession.session_name[LTTNG_VIEWER_NAME_MAX - 1] = '\0';
794 session_id = be64toh(lsession.id);
795
796 BT_CPPLOGI_SPEC(viewer_connection->logger,
797 "Adding session to internal list: "
798 "session-id={}, hostname=\"{}\", session-name=\"{}\"",
799 session_id, lsession.hostname, lsession.session_name);
800
801 if ((strncmp(lsession.session_name, viewer_connection->session_name->str,
802 LTTNG_VIEWER_NAME_MAX) == 0) &&
803 (strncmp(lsession.hostname, viewer_connection->target_hostname->str,
804 LTTNG_VIEWER_HOST_NAME_MAX) == 0)) {
805 if (lttng_live_add_session(lttng_live_msg_iter, session_id, lsession.hostname,
806 lsession.session_name)) {
807 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
808 "Failed to add live session");
809 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
810 goto end;
811 }
812 }
813 }
814
815 status = LTTNG_LIVE_VIEWER_STATUS_OK;
816
817 end:
818 return status;
819 }
820
821 enum lttng_live_viewer_status
822 lttng_live_create_viewer_session(struct lttng_live_msg_iter *lttng_live_msg_iter)
823 {
824 struct lttng_viewer_cmd cmd;
825 struct lttng_viewer_create_session_response resp;
826 enum lttng_live_viewer_status status;
827 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
828
829 BT_CPPLOGD_SPEC(viewer_connection->logger, "Creating a viewer session: cmd={}",
830 LTTNG_VIEWER_CREATE_SESSION);
831
832 cmd.cmd = htobe32(LTTNG_VIEWER_CREATE_SESSION);
833 cmd.data_size = htobe64((uint64_t) 0);
834 cmd.cmd_version = htobe32(0);
835
836 status = lttng_live_send(viewer_connection, &cmd, sizeof(cmd));
837 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
838 viewer_handle_send_status(status, "create session command");
839 goto end;
840 }
841
842 status = lttng_live_recv(viewer_connection, &resp, sizeof(resp));
843 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
844 viewer_handle_recv_status(status, "create session reply");
845 goto end;
846 }
847
848 if (be32toh(resp.status) != LTTNG_VIEWER_CREATE_SESSION_OK) {
849 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error creating viewer session");
850 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
851 goto end;
852 }
853
854 status = lttng_live_query_session_ids(lttng_live_msg_iter);
855 if (status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
856 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
857 "Failed to query live viewer session ids");
858 goto end;
859 } else if (status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
860 goto end;
861 }
862
863 end:
864 return status;
865 }
866
867 static enum lttng_live_viewer_status receive_streams(struct lttng_live_session *session,
868 uint32_t stream_count,
869 bt_self_message_iterator *self_msg_iter)
870 {
871 uint32_t i;
872 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
873 enum lttng_live_viewer_status status;
874 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
875
876 BT_CPPLOGI_SPEC(viewer_connection->logger, "Getting {} new streams", stream_count);
877 for (i = 0; i < stream_count; i++) {
878 struct lttng_viewer_stream stream;
879 struct lttng_live_stream_iterator *live_stream;
880 uint64_t stream_id;
881 uint64_t ctf_trace_id;
882
883 status = lttng_live_recv(viewer_connection, &stream, sizeof(stream));
884 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
885 viewer_handle_recv_status(status, "stream reply");
886 goto end;
887 }
888 stream.path_name[LTTNG_VIEWER_PATH_MAX - 1] = '\0';
889 stream.channel_name[LTTNG_VIEWER_NAME_MAX - 1] = '\0';
890 stream_id = be64toh(stream.id);
891 ctf_trace_id = be64toh(stream.ctf_trace_id);
892
893 if (stream.metadata_flag) {
894 BT_CPPLOGI_SPEC(viewer_connection->logger, " metadata stream {} : {}/{}", stream_id,
895 stream.path_name, stream.channel_name);
896 if (lttng_live_metadata_create_stream(session, ctf_trace_id, stream_id)) {
897 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
898 "Error creating metadata stream");
899 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
900 goto end;
901 }
902 session->lazy_stream_msg_init = true;
903 } else {
904 BT_CPPLOGI_SPEC(viewer_connection->logger, " stream {} : {}/{}", stream_id,
905 stream.path_name, stream.channel_name);
906 live_stream =
907 lttng_live_stream_iterator_create(session, ctf_trace_id, stream_id, self_msg_iter);
908 if (!live_stream) {
909 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error creating stream");
910 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
911 goto end;
912 }
913 }
914 }
915 status = LTTNG_LIVE_VIEWER_STATUS_OK;
916
917 end:
918 return status;
919 }
920
921 enum lttng_live_viewer_status lttng_live_session_attach(struct lttng_live_session *session,
922 bt_self_message_iterator *self_msg_iter)
923 {
924 struct lttng_viewer_cmd cmd;
925 enum lttng_live_viewer_status status;
926 struct lttng_viewer_attach_session_request rq;
927 struct lttng_viewer_attach_session_response rp;
928 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
929 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
930 uint64_t session_id = session->id;
931 uint32_t streams_count;
932 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
933 char cmd_buf[cmd_buf_len];
934
935 BT_CPPLOGD_SPEC(viewer_connection->logger,
936 "Attaching to session: cmd={}, session-id={}, seek={}",
937 LTTNG_VIEWER_ATTACH_SESSION, session_id, LTTNG_VIEWER_SEEK_LAST);
938
939 cmd.cmd = htobe32(LTTNG_VIEWER_ATTACH_SESSION);
940 cmd.data_size = htobe64((uint64_t) sizeof(rq));
941 cmd.cmd_version = htobe32(0);
942
943 memset(&rq, 0, sizeof(rq));
944 rq.session_id = htobe64(session_id);
945 // TODO: add cmd line parameter to select seek beginning
946 // rq.seek = htobe32(LTTNG_VIEWER_SEEK_BEGINNING);
947 rq.seek = htobe32(LTTNG_VIEWER_SEEK_LAST);
948
949 /*
950 * Merge the cmd and connection request to prevent a write-write
951 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
952 * second write from being performed quickly in the presence of Nagle's algorithm.
953 */
954 memcpy(cmd_buf, &cmd, sizeof(cmd));
955 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
956 status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
957 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
958 viewer_handle_send_status(status, "attach session command");
959 goto end;
960 }
961
962 status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
963 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
964 viewer_handle_recv_status(status, "attach session reply");
965 goto end;
966 }
967
968 streams_count = be32toh(rp.streams_count);
969 switch (be32toh(rp.status)) {
970 case LTTNG_VIEWER_ATTACH_OK:
971 break;
972 case LTTNG_VIEWER_ATTACH_UNK:
973 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Session id {} is unknown",
974 session_id);
975 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
976 goto end;
977 case LTTNG_VIEWER_ATTACH_ALREADY:
978 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
979 "There is already a viewer attached to this session");
980 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
981 goto end;
982 case LTTNG_VIEWER_ATTACH_NOT_LIVE:
983 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Not a live session");
984 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
985 goto end;
986 case LTTNG_VIEWER_ATTACH_SEEK_ERR:
987 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Wrong seek parameter");
988 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
989 goto end;
990 default:
991 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Unknown attach return code {}",
992 be32toh(rp.status));
993 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
994 goto end;
995 }
996
997 /* We receive the initial list of streams. */
998 status = receive_streams(session, streams_count, self_msg_iter);
999 switch (status) {
1000 case LTTNG_LIVE_VIEWER_STATUS_OK:
1001 break;
1002 case LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED:
1003 goto end;
1004 case LTTNG_LIVE_VIEWER_STATUS_ERROR:
1005 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Error receiving streams");
1006 goto end;
1007 default:
1008 bt_common_abort();
1009 }
1010
1011 session->attached = true;
1012 session->new_streams_needed = false;
1013
1014 end:
1015 return status;
1016 }
1017
1018 enum lttng_live_viewer_status lttng_live_session_detach(struct lttng_live_session *session)
1019 {
1020 struct lttng_viewer_cmd cmd;
1021 enum lttng_live_viewer_status status;
1022 struct lttng_viewer_detach_session_request rq;
1023 struct lttng_viewer_detach_session_response rp;
1024 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
1025 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
1026 uint64_t session_id = session->id;
1027 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
1028 char cmd_buf[cmd_buf_len];
1029
1030 /*
1031 * The session might already be detached and the viewer socket might
1032 * already have been closed. This happens when this function is called while
1033 * tearing down the graph after an error.
1034 */
1035 if (!session->attached || viewer_connection->control_sock == BT_INVALID_SOCKET) {
1036 return LTTNG_LIVE_VIEWER_STATUS_OK;
1037 }
1038
1039 BT_CPPLOGD_SPEC(viewer_connection->logger, "Detaching from session: cmd={}, session-id={}",
1040 LTTNG_VIEWER_DETACH_SESSION, session_id);
1041
1042 cmd.cmd = htobe32(LTTNG_VIEWER_DETACH_SESSION);
1043 cmd.data_size = htobe64((uint64_t) sizeof(rq));
1044 cmd.cmd_version = htobe32(0);
1045
1046 memset(&rq, 0, sizeof(rq));
1047 rq.session_id = htobe64(session_id);
1048
1049 /*
1050 * Merge the cmd and connection request to prevent a write-write
1051 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
1052 * second write from being performed quickly in the presence of Nagle's algorithm.
1053 */
1054 memcpy(cmd_buf, &cmd, sizeof(cmd));
1055 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
1056 status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
1057 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1058 viewer_handle_send_status(status, "detach session command");
1059 goto end;
1060 }
1061
1062 status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
1063 if (status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1064 viewer_handle_recv_status(status, "detach session reply");
1065 goto end;
1066 }
1067
1068 switch (be32toh(rp.status)) {
1069 case LTTNG_VIEWER_DETACH_SESSION_OK:
1070 break;
1071 case LTTNG_VIEWER_DETACH_SESSION_UNK:
1072 BT_CPPLOGW_SPEC(viewer_connection->logger, "Session id {} is unknown", session_id);
1073 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
1074 goto end;
1075 case LTTNG_VIEWER_DETACH_SESSION_ERR:
1076 BT_CPPLOGW_SPEC(viewer_connection->logger, "Error detaching session id {}", session_id);
1077 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
1078 goto end;
1079 default:
1080 BT_CPPLOGE_SPEC(viewer_connection->logger, "Unknown detach return code {}",
1081 be32toh(rp.status));
1082 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
1083 goto end;
1084 }
1085
1086 session->attached = false;
1087
1088 status = LTTNG_LIVE_VIEWER_STATUS_OK;
1089
1090 end:
1091 return status;
1092 }
1093
1094 enum lttng_live_get_one_metadata_status
1095 lttng_live_get_one_metadata_packet(struct lttng_live_trace *trace, std::vector<char>& buf)
1096 {
1097 uint64_t len = 0;
1098 enum lttng_live_get_one_metadata_status status;
1099 enum lttng_live_viewer_status viewer_status;
1100 struct lttng_viewer_cmd cmd;
1101 struct lttng_viewer_get_metadata rq;
1102 struct lttng_viewer_metadata_packet rp;
1103 gchar *data = NULL;
1104 struct lttng_live_session *session = trace->session;
1105 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
1106 struct lttng_live_metadata *metadata = trace->metadata;
1107 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
1108 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
1109 char cmd_buf[cmd_buf_len];
1110
1111 BT_CPPLOGD_SPEC(viewer_connection->logger,
1112 "Requesting new metadata for trace:"
1113 "cmd={}, trace-id={}, metadata-stream-id={}",
1114 LTTNG_VIEWER_GET_METADATA, trace->id, metadata->stream_id);
1115
1116 rq.stream_id = htobe64(metadata->stream_id);
1117 cmd.cmd = htobe32(LTTNG_VIEWER_GET_METADATA);
1118 cmd.data_size = htobe64((uint64_t) sizeof(rq));
1119 cmd.cmd_version = htobe32(0);
1120
1121 /*
1122 * Merge the cmd and connection request to prevent a write-write
1123 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
1124 * second write from being performed quickly in the presence of Nagle's algorithm.
1125 */
1126 memcpy(cmd_buf, &cmd, sizeof(cmd));
1127 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
1128 viewer_status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
1129 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1130 viewer_handle_send_status(viewer_status, "get metadata command");
1131 status = (enum lttng_live_get_one_metadata_status) viewer_status;
1132 goto end;
1133 }
1134
1135 viewer_status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
1136 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1137 viewer_handle_recv_status(viewer_status, "get metadata reply");
1138 status = (enum lttng_live_get_one_metadata_status) viewer_status;
1139 goto end;
1140 }
1141
1142 switch (be32toh(rp.status)) {
1143 case LTTNG_VIEWER_METADATA_OK:
1144 BT_CPPLOGD_SPEC(viewer_connection->logger, "Received get_metadata response: ok");
1145 break;
1146 case LTTNG_VIEWER_NO_NEW_METADATA:
1147 BT_CPPLOGD_SPEC(viewer_connection->logger, "Received get_metadata response: no new");
1148 status = LTTNG_LIVE_GET_ONE_METADATA_STATUS_END;
1149 goto end;
1150 case LTTNG_VIEWER_METADATA_ERR:
1151 /*
1152 * The Relayd cannot find this stream id. Maybe it's
1153 * gone already. This can happen with a short-lived UST app
1154 * in a per-PID session.
1155 */
1156 BT_CPPLOGD_SPEC(viewer_connection->logger, "Received get_metadata response: error");
1157 status = LTTNG_LIVE_GET_ONE_METADATA_STATUS_CLOSED;
1158 goto end;
1159 default:
1160 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1161 "Received get_metadata response: unknown");
1162 status = LTTNG_LIVE_GET_ONE_METADATA_STATUS_ERROR;
1163 goto end;
1164 }
1165
1166 len = be64toh(rp.len);
1167 if (len == 0) {
1168 /*
1169 * We received a `LTTNG_VIEWER_METADATA_OK` with a packet
1170 * length of 0. This means we must try again. This scenario
1171 * arises when a clear command is performed on an lttng session.
1172 */
1173 BT_CPPLOGD_SPEC(
1174 viewer_connection->logger,
1175 "Expecting a metadata packet of size 0. Retry to get a packet from the relay.");
1176 goto empty_metadata_packet_retry;
1177 }
1178
1179 BT_CPPLOGD_SPEC(viewer_connection->logger, "Writing {} bytes to metadata", len);
1180 if (len <= 0) {
1181 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Erroneous response length");
1182 status = LTTNG_LIVE_GET_ONE_METADATA_STATUS_ERROR;
1183 goto end;
1184 }
1185
1186 data = g_new0(gchar, len);
1187 if (!data) {
1188 BT_CPPLOGE_ERRNO_APPEND_CAUSE_SPEC(viewer_connection->logger,
1189 "Failed to allocate data buffer", ".");
1190 status = LTTNG_LIVE_GET_ONE_METADATA_STATUS_ERROR;
1191 goto end;
1192 }
1193
1194 viewer_status = lttng_live_recv(viewer_connection, data, len);
1195 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1196 viewer_handle_recv_status(viewer_status, "get metadata packet");
1197 status = (enum lttng_live_get_one_metadata_status) viewer_status;
1198 goto end;
1199 }
1200
1201 /*
1202 * Append the metadata to the buffer.
1203 */
1204 buf.insert(buf.end(), data, data + len);
1205
1206 empty_metadata_packet_retry:
1207 status = LTTNG_LIVE_GET_ONE_METADATA_STATUS_OK;
1208
1209 end:
1210 g_free(data);
1211 return status;
1212 }
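/*
 * Illustrative sketch only (not the actual lttng_live_metadata_update()
 * implementation in metadata.cpp): a caller can keep requesting packets and
 * let lttng_live_get_one_metadata_packet() append each one to a single
 * std::vector<char> until the relay reports no new metadata. The
 * `fetch_all_metadata` name and control flow are simplified assumptions.
 */
#if 0
static bool fetch_all_metadata(struct lttng_live_trace *trace, std::vector<char>& metadata_buf)
{
    while (true) {
        switch (lttng_live_get_one_metadata_packet(trace, metadata_buf)) {
        case LTTNG_LIVE_GET_ONE_METADATA_STATUS_OK:
            /* One more packet was appended to metadata_buf; keep going. */
            continue;
        case LTTNG_LIVE_GET_ONE_METADATA_STATUS_END:
            /* No new metadata for now: metadata_buf holds everything received. */
            return true;
        default:
            /* _CLOSED or _ERROR: let the caller decide what to do. */
            return false;
        }
    }
}
#endif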
1213
1214 /*
1215 * Assign the fields from a lttng_viewer_index to a packet_index.
1216 */
1217 static void lttng_index_to_packet_index(struct lttng_viewer_index *lindex,
1218 struct packet_index *pindex)
1219 {
1220 BT_ASSERT(lindex);
1221 BT_ASSERT(pindex);
1222
1223 pindex->offset = be64toh(lindex->offset);
1224 pindex->packet_size = be64toh(lindex->packet_size);
1225 pindex->content_size = be64toh(lindex->content_size);
1226 pindex->ts_cycles.timestamp_begin = be64toh(lindex->timestamp_begin);
1227 pindex->ts_cycles.timestamp_end = be64toh(lindex->timestamp_end);
1228 pindex->events_discarded = be64toh(lindex->events_discarded);
1229 }
1230
1231 static void lttng_live_need_new_streams(struct lttng_live_msg_iter *lttng_live_msg_iter)
1232 {
1233 uint64_t session_idx;
1234
1235 for (session_idx = 0; session_idx < lttng_live_msg_iter->sessions->len; session_idx++) {
1236 struct lttng_live_session *session =
1237 (lttng_live_session *) g_ptr_array_index(lttng_live_msg_iter->sessions, session_idx);
1238 BT_CPPLOGD_SPEC(lttng_live_msg_iter->logger,
1239 "Marking session as needing new streams: "
1240 "session-id={}",
1241 session->id);
1242 session->new_streams_needed = true;
1243 }
1244 }
1245
1246 enum lttng_live_iterator_status
1247 lttng_live_get_next_index(struct lttng_live_msg_iter *lttng_live_msg_iter,
1248 struct lttng_live_stream_iterator *stream, struct packet_index *index)
1249 {
1250 struct lttng_viewer_cmd cmd;
1251 struct lttng_viewer_get_next_index rq;
1252 enum lttng_live_viewer_status viewer_status;
1253 struct lttng_viewer_index rp;
1254 enum lttng_live_iterator_status status;
1255 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
1256 struct lttng_live_trace *trace = stream->trace;
1257 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
1258 char cmd_buf[cmd_buf_len];
1259 uint32_t flags, rp_status;
1260
1261 BT_CPPLOGD_SPEC(viewer_connection->logger,
1262 "Requesting next index for stream: cmd={}, "
1263 "viewer-stream-id={}",
1264 LTTNG_VIEWER_GET_NEXT_INDEX, stream->viewer_stream_id);
1265 cmd.cmd = htobe32(LTTNG_VIEWER_GET_NEXT_INDEX);
1266 cmd.data_size = htobe64((uint64_t) sizeof(rq));
1267 cmd.cmd_version = htobe32(0);
1268
1269 memset(&rq, 0, sizeof(rq));
1270 rq.stream_id = htobe64(stream->viewer_stream_id);
1271
1272 /*
1273 * Merge the cmd and connection request to prevent a write-write
1274 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
1275 * second write from being performed quickly in the presence of Nagle's algorithm.
1276 */
1277 memcpy(cmd_buf, &cmd, sizeof(cmd));
1278 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
1279
1280 viewer_status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
1281 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1282 viewer_handle_send_status(viewer_status, "get next index command");
1283 goto error;
1284 }
1285
1286 viewer_status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
1287 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1288 viewer_handle_recv_status(viewer_status, "get next index reply");
1289 goto error;
1290 }
1291
1292 flags = be32toh(rp.flags);
1293 rp_status = be32toh(rp.status);
1294
1295 BT_CPPLOGD_SPEC(
1296 viewer_connection->logger, "Received response from relay daemon: cmd=%s, response={}",
1297 LTTNG_VIEWER_GET_NEXT_INDEX, static_cast<lttng_viewer_next_index_return_code>(rp_status));
1298
1299 if (flags & LTTNG_VIEWER_FLAG_NEW_STREAM) {
1300 BT_CPPLOGD_SPEC(viewer_connection->logger,
1301 "Marking all sessions as possibly needing new streams: "
1302 "response={}, response-flag=NEW_STREAM",
1303 static_cast<lttng_viewer_next_index_return_code>(rp_status));
1304 lttng_live_need_new_streams(lttng_live_msg_iter);
1305 }
1306
1307 switch (rp_status) {
1308 case LTTNG_VIEWER_INDEX_INACTIVE:
1309 {
1310 uint64_t ctf_stream_class_id;
1311
1312 memset(index, 0, sizeof(struct packet_index));
1313 index->ts_cycles.timestamp_end = be64toh(rp.timestamp_end);
1314 stream->current_inactivity_ts = index->ts_cycles.timestamp_end;
1315 ctf_stream_class_id = be64toh(rp.stream_id);
1316 if (stream->ctf_stream_class_id.is_set) {
1317 BT_ASSERT(stream->ctf_stream_class_id.value == ctf_stream_class_id);
1318 } else {
1319 stream->ctf_stream_class_id.value = ctf_stream_class_id;
1320 stream->ctf_stream_class_id.is_set = true;
1321 }
1322 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_QUIESCENT);
1323 status = LTTNG_LIVE_ITERATOR_STATUS_OK;
1324 break;
1325 }
1326 case LTTNG_VIEWER_INDEX_OK:
1327 {
1328 uint64_t ctf_stream_class_id;
1329
1330 lttng_index_to_packet_index(&rp, index);
1331 ctf_stream_class_id = be64toh(rp.stream_id);
1332 if (stream->ctf_stream_class_id.is_set) {
1333 BT_ASSERT(stream->ctf_stream_class_id.value == ctf_stream_class_id);
1334 } else {
1335 stream->ctf_stream_class_id.value = ctf_stream_class_id;
1336 stream->ctf_stream_class_id.is_set = true;
1337 }
1338 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_ACTIVE_DATA);
1339
1340 if (flags & LTTNG_VIEWER_FLAG_NEW_METADATA) {
1341 BT_CPPLOGD_SPEC(viewer_connection->logger,
1342 "Marking trace as needing new metadata: "
1343 "response={}, response-flag=NEW_METADATA, trace-id={}",
1344 static_cast<lttng_viewer_next_index_return_code>(rp_status), trace->id);
1345 trace->metadata_stream_state = LTTNG_LIVE_METADATA_STREAM_STATE_NEEDED;
1346 }
1347 status = LTTNG_LIVE_ITERATOR_STATUS_OK;
1348 break;
1349 }
1350 case LTTNG_VIEWER_INDEX_RETRY:
1351 memset(index, 0, sizeof(struct packet_index));
1352 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_ACTIVE_NO_DATA);
1353 status = LTTNG_LIVE_ITERATOR_STATUS_AGAIN;
1354 goto end;
1355 case LTTNG_VIEWER_INDEX_HUP:
1356 memset(index, 0, sizeof(struct packet_index));
1357 index->offset = EOF;
1358 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_EOF);
1359 stream->has_stream_hung_up = true;
1360 status = LTTNG_LIVE_ITERATOR_STATUS_END;
1361 break;
1362 case LTTNG_VIEWER_INDEX_ERR:
1363 memset(index, 0, sizeof(struct packet_index));
1364 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_ACTIVE_NO_DATA);
1365 status = LTTNG_LIVE_ITERATOR_STATUS_ERROR;
1366 goto end;
1367 default:
1368 BT_CPPLOGD_SPEC(viewer_connection->logger,
1369 "Received get_next_index response: unknown value");
1370 memset(index, 0, sizeof(struct packet_index));
1371 lttng_live_stream_iterator_set_state(stream, LTTNG_LIVE_STREAM_ACTIVE_NO_DATA);
1372 status = LTTNG_LIVE_ITERATOR_STATUS_ERROR;
1373 goto end;
1374 }
1375
1376 goto end;
1377
1378 error:
1379 status = viewer_status_to_live_iterator_status(viewer_status);
1380 end:
1381 return status;
1382 }
1383
1384 enum ctf_msg_iter_medium_status
1385 lttng_live_get_stream_bytes(struct lttng_live_msg_iter *lttng_live_msg_iter,
1386 struct lttng_live_stream_iterator *stream, uint8_t *buf,
1387 uint64_t offset, uint64_t req_len, uint64_t *recv_len)
1388 {
1389 enum ctf_msg_iter_medium_status status;
1390 enum lttng_live_viewer_status viewer_status;
1391 struct lttng_viewer_trace_packet rp;
1392 struct lttng_viewer_cmd cmd;
1393 struct lttng_viewer_get_packet rq;
1394 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
1395 struct lttng_live_trace *trace = stream->trace;
1396 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
1397 char cmd_buf[cmd_buf_len];
1398 uint32_t flags, rp_status;
1399
1400 BT_CPPLOGD_SPEC(viewer_connection->logger,
1401 "Requesting data from stream: cmd={}, "
1402 "offset={}, request-len={}",
1403 LTTNG_VIEWER_GET_PACKET, offset, req_len);
1404
1405 cmd.cmd = htobe32(LTTNG_VIEWER_GET_PACKET);
1406 cmd.data_size = htobe64((uint64_t) sizeof(rq));
1407 cmd.cmd_version = htobe32(0);
1408
1409 memset(&rq, 0, sizeof(rq));
1410 rq.stream_id = htobe64(stream->viewer_stream_id);
1411 rq.offset = htobe64(offset);
1412 rq.len = htobe32(req_len);
1413
1414 /*
1415 * Merge the cmd and connection request to prevent a write-write
1416 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
1417 * second write from being performed quickly in the presence of Nagle's algorithm.
1418 */
1419 memcpy(cmd_buf, &cmd, sizeof(cmd));
1420 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
1421
1422 viewer_status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
1423 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1424 viewer_handle_send_status(viewer_status, "get data packet command");
1425 goto error_convert_status;
1426 }
1427
1428 viewer_status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
1429 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1430 viewer_handle_recv_status(viewer_status, "get data packet reply");
1431 goto error_convert_status;
1432 }
1433
1434 flags = be32toh(rp.flags);
1435 rp_status = be32toh(rp.status);
1436
1437 BT_CPPLOGD_SPEC(
1438 viewer_connection->logger, "Received response from relay daemon: cmd={}, response={}",
1439 LTTNG_VIEWER_GET_PACKET, static_cast<lttng_viewer_get_packet_return_code>(rp_status));
1440 switch (rp_status) {
1441 case LTTNG_VIEWER_GET_PACKET_OK:
1442 req_len = be32toh(rp.len);
1443 BT_CPPLOGD_SPEC(viewer_connection->logger,
1444 "Got packet from relay daemon: response={}, packet-len={}",
1445 static_cast<lttng_viewer_get_packet_return_code>(rp_status), req_len);
1446 break;
1447 case LTTNG_VIEWER_GET_PACKET_RETRY:
1448 /* Unimplemented by relay daemon */
1449 status = CTF_MSG_ITER_MEDIUM_STATUS_AGAIN;
1450 goto end;
1451 case LTTNG_VIEWER_GET_PACKET_ERR:
1452 if (flags & LTTNG_VIEWER_FLAG_NEW_METADATA) {
1453 BT_CPPLOGD_SPEC(viewer_connection->logger,
1454 "Marking trace as needing new metadata: "
1455 "response={}, response-flag=NEW_METADATA, trace-id={}",
1456 static_cast<lttng_viewer_get_packet_return_code>(rp_status), trace->id);
1457 trace->metadata_stream_state = LTTNG_LIVE_METADATA_STREAM_STATE_NEEDED;
1458 }
1459 if (flags & LTTNG_VIEWER_FLAG_NEW_STREAM) {
1460 BT_CPPLOGD_SPEC(viewer_connection->logger,
1461 "Marking all sessions as possibly needing new streams: "
1462 "response={}, response-flag=NEW_STREAM",
1463 static_cast<lttng_viewer_get_packet_return_code>(rp_status));
1464 lttng_live_need_new_streams(lttng_live_msg_iter);
1465 }
1466 if (flags & (LTTNG_VIEWER_FLAG_NEW_METADATA | LTTNG_VIEWER_FLAG_NEW_STREAM)) {
1467 status = CTF_MSG_ITER_MEDIUM_STATUS_AGAIN;
1468 BT_CPPLOGD_SPEC(viewer_connection->logger,
1469 "Reply with any one flags set means we should retry: response={}",
1470 static_cast<lttng_viewer_get_packet_return_code>(rp_status));
1471 goto end;
1472 }
1473 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1474 "Received get_data_packet response: error");
1475 status = CTF_MSG_ITER_MEDIUM_STATUS_ERROR;
1476 goto end;
1477 case LTTNG_VIEWER_GET_PACKET_EOF:
1478 status = CTF_MSG_ITER_MEDIUM_STATUS_EOF;
1479 goto end;
1480 default:
1481 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1482 "Received get_data_packet response: unknown ({})", rp_status);
1483 status = CTF_MSG_ITER_MEDIUM_STATUS_ERROR;
1484 goto end;
1485 }
1486
1487 if (req_len == 0) {
1488 status = CTF_MSG_ITER_MEDIUM_STATUS_ERROR;
1489 goto end;
1490 }
1491
1492 viewer_status = lttng_live_recv(viewer_connection, buf, req_len);
1493 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1494 viewer_handle_recv_status(viewer_status, "get data packet");
1495 goto error_convert_status;
1496 }
1497 *recv_len = req_len;
1498
1499 status = CTF_MSG_ITER_MEDIUM_STATUS_OK;
1500 goto end;
1501
1502 error_convert_status:
1503 status = viewer_status_to_ctf_msg_iter_medium_status(viewer_status);
1504 end:
1505 return status;
1506 }
1507
1508 /*
1509 * Request new streams for a session.
1510 */
1511 enum lttng_live_iterator_status
1512 lttng_live_session_get_new_streams(struct lttng_live_session *session,
1513 bt_self_message_iterator *self_msg_iter)
1514 {
1515 enum lttng_live_iterator_status status = LTTNG_LIVE_ITERATOR_STATUS_OK;
1516 struct lttng_viewer_cmd cmd;
1517 struct lttng_viewer_new_streams_request rq;
1518 struct lttng_viewer_new_streams_response rp;
1519 struct lttng_live_msg_iter *lttng_live_msg_iter = session->lttng_live_msg_iter;
1520 enum lttng_live_viewer_status viewer_status;
1521 struct live_viewer_connection *viewer_connection = lttng_live_msg_iter->viewer_connection;
1522 uint32_t streams_count;
1523 const size_t cmd_buf_len = sizeof(cmd) + sizeof(rq);
1524 char cmd_buf[cmd_buf_len];
1525
1526 if (!session->new_streams_needed) {
1527 status = LTTNG_LIVE_ITERATOR_STATUS_OK;
1528 goto end;
1529 }
1530
1531 BT_CPPLOGD_SPEC(viewer_connection->logger,
1532 "Requesting new streams for session: cmd={}, session-id={}",
1533 LTTNG_VIEWER_GET_NEW_STREAMS, session->id);
1534
1535 cmd.cmd = htobe32(LTTNG_VIEWER_GET_NEW_STREAMS);
1536 cmd.data_size = htobe64((uint64_t) sizeof(rq));
1537 cmd.cmd_version = htobe32(0);
1538
1539 memset(&rq, 0, sizeof(rq));
1540 rq.session_id = htobe64(session->id);
1541
1542 /*
1543 * Merge the cmd and connection request to prevent a write-write
1544 * sequence on the TCP socket. Otherwise, a delayed ACK will prevent the
1545 * second write from being performed quickly in the presence of Nagle's algorithm.
1546 */
1547 memcpy(cmd_buf, &cmd, sizeof(cmd));
1548 memcpy(cmd_buf + sizeof(cmd), &rq, sizeof(rq));
1549
1550 viewer_status = lttng_live_send(viewer_connection, &cmd_buf, cmd_buf_len);
1551 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1552 viewer_handle_send_status(viewer_status, "get new streams command");
1553 status = viewer_status_to_live_iterator_status(viewer_status);
1554 goto end;
1555 }
1556
1557 viewer_status = lttng_live_recv(viewer_connection, &rp, sizeof(rp));
1558 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1559 viewer_handle_recv_status(viewer_status, "get new streams reply");
1560 status = viewer_status_to_live_iterator_status(viewer_status);
1561 goto end;
1562 }
1563
1564 streams_count = be32toh(rp.streams_count);
1565
1566 switch (be32toh(rp.status)) {
1567 case LTTNG_VIEWER_NEW_STREAMS_OK:
1568 session->new_streams_needed = false;
1569 break;
1570 case LTTNG_VIEWER_NEW_STREAMS_NO_NEW:
1571 session->new_streams_needed = false;
1572 goto end;
1573 case LTTNG_VIEWER_NEW_STREAMS_HUP:
1574 session->new_streams_needed = false;
1575 session->closed = true;
1576 status = LTTNG_LIVE_ITERATOR_STATUS_END;
1577 goto end;
1578 case LTTNG_VIEWER_NEW_STREAMS_ERR:
1579 BT_CPPLOGD_SPEC(viewer_connection->logger, "Received get_new_streams response: error");
1580 status = LTTNG_LIVE_ITERATOR_STATUS_ERROR;
1581 goto end;
1582 default:
1583 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1584 "Received get_new_streams response: Unknown:"
1585 "return code {}",
1586 be32toh(rp.status));
1587 status = LTTNG_LIVE_ITERATOR_STATUS_ERROR;
1588 goto end;
1589 }
1590
1591 viewer_status = receive_streams(session, streams_count, self_msg_iter);
1592 if (viewer_status != LTTNG_LIVE_VIEWER_STATUS_OK) {
1593 viewer_handle_recv_status(viewer_status, "new streams");
1594 status = viewer_status_to_live_iterator_status(viewer_status);
1595 goto end;
1596 }
1597
1598 status = LTTNG_LIVE_ITERATOR_STATUS_OK;
1599 end:
1600 return status;
1601 }
1602
1603 enum lttng_live_viewer_status live_viewer_connection_create(
1604 const char *url, bool in_query, struct lttng_live_msg_iter *lttng_live_msg_iter,
1605 const bt2c::Logger& parentLogger, struct live_viewer_connection **viewer)
1606 {
1607 enum lttng_live_viewer_status status;
1608
1609 live_viewer_connection *viewer_connection = new live_viewer_connection {parentLogger};
1610
1611 if (bt_socket_init(viewer_connection->logger) != 0) {
1612 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Failed to init socket");
1613 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
1614 goto error;
1615 }
1616
1617 viewer_connection->control_sock = BT_INVALID_SOCKET;
1618 viewer_connection->port = -1;
1619 viewer_connection->in_query = in_query;
1620 viewer_connection->lttng_live_msg_iter = lttng_live_msg_iter;
1621 viewer_connection->url = g_string_new(url);
1622 if (!viewer_connection->url) {
1623 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger, "Failed to allocate URL buffer");
1624 status = LTTNG_LIVE_VIEWER_STATUS_ERROR;
1625 goto error;
1626 }
1627
1628 BT_CPPLOGD_SPEC(viewer_connection->logger, "Establishing connection to url \"{}\"...", url);
1629 status = lttng_live_connect_viewer(viewer_connection);
1630 /*
1631 * Only print the error and append the cause in case of error, not in case of
1632 * interruption.
1633 */
1634 if (status == LTTNG_LIVE_VIEWER_STATUS_ERROR) {
1635 BT_CPPLOGE_APPEND_CAUSE_SPEC(viewer_connection->logger,
1636 "Failed to establish connection: "
1637 "url=\"{}\"",
1638 url);
1639 goto error;
1640 } else if (status == LTTNG_LIVE_VIEWER_STATUS_INTERRUPTED) {
1641 goto error;
1642 }
1643 BT_CPPLOGD_SPEC(viewer_connection->logger, "Connection to url \"{}\" is established", url);
1644
1645 *viewer = viewer_connection;
1646 status = LTTNG_LIVE_VIEWER_STATUS_OK;
1647 goto end;
1648
1649 error:
1650 if (viewer_connection) {
1651 live_viewer_connection_destroy(viewer_connection);
1652 }
1653 end:
1654 return status;
1655 }
1656
1657 void live_viewer_connection_destroy(struct live_viewer_connection *viewer_connection)
1658 {
1659 if (!viewer_connection) {
1660 goto end;
1661 }
1662
1663 BT_CPPLOGD_SPEC(viewer_connection->logger, "Closing connection to relay: relay-url=\"{}\"",
1664 viewer_connection->url->str);
1665
1666 lttng_live_disconnect_viewer(viewer_connection);
1667
1668 if (viewer_connection->url) {
1669 g_string_free(viewer_connection->url, true);
1670 }
1671
1672 if (viewer_connection->relay_hostname) {
1673 g_string_free(viewer_connection->relay_hostname, true);
1674 }
1675
1676 if (viewer_connection->target_hostname) {
1677 g_string_free(viewer_connection->target_hostname, true);
1678 }
1679
1680 if (viewer_connection->session_name) {
1681 g_string_free(viewer_connection->session_name, true);
1682 }
1683
1684 if (viewer_connection->proto) {
1685 g_string_free(viewer_connection->proto, true);
1686 }
1687
1688 delete viewer_connection;
1689
1690 bt_socket_fini();
1691
1692 end:
1693 return;
1694 }