SoW-2020-0002: Trace Hit Counters: trigger error reporting integration
[lttng-tools.git] / src / bin / lttng-sessiond / agent-thread.c
... / ...
CommitLineData
1/*
2 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 *
6 */
7
8#define _LGPL_SOURCE
9#include <assert.h>
10
11#include <common/common.h>
12#include <common/sessiond-comm/sessiond-comm.h>
13#include <common/uri.h>
14#include <common/utils.h>
15
16#include <common/compat/endian.h>
17
18#include "fd-limit.h"
19#include "agent-thread.h"
20#include "agent.h"
21#include "lttng-sessiond.h"
22#include "session.h"
23#include "utils.h"
24#include "thread.h"
25
/* Synchronization state shared between the launcher and the agent thread. */
struct thread_notifiers {
	struct lttng_pipe *quit_pipe;	/* Written to in order to ask the thread to exit. */
	sem_t ready;			/* Posted once the thread has completed its initialization. */
};
30
/* Identity reported by an agent application during registration. */
struct agent_app_id {
	pid_t pid;			/* Process id of the registering application. */
	enum lttng_domain_type domain;	/* Tracing domain (e.g. jul, log4j, python). */
};
35
/* Communication protocol version advertised by a registering agent. */
struct agent_protocol_version {
	unsigned int major, minor;
};
39
/*
 * Tri-state: -1 until the agent thread has determined whether agent tracing
 * is available, then 1 (enabled) or 0 (disabled). Read/written with uatomic
 * operations; see agent_tracing_is_enabled() and thread_agent_management().
 */
static int agent_tracing_enabled = -1;
41
/*
 * Note that there is no port here. It is set after this URI is parsed so we
 * can let the user define a custom one. However, localhost is ALWAYS the
 * default listening address.
 */
static const char *default_reg_uri =
	"tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS;
49
/*
 * Push the current event configuration to an agent application. This is done
 * just after its registration was successful.
 *
 * Locking contract:
 *   - The caller must hold the session list lock.
 *   - Individual session locks are acquired here; the caller must NOT hold
 *     any of them.
 */
static void update_agent_app(const struct agent_app *app)
{
	struct ltt_session *session, *stmp;
	struct ltt_session_list *list;
	struct agent *trigger_agent;
	struct lttng_ht_iter iter;

	list = session_get_list();
	assert(list);

	/* Update the app against every session that has a matching agent domain. */
	cds_list_for_each_entry_safe(session, stmp, &list->head, list) {
		if (!session_get(session)) {
			/* Session is being destroyed; skip it. */
			continue;
		}

		session_lock(session);
		if (session->ust_session) {
			const struct agent *agt;

			rcu_read_lock();
			agt = trace_ust_find_agent(session->ust_session, app->domain);
			if (agt) {
				agent_update(agt, app);
			}
			rcu_read_unlock();
		}
		session_unlock(session);
		session_put(session);
	}

	/*
	 * NOTE(review): unclear whether additional locking is needed here to
	 * guard against a concurrent trigger addition — TODO confirm.
	 */
	rcu_read_lock();
	cds_lfht_for_each_entry (trigger_agents_ht_by_domain->ht, &iter.iter,
			trigger_agent, node.node) {
		agent_update(trigger_agent, app);
	}
	rcu_read_unlock();
}
96
97/*
98 * Create and init socket from uri.
99 */
100static struct lttcomm_sock *init_tcp_socket(void)
101{
102 int ret;
103 struct lttng_uri *uri = NULL;
104 struct lttcomm_sock *sock = NULL;
105 unsigned int port;
106 bool bind_succeeded = false;
107
108 /*
109 * This should never fail since the URI is hardcoded and the port is set
110 * before this thread is launched.
111 */
112 ret = uri_parse(default_reg_uri, &uri);
113 assert(ret);
114 assert(config.agent_tcp_port.begin > 0);
115 uri->port = config.agent_tcp_port.begin;
116
117 sock = lttcomm_alloc_sock_from_uri(uri);
118 uri_free(uri);
119 if (sock == NULL) {
120 ERR("[agent-thread] agent allocating TCP socket");
121 goto error;
122 }
123
124 ret = lttcomm_create_sock(sock);
125 if (ret < 0) {
126 goto error;
127 }
128
129 for (port = config.agent_tcp_port.begin;
130 port <= config.agent_tcp_port.end; port++) {
131 ret = lttcomm_sock_set_port(sock, (uint16_t) port);
132 if (ret) {
133 ERR("[agent-thread] Failed to set port %u on socket",
134 port);
135 goto error;
136 }
137 DBG3("[agent-thread] Trying to bind on port %u", port);
138 ret = sock->ops->bind(sock);
139 if (!ret) {
140 bind_succeeded = true;
141 break;
142 }
143
144 if (errno == EADDRINUSE) {
145 DBG("Failed to bind to port %u since it is already in use",
146 port);
147 } else {
148 PERROR("Failed to bind to port %u", port);
149 goto error;
150 }
151 }
152
153 if (!bind_succeeded) {
154 if (config.agent_tcp_port.begin == config.agent_tcp_port.end) {
155 WARN("Another process is already using the agent port %i. "
156 "Agent support will be deactivated.",
157 config.agent_tcp_port.begin);
158 goto error;
159 } else {
160 WARN("All ports in the range [%i, %i] are already in use. "
161 "Agent support will be deactivated.",
162 config.agent_tcp_port.begin,
163 config.agent_tcp_port.end);
164 goto error;
165 }
166 }
167
168 ret = sock->ops->listen(sock, -1);
169 if (ret < 0) {
170 goto error;
171 }
172
173 DBG("[agent-thread] Listening on TCP port %u and socket %d",
174 port, sock->fd);
175
176 return sock;
177
178error:
179 if (sock) {
180 lttcomm_destroy_sock(sock);
181 }
182 return NULL;
183}
184
185/*
186 * Close and destroy the given TCP socket.
187 */
188static void destroy_tcp_socket(struct lttcomm_sock *sock)
189{
190 int ret;
191 uint16_t port;
192
193 assert(sock);
194
195 ret = lttcomm_sock_get_port(sock, &port);
196 if (ret) {
197 ERR("[agent-thread] Failed to get port of agent TCP socket");
198 port = 0;
199 }
200
201 DBG3("[agent-thread] Destroy TCP socket on port %" PRIu16,
202 port);
203
204 /* This will return gracefully if fd is invalid. */
205 sock->ops->close(sock);
206 lttcomm_destroy_sock(sock);
207}
208
209static const char *domain_type_str(enum lttng_domain_type domain_type)
210{
211 switch (domain_type) {
212 case LTTNG_DOMAIN_NONE:
213 return "none";
214 case LTTNG_DOMAIN_KERNEL:
215 return "kernel";
216 case LTTNG_DOMAIN_UST:
217 return "ust";
218 case LTTNG_DOMAIN_JUL:
219 return "jul";
220 case LTTNG_DOMAIN_LOG4J:
221 return "log4j";
222 case LTTNG_DOMAIN_PYTHON:
223 return "python";
224 default:
225 return "unknown";
226 }
227}
228
229static bool is_agent_protocol_version_supported(
230 const struct agent_protocol_version *version)
231{
232 const bool is_supported = version->major == AGENT_MAJOR_VERSION &&
233 version->minor == AGENT_MINOR_VERSION;
234
235 if (!is_supported) {
236 WARN("Refusing agent connection: unsupported protocol version %ui.%ui, expected %i.%i",
237 version->major, version->minor,
238 AGENT_MAJOR_VERSION, AGENT_MINOR_VERSION);
239 }
240
241 return is_supported;
242}
243
244/*
245 * Handle a new agent connection on the registration socket.
246 *
247 * Returns 0 on success, or else a negative errno value.
248 * On success, the resulting socket is returned through `agent_app_socket`
249 * and the application's reported id is updated through `agent_app_id`.
250 */
251static int accept_agent_connection(
252 struct lttcomm_sock *reg_sock,
253 struct agent_app_id *agent_app_id,
254 struct lttcomm_sock **agent_app_socket)
255{
256 int ret;
257 struct agent_protocol_version agent_version;
258 ssize_t size;
259 struct agent_register_msg msg;
260 struct lttcomm_sock *new_sock;
261
262 assert(reg_sock);
263
264 new_sock = reg_sock->ops->accept(reg_sock);
265 if (!new_sock) {
266 ret = -ENOTCONN;
267 goto end;
268 }
269
270 size = new_sock->ops->recvmsg(new_sock, &msg, sizeof(msg), 0);
271 if (size < sizeof(msg)) {
272 if (size < 0) {
273 PERROR("Failed to register new agent application");
274 } else if (size != 0) {
275 ERR("Failed to register new agent application: invalid registration message length: expected length = %zu, message length = %zd",
276 sizeof(msg), size);
277 } else {
278 DBG("Failed to register new agent application: connection closed");
279 }
280 ret = -EINVAL;
281 goto error_close_socket;
282 }
283
284 agent_version = (struct agent_protocol_version) {
285 be32toh(msg.major_version),
286 be32toh(msg.minor_version),
287 };
288
289 /* Test communication protocol version of the registering agent. */
290 if (!is_agent_protocol_version_supported(&agent_version)) {
291 ret = -EINVAL;
292 goto error_close_socket;
293 }
294
295 *agent_app_id = (struct agent_app_id) {
296 .domain = (enum lttng_domain_type) be32toh(msg.domain),
297 .pid = (pid_t) be32toh(msg.pid),
298 };
299
300 DBG2("New registration for agent application: pid = %ld, domain = %s, socket fd = %d",
301 (long) agent_app_id->pid,
302 domain_type_str(agent_app_id->domain), new_sock->fd);
303
304 *agent_app_socket = new_sock;
305 new_sock = NULL;
306 ret = 0;
307 goto end;
308
309error_close_socket:
310 new_sock->ops->close(new_sock);
311 lttcomm_destroy_sock(new_sock);
312end:
313 return ret;
314}
315
316bool agent_tracing_is_enabled(void)
317{
318 int enabled;
319
320 enabled = uatomic_read(&agent_tracing_enabled);
321 assert(enabled != -1);
322 return enabled == 1;
323}
324
325/*
326 * Write agent TCP port using the rundir.
327 */
328static int write_agent_port(uint16_t port)
329{
330 return utils_create_pid_file((pid_t) port,
331 config.agent_port_file_path.value);
332}
333
334static
335void mark_thread_as_ready(struct thread_notifiers *notifiers)
336{
337 DBG("Marking agent management thread as ready");
338 sem_post(&notifiers->ready);
339}
340
341static
342void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
343{
344 DBG("Waiting for agent management thread to be ready");
345 sem_wait(&notifiers->ready);
346 DBG("Agent management thread is ready");
347}
348
349/*
350 * This thread manage application notify communication.
351 */
352static void *thread_agent_management(void *data)
353{
354 int i, ret, pollfd;
355 uint32_t revents, nb_fd;
356 struct lttng_poll_event events;
357 struct lttcomm_sock *reg_sock;
358 struct thread_notifiers *notifiers = data;
359 const int quit_pipe_read_fd = lttng_pipe_get_readfd(
360 notifiers->quit_pipe);
361
362 DBG("[agent-thread] Manage agent application registration.");
363
364 rcu_register_thread();
365 rcu_thread_online();
366
367 /* Agent initialization call MUST be called before starting the thread. */
368 assert(agent_apps_ht_by_sock);
369
370 /* Create pollset with size 2, quit pipe and registration socket. */
371 ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
372 if (ret < 0) {
373 goto error_poll_create;
374 }
375
376 ret = lttng_poll_add(&events, quit_pipe_read_fd,
377 LPOLLIN | LPOLLERR);
378 if (ret < 0) {
379 goto error_tcp_socket;
380 }
381
382 reg_sock = init_tcp_socket();
383 if (reg_sock) {
384 uint16_t port;
385
386 assert(lttcomm_sock_get_port(reg_sock, &port) == 0);
387
388 ret = write_agent_port(port);
389 if (ret) {
390 ERR("[agent-thread] Failed to create agent port file: agent tracing will be unavailable");
391 /* Don't prevent the launch of the sessiond on error. */
392 mark_thread_as_ready(notifiers);
393 goto error;
394 }
395 } else {
396 /* Don't prevent the launch of the sessiond on error. */
397 mark_thread_as_ready(notifiers);
398 goto error_tcp_socket;
399 }
400
401 /*
402 * Signal that the agent thread is ready. The command thread
403 * may start to query whether or not agent tracing is enabled.
404 */
405 uatomic_set(&agent_tracing_enabled, 1);
406 mark_thread_as_ready(notifiers);
407
408 /* Add TCP socket to the poll set. */
409 ret = lttng_poll_add(&events, reg_sock->fd,
410 LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
411 if (ret < 0) {
412 goto error;
413 }
414
415 while (1) {
416 DBG3("[agent-thread] Manage agent polling");
417
418 /* Inifinite blocking call, waiting for transmission */
419restart:
420 ret = lttng_poll_wait(&events, -1);
421 DBG3("[agent-thread] Manage agent return from poll on %d fds",
422 LTTNG_POLL_GETNB(&events));
423 if (ret < 0) {
424 /*
425 * Restart interrupted system call.
426 */
427 if (errno == EINTR) {
428 goto restart;
429 }
430 goto error;
431 }
432 nb_fd = ret;
433 DBG3("[agent-thread] %d fd ready", nb_fd);
434
435 for (i = 0; i < nb_fd; i++) {
436 /* Fetch once the poll data */
437 revents = LTTNG_POLL_GETEV(&events, i);
438 pollfd = LTTNG_POLL_GETFD(&events, i);
439
440 /* Thread quit pipe has been closed. Killing thread. */
441 if (pollfd == quit_pipe_read_fd) {
442 goto exit;
443 }
444
445 /* Activity on the registration socket. */
446 if (revents & LPOLLIN) {
447 struct agent_app_id new_app_id;
448 struct agent_app *new_app = NULL;
449 struct lttcomm_sock *new_app_socket;
450 int new_app_socket_fd;
451
452 assert(pollfd == reg_sock->fd);
453
454 ret = accept_agent_connection(
455 reg_sock, &new_app_id, &new_app_socket);
456 if (ret < 0) {
457 /* Errors are already logged. */
458 continue;
459 }
460
461 /*
462 * new_app_socket's ownership has been
463 * transferred to the new agent app.
464 */
465 new_app = agent_create_app(new_app_id.pid,
466 new_app_id.domain,
467 new_app_socket);
468 if (!new_app) {
469 new_app_socket->ops->close(
470 new_app_socket);
471 continue;
472 }
473 new_app_socket_fd = new_app_socket->fd;
474 new_app_socket = NULL;
475
476 /*
477 * Since this is a command socket (write then
478 * read), only add poll error event to only
479 * detect shutdown.
480 */
481 ret = lttng_poll_add(&events, new_app_socket_fd,
482 LPOLLERR | LPOLLHUP | LPOLLRDHUP);
483 if (ret < 0) {
484 agent_destroy_app(new_app);
485 continue;
486 }
487
488 /*
489 * Prevent sessions from being modified while
490 * the agent application's configuration is
491 * updated.
492 */
493 session_lock_list();
494
495 /*
496 * Update the newly registered applications's
497 * configuration.
498 */
499 update_agent_app(new_app);
500
501 ret = agent_send_registration_done(new_app);
502 if (ret < 0) {
503 agent_destroy_app(new_app);
504 /* Removing from the poll set. */
505 ret = lttng_poll_del(&events,
506 new_app_socket_fd);
507 if (ret < 0) {
508 session_unlock_list();
509 goto error;
510 }
511 continue;
512 }
513
514 /* Publish the new agent app. */
515 agent_add_app(new_app);
516
517 session_unlock_list();
518 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
519 /* Removing from the poll set */
520 ret = lttng_poll_del(&events, pollfd);
521 if (ret < 0) {
522 goto error;
523 }
524 agent_destroy_app_by_sock(pollfd);
525 } else {
526 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
527 goto error;
528 }
529 }
530 }
531
532exit:
533 /* Whatever happens, try to delete it and exit. */
534 (void) lttng_poll_del(&events, reg_sock->fd);
535error:
536 destroy_tcp_socket(reg_sock);
537error_tcp_socket:
538 lttng_poll_clean(&events);
539error_poll_create:
540 uatomic_set(&agent_tracing_enabled, 0);
541 DBG("[agent-thread] Cleaning up and stopping.");
542 rcu_thread_offline();
543 rcu_unregister_thread();
544 return NULL;
545}
546
547static bool shutdown_agent_management_thread(void *data)
548{
549 struct thread_notifiers *notifiers = data;
550 const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
551
552 return notify_thread_pipe(write_fd) == 1;
553}
554
555static void cleanup_agent_management_thread(void *data)
556{
557 struct thread_notifiers *notifiers = data;
558
559 lttng_pipe_destroy(notifiers->quit_pipe);
560 sem_destroy(&notifiers->ready);
561 free(notifiers);
562}
563
564bool launch_agent_management_thread(void)
565{
566 struct thread_notifiers *notifiers;
567 struct lttng_thread *thread;
568
569 notifiers = zmalloc(sizeof(*notifiers));
570 if (!notifiers) {
571 goto error_alloc;
572 }
573
574 sem_init(&notifiers->ready, 0, 0);
575 notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
576 if (!notifiers->quit_pipe) {
577 goto error;
578 }
579 thread = lttng_thread_create("Agent management",
580 thread_agent_management,
581 shutdown_agent_management_thread,
582 cleanup_agent_management_thread,
583 notifiers);
584 if (!thread) {
585 goto error;
586 }
587 wait_until_thread_is_ready(notifiers);
588 lttng_thread_put(thread);
589 return true;
590error:
591 cleanup_agent_management_thread(notifiers);
592error_alloc:
593 return false;
594}
This page took 0.026239 seconds and 5 git commands to generate.