Commit | Line | Data |
---|---|---|
3bd1e081 MD |
1 | /* |
2 | * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca> | |
3 | * Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
4 | * | |
d14d33bf AM |
5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License, version 2 only, | |
7 | * as published by the Free Software Foundation. | |
3bd1e081 MD |
8 | * |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
d14d33bf AM |
14 | * You should have received a copy of the GNU General Public License along |
15 | * with this program; if not, write to the Free Software Foundation, Inc., | |
16 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | |
3bd1e081 MD |
17 | */ |
18 | ||
19 | #define _GNU_SOURCE | |
20 | #include <assert.h> | |
f02e1e8a | 21 | #include <lttng/ust-ctl.h> |
3bd1e081 MD |
22 | #include <poll.h> |
23 | #include <pthread.h> | |
24 | #include <stdlib.h> | |
25 | #include <string.h> | |
26 | #include <sys/mman.h> | |
27 | #include <sys/socket.h> | |
dbb5dfe6 | 28 | #include <sys/stat.h> |
3bd1e081 | 29 | #include <sys/types.h> |
77c7c900 | 30 | #include <inttypes.h> |
3bd1e081 | 31 | #include <unistd.h> |
ffe60014 | 32 | #include <urcu/list.h> |
331744e3 | 33 | #include <signal.h> |
0857097f | 34 | |
990570ed | 35 | #include <common/common.h> |
10a8a223 | 36 | #include <common/sessiond-comm/sessiond-comm.h> |
00e2e675 | 37 | #include <common/relayd/relayd.h> |
dbb5dfe6 | 38 | #include <common/compat/fcntl.h> |
331744e3 JD |
39 | #include <common/consumer-metadata-cache.h> |
40 | #include <common/consumer-timer.h> | |
fe4477ee | 41 | #include <common/utils.h> |
10a8a223 DG |
42 | |
43 | #include "ust-consumer.h" | |
3bd1e081 MD |
44 | |
45 | extern struct lttng_consumer_global_data consumer_data; | |
46 | extern int consumer_poll_timeout; | |
47 | extern volatile int consumer_quit; | |
48 | ||
49 | /* | |
ffe60014 DG |
50 | * Free channel object and all streams associated with it. This MUST be used |
51 | * only and only if the channel has _NEVER_ been added to the global channel | |
52 | * hash table. | |
3bd1e081 | 53 | */ |
ffe60014 | 54 | static void destroy_channel(struct lttng_consumer_channel *channel) |
3bd1e081 | 55 | { |
ffe60014 DG |
56 | struct lttng_consumer_stream *stream, *stmp; |
57 | ||
58 | assert(channel); | |
59 | ||
60 | DBG("UST consumer cleaning stream list"); | |
61 | ||
62 | cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head, | |
63 | send_node) { | |
64 | cds_list_del(&stream->send_node); | |
65 | ustctl_destroy_stream(stream->ustream); | |
66 | free(stream); | |
67 | } | |
68 | ||
69 | /* | |
70 | * If a channel is available meaning that was created before the streams | |
71 | * were, delete it. | |
72 | */ | |
73 | if (channel->uchan) { | |
74 | lttng_ustconsumer_del_channel(channel); | |
75 | } | |
76 | free(channel); | |
77 | } | |
3bd1e081 MD |
78 | |
79 | /* | |
ffe60014 | 80 | * Add channel to internal consumer state. |
3bd1e081 | 81 | * |
ffe60014 | 82 | * Returns 0 on success or else a negative value. |
3bd1e081 | 83 | */ |
ffe60014 DG |
84 | static int add_channel(struct lttng_consumer_channel *channel, |
85 | struct lttng_consumer_local_data *ctx) | |
3bd1e081 MD |
86 | { |
87 | int ret = 0; | |
88 | ||
ffe60014 DG |
89 | assert(channel); |
90 | assert(ctx); | |
91 | ||
92 | if (ctx->on_recv_channel != NULL) { | |
93 | ret = ctx->on_recv_channel(channel); | |
94 | if (ret == 0) { | |
d8ef542d | 95 | ret = consumer_add_channel(channel, ctx); |
ffe60014 DG |
96 | } else if (ret < 0) { |
97 | /* Most likely an ENOMEM. */ | |
98 | lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); | |
99 | goto error; | |
100 | } | |
101 | } else { | |
d8ef542d | 102 | ret = consumer_add_channel(channel, ctx); |
3bd1e081 MD |
103 | } |
104 | ||
d88aee68 | 105 | DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key); |
ffe60014 DG |
106 | |
107 | error: | |
3bd1e081 MD |
108 | return ret; |
109 | } | |
110 | ||
111 | /* | |
ffe60014 DG |
112 | * Allocate and return a consumer channel object. |
113 | */ | |
114 | static struct lttng_consumer_channel *allocate_channel(uint64_t session_id, | |
115 | const char *pathname, const char *name, uid_t uid, gid_t gid, | |
1624d5b7 JD |
116 | int relayd_id, uint64_t key, enum lttng_event_output output, |
117 | uint64_t tracefile_size, uint64_t tracefile_count) | |
ffe60014 DG |
118 | { |
119 | assert(pathname); | |
120 | assert(name); | |
121 | ||
122 | return consumer_allocate_channel(key, session_id, pathname, name, uid, gid, | |
1624d5b7 | 123 | relayd_id, output, tracefile_size, tracefile_count); |
ffe60014 DG |
124 | } |
125 | ||
126 | /* | |
127 | * Allocate and return a consumer stream object. If _alloc_ret is not NULL, the | |
128 | * error value if applicable is set in it else it is kept untouched. | |
3bd1e081 | 129 | * |
ffe60014 | 130 | * Return NULL on error else the newly allocated stream object. |
3bd1e081 | 131 | */ |
ffe60014 DG |
132 | static struct lttng_consumer_stream *allocate_stream(int cpu, int key, |
133 | struct lttng_consumer_channel *channel, | |
134 | struct lttng_consumer_local_data *ctx, int *_alloc_ret) | |
135 | { | |
136 | int alloc_ret; | |
137 | struct lttng_consumer_stream *stream = NULL; | |
138 | ||
139 | assert(channel); | |
140 | assert(ctx); | |
141 | ||
142 | stream = consumer_allocate_stream(channel->key, | |
143 | key, | |
144 | LTTNG_CONSUMER_ACTIVE_STREAM, | |
145 | channel->name, | |
146 | channel->uid, | |
147 | channel->gid, | |
148 | channel->relayd_id, | |
149 | channel->session_id, | |
150 | cpu, | |
151 | &alloc_ret, | |
152 | channel->type); | |
153 | if (stream == NULL) { | |
154 | switch (alloc_ret) { | |
155 | case -ENOENT: | |
156 | /* | |
157 | * We could not find the channel. Can happen if cpu hotplug | |
158 | * happens while tearing down. | |
159 | */ | |
160 | DBG3("Could not find channel"); | |
161 | break; | |
162 | case -ENOMEM: | |
163 | case -EINVAL: | |
164 | default: | |
165 | lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); | |
166 | break; | |
167 | } | |
168 | goto error; | |
169 | } | |
170 | ||
171 | stream->chan = channel; | |
172 | ||
173 | error: | |
174 | if (_alloc_ret) { | |
175 | *_alloc_ret = alloc_ret; | |
176 | } | |
177 | return stream; | |
178 | } | |
179 | ||
180 | /* | |
181 | * Send the given stream pointer to the corresponding thread. | |
182 | * | |
183 | * Returns 0 on success else a negative value. | |
184 | */ | |
185 | static int send_stream_to_thread(struct lttng_consumer_stream *stream, | |
186 | struct lttng_consumer_local_data *ctx) | |
187 | { | |
dae10966 DG |
188 | int ret; |
189 | struct lttng_pipe *stream_pipe; | |
ffe60014 DG |
190 | |
191 | /* Get the right pipe where the stream will be sent. */ | |
192 | if (stream->metadata_flag) { | |
dae10966 | 193 | stream_pipe = ctx->consumer_metadata_pipe; |
ffe60014 | 194 | } else { |
dae10966 | 195 | stream_pipe = ctx->consumer_data_pipe; |
ffe60014 DG |
196 | } |
197 | ||
dae10966 | 198 | ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream)); |
ffe60014 | 199 | if (ret < 0) { |
dae10966 DG |
200 | ERR("Consumer write %s stream to pipe %d", |
201 | stream->metadata_flag ? "metadata" : "data", | |
202 | lttng_pipe_get_writefd(stream_pipe)); | |
ffe60014 DG |
203 | } |
204 | ||
205 | return ret; | |
206 | } | |
207 | ||
208 | /* | |
209 | * Search for a relayd object related to the stream. If found, send the stream | |
210 | * to the relayd. | |
211 | * | |
212 | * On success, returns 0 else a negative value. | |
213 | */ | |
214 | static int send_stream_to_relayd(struct lttng_consumer_stream *stream) | |
215 | { | |
216 | int ret = 0; | |
217 | struct consumer_relayd_sock_pair *relayd; | |
218 | ||
219 | assert(stream); | |
220 | ||
221 | relayd = consumer_find_relayd(stream->net_seq_idx); | |
222 | if (relayd != NULL) { | |
223 | pthread_mutex_lock(&relayd->ctrl_sock_mutex); | |
224 | /* Add stream on the relayd */ | |
225 | ret = relayd_add_stream(&relayd->control_sock, stream->name, | |
0f907de1 JD |
226 | stream->chan->pathname, &stream->relayd_stream_id, |
227 | stream->chan->tracefile_size, | |
228 | stream->chan->tracefile_count); | |
ffe60014 DG |
229 | pthread_mutex_unlock(&relayd->ctrl_sock_mutex); |
230 | if (ret < 0) { | |
231 | goto error; | |
232 | } | |
d88aee68 DG |
233 | } else if (stream->net_seq_idx != (uint64_t) -1ULL) { |
234 | ERR("Network sequence index %" PRIu64 " unknown. Not adding stream.", | |
ffe60014 DG |
235 | stream->net_seq_idx); |
236 | ret = -1; | |
237 | goto error; | |
238 | } | |
239 | ||
240 | error: | |
241 | return ret; | |
242 | } | |
243 | ||
d88aee68 DG |
244 | /* |
245 | * Create streams for the given channel using liblttng-ust-ctl. | |
246 | * | |
247 | * Return 0 on success else a negative value. | |
248 | */ | |
ffe60014 DG |
249 | static int create_ust_streams(struct lttng_consumer_channel *channel, |
250 | struct lttng_consumer_local_data *ctx) | |
251 | { | |
252 | int ret, cpu = 0; | |
253 | struct ustctl_consumer_stream *ustream; | |
254 | struct lttng_consumer_stream *stream; | |
255 | ||
256 | assert(channel); | |
257 | assert(ctx); | |
258 | ||
259 | /* | |
260 | * While a stream is available from ustctl. When NULL is returned, we've | |
261 | * reached the end of the possible stream for the channel. | |
262 | */ | |
263 | while ((ustream = ustctl_create_stream(channel->uchan, cpu))) { | |
264 | int wait_fd; | |
265 | ||
749d339a | 266 | wait_fd = ustctl_stream_get_wait_fd(ustream); |
ffe60014 DG |
267 | |
268 | /* Allocate consumer stream object. */ | |
269 | stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret); | |
270 | if (!stream) { | |
271 | goto error_alloc; | |
272 | } | |
273 | stream->ustream = ustream; | |
274 | /* | |
275 | * Store it so we can save multiple function calls afterwards since | |
276 | * this value is used heavily in the stream threads. This is UST | |
277 | * specific so this is why it's done after allocation. | |
278 | */ | |
279 | stream->wait_fd = wait_fd; | |
280 | ||
b31398bb DG |
281 | /* |
282 | * Increment channel refcount since the channel reference has now been | |
283 | * assigned in the allocation process above. | |
284 | */ | |
285 | uatomic_inc(&stream->chan->refcount); | |
286 | ||
ffe60014 DG |
287 | /* |
288 | * Order is important this is why a list is used. On error, the caller | |
289 | * should clean this list. | |
290 | */ | |
291 | cds_list_add_tail(&stream->send_node, &channel->streams.head); | |
292 | ||
293 | ret = ustctl_get_max_subbuf_size(stream->ustream, | |
294 | &stream->max_sb_size); | |
295 | if (ret < 0) { | |
296 | ERR("ustctl_get_max_subbuf_size failed for stream %s", | |
297 | stream->name); | |
298 | goto error; | |
299 | } | |
300 | ||
301 | /* Do actions once stream has been received. */ | |
302 | if (ctx->on_recv_stream) { | |
303 | ret = ctx->on_recv_stream(stream); | |
304 | if (ret < 0) { | |
305 | goto error; | |
306 | } | |
307 | } | |
308 | ||
d88aee68 | 309 | DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64, |
ffe60014 DG |
310 | stream->name, stream->key, stream->relayd_stream_id); |
311 | ||
312 | /* Set next CPU stream. */ | |
313 | channel->streams.count = ++cpu; | |
d88aee68 DG |
314 | |
315 | /* Keep stream reference when creating metadata. */ | |
316 | if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) { | |
317 | channel->metadata_stream = stream; | |
318 | } | |
ffe60014 DG |
319 | } |
320 | ||
321 | return 0; | |
322 | ||
323 | error: | |
324 | error_alloc: | |
325 | return ret; | |
326 | } | |
327 | ||
328 | /* | |
329 | * Create an UST channel with the given attributes and send it to the session | |
330 | * daemon using the ust ctl API. | |
331 | * | |
332 | * Return 0 on success or else a negative value. | |
333 | */ | |
334 | static int create_ust_channel(struct ustctl_consumer_channel_attr *attr, | |
335 | struct ustctl_consumer_channel **chanp) | |
336 | { | |
337 | int ret; | |
338 | struct ustctl_consumer_channel *channel; | |
339 | ||
340 | assert(attr); | |
341 | assert(chanp); | |
342 | ||
343 | DBG3("Creating channel to ustctl with attr: [overwrite: %d, " | |
344 | "subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", " | |
345 | "switch_timer_interval: %u, read_timer_interval: %u, " | |
346 | "output: %d, type: %d", attr->overwrite, attr->subbuf_size, | |
347 | attr->num_subbuf, attr->switch_timer_interval, | |
348 | attr->read_timer_interval, attr->output, attr->type); | |
349 | ||
350 | channel = ustctl_create_channel(attr); | |
351 | if (!channel) { | |
352 | ret = -1; | |
353 | goto error_create; | |
354 | } | |
355 | ||
356 | *chanp = channel; | |
357 | ||
358 | return 0; | |
359 | ||
360 | error_create: | |
361 | return ret; | |
362 | } | |
363 | ||
d88aee68 DG |
364 | /* |
365 | * Send a single given stream to the session daemon using the sock. | |
366 | * | |
367 | * Return 0 on success else a negative value. | |
368 | */ | |
ffe60014 DG |
369 | static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream) |
370 | { | |
371 | int ret; | |
372 | ||
373 | assert(stream); | |
374 | assert(sock >= 0); | |
375 | ||
d88aee68 | 376 | DBG2("UST consumer sending stream %" PRIu64 " to sessiond", stream->key); |
ffe60014 DG |
377 | |
378 | /* Send stream to session daemon. */ | |
379 | ret = ustctl_send_stream_to_sessiond(sock, stream->ustream); | |
380 | if (ret < 0) { | |
381 | goto error; | |
382 | } | |
383 | ||
ffe60014 DG |
384 | error: |
385 | return ret; | |
386 | } | |
387 | ||
388 | /* | |
389 | * Send channel to sessiond. | |
390 | * | |
d88aee68 | 391 | * Return 0 on success or else a negative value. |
ffe60014 DG |
392 | */ |
393 | static int send_sessiond_channel(int sock, | |
394 | struct lttng_consumer_channel *channel, | |
395 | struct lttng_consumer_local_data *ctx, int *relayd_error) | |
396 | { | |
397 | int ret; | |
398 | struct lttng_consumer_stream *stream; | |
399 | ||
400 | assert(channel); | |
401 | assert(ctx); | |
402 | assert(sock >= 0); | |
403 | ||
404 | DBG("UST consumer sending channel %s to sessiond", channel->name); | |
405 | ||
406 | /* Send channel to sessiond. */ | |
407 | ret = ustctl_send_channel_to_sessiond(sock, channel->uchan); | |
408 | if (ret < 0) { | |
409 | goto error; | |
410 | } | |
411 | ||
d8ef542d MD |
412 | ret = ustctl_channel_close_wakeup_fd(channel->uchan); |
413 | if (ret < 0) { | |
414 | goto error; | |
415 | } | |
416 | ||
ffe60014 DG |
417 | /* The channel was sent successfully to the sessiond at this point. */ |
418 | cds_list_for_each_entry(stream, &channel->streams.head, send_node) { | |
419 | /* Try to send the stream to the relayd if one is available. */ | |
420 | ret = send_stream_to_relayd(stream); | |
421 | if (ret < 0) { | |
422 | /* | |
423 | * Flag that the relayd was the problem here probably due to a | |
424 | * communicaton error on the socket. | |
425 | */ | |
426 | if (relayd_error) { | |
427 | *relayd_error = 1; | |
428 | } | |
429 | goto error; | |
430 | } | |
431 | ||
432 | /* Send stream to session daemon. */ | |
433 | ret = send_sessiond_stream(sock, stream); | |
434 | if (ret < 0) { | |
435 | goto error; | |
436 | } | |
437 | } | |
438 | ||
439 | /* Tell sessiond there is no more stream. */ | |
440 | ret = ustctl_send_stream_to_sessiond(sock, NULL); | |
441 | if (ret < 0) { | |
442 | goto error; | |
443 | } | |
444 | ||
445 | DBG("UST consumer NULL stream sent to sessiond"); | |
446 | ||
447 | return 0; | |
448 | ||
449 | error: | |
450 | return ret; | |
451 | } | |
452 | ||
453 | /* | |
454 | * Creates a channel and streams and add the channel it to the channel internal | |
455 | * state. The created stream must ONLY be sent once the GET_CHANNEL command is | |
456 | * received. | |
457 | * | |
458 | * Return 0 on success or else, a negative value is returned and the channel | |
459 | * MUST be destroyed by consumer_del_channel(). | |
460 | */ | |
461 | static int ask_channel(struct lttng_consumer_local_data *ctx, int sock, | |
462 | struct lttng_consumer_channel *channel, | |
463 | struct ustctl_consumer_channel_attr *attr) | |
3bd1e081 MD |
464 | { |
465 | int ret; | |
466 | ||
ffe60014 DG |
467 | assert(ctx); |
468 | assert(channel); | |
469 | assert(attr); | |
470 | ||
471 | /* | |
472 | * This value is still used by the kernel consumer since for the kernel, | |
473 | * the stream ownership is not IN the consumer so we need to have the | |
474 | * number of left stream that needs to be initialized so we can know when | |
475 | * to delete the channel (see consumer.c). | |
476 | * | |
477 | * As for the user space tracer now, the consumer creates and sends the | |
478 | * stream to the session daemon which only sends them to the application | |
479 | * once every stream of a channel is received making this value useless | |
480 | * because we they will be added to the poll thread before the application | |
481 | * receives them. This ensures that a stream can not hang up during | |
482 | * initilization of a channel. | |
483 | */ | |
484 | channel->nb_init_stream_left = 0; | |
485 | ||
486 | /* The reply msg status is handled in the following call. */ | |
487 | ret = create_ust_channel(attr, &channel->uchan); | |
488 | if (ret < 0) { | |
489 | goto error; | |
3bd1e081 MD |
490 | } |
491 | ||
d8ef542d MD |
492 | channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan); |
493 | ||
ffe60014 DG |
494 | /* Open all streams for this channel. */ |
495 | ret = create_ust_streams(channel, ctx); | |
496 | if (ret < 0) { | |
497 | goto error; | |
498 | } | |
499 | ||
500 | error: | |
3bd1e081 MD |
501 | return ret; |
502 | } | |
503 | ||
d88aee68 DG |
504 | /* |
505 | * Send all stream of a channel to the right thread handling it. | |
506 | * | |
507 | * On error, return a negative value else 0 on success. | |
508 | */ | |
509 | static int send_streams_to_thread(struct lttng_consumer_channel *channel, | |
510 | struct lttng_consumer_local_data *ctx) | |
511 | { | |
512 | int ret = 0; | |
513 | struct lttng_consumer_stream *stream, *stmp; | |
514 | ||
515 | assert(channel); | |
516 | assert(ctx); | |
517 | ||
518 | /* Send streams to the corresponding thread. */ | |
519 | cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head, | |
520 | send_node) { | |
521 | /* Sending the stream to the thread. */ | |
522 | ret = send_stream_to_thread(stream, ctx); | |
523 | if (ret < 0) { | |
524 | /* | |
525 | * If we are unable to send the stream to the thread, there is | |
526 | * a big problem so just stop everything. | |
527 | */ | |
528 | goto error; | |
529 | } | |
530 | ||
531 | /* Remove node from the channel stream list. */ | |
532 | cds_list_del(&stream->send_node); | |
533 | } | |
534 | ||
535 | error: | |
536 | return ret; | |
537 | } | |
538 | ||
539 | /* | |
540 | * Write metadata to the given channel using ustctl to convert the string to | |
541 | * the ringbuffer. | |
331744e3 JD |
542 | * Called only from consumer_metadata_cache_write. |
543 | * The metadata cache lock MUST be acquired to write in the cache. | |
d88aee68 DG |
544 | * |
545 | * Return 0 on success else a negative value. | |
546 | */ | |
331744e3 | 547 | int lttng_ustconsumer_push_metadata(struct lttng_consumer_channel *metadata, |
d88aee68 DG |
548 | const char *metadata_str, uint64_t target_offset, uint64_t len) |
549 | { | |
550 | int ret; | |
551 | ||
552 | assert(metadata); | |
553 | assert(metadata_str); | |
554 | ||
555 | DBG("UST consumer writing metadata to channel %s", metadata->name); | |
556 | ||
73811ecc DG |
557 | if (!metadata->metadata_stream) { |
558 | ret = 0; | |
559 | goto error; | |
560 | } | |
561 | ||
331744e3 JD |
562 | assert(target_offset <= metadata->metadata_cache->max_offset); |
563 | ret = ustctl_write_metadata_to_channel(metadata->uchan, | |
564 | metadata_str + target_offset, len); | |
d88aee68 | 565 | if (ret < 0) { |
8fd623e0 | 566 | ERR("ustctl write metadata fail with ret %d, len %" PRIu64, ret, len); |
d88aee68 DG |
567 | goto error; |
568 | } | |
d88aee68 DG |
569 | |
570 | ustctl_flush_buffer(metadata->metadata_stream->ustream, 1); | |
571 | ||
572 | error: | |
573 | return ret; | |
574 | } | |
575 | ||
7972aab2 DG |
576 | /* |
577 | * Flush channel's streams using the given key to retrieve the channel. | |
578 | * | |
579 | * Return 0 on success else an LTTng error code. | |
580 | */ | |
581 | static int flush_channel(uint64_t chan_key) | |
582 | { | |
583 | int ret = 0; | |
584 | struct lttng_consumer_channel *channel; | |
585 | struct lttng_consumer_stream *stream; | |
586 | struct lttng_ht *ht; | |
587 | struct lttng_ht_iter iter; | |
588 | ||
8fd623e0 | 589 | DBG("UST consumer flush channel key %" PRIu64, chan_key); |
7972aab2 | 590 | |
a500c257 | 591 | rcu_read_lock(); |
7972aab2 DG |
592 | channel = consumer_find_channel(chan_key); |
593 | if (!channel) { | |
8fd623e0 | 594 | ERR("UST consumer flush channel %" PRIu64 " not found", chan_key); |
7972aab2 DG |
595 | ret = LTTNG_ERR_UST_CHAN_NOT_FOUND; |
596 | goto error; | |
597 | } | |
598 | ||
599 | ht = consumer_data.stream_per_chan_id_ht; | |
600 | ||
601 | /* For each stream of the channel id, flush it. */ | |
7972aab2 DG |
602 | cds_lfht_for_each_entry_duplicate(ht->ht, |
603 | ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct, | |
604 | &channel->key, &iter.iter, stream, node_channel_id.node) { | |
605 | ustctl_flush_buffer(stream->ustream, 1); | |
606 | } | |
7972aab2 | 607 | error: |
a500c257 | 608 | rcu_read_unlock(); |
7972aab2 DG |
609 | return ret; |
610 | } | |
611 | ||
d88aee68 DG |
612 | /* |
613 | * Close metadata stream wakeup_fd using the given key to retrieve the channel. | |
a500c257 | 614 | * RCU read side lock MUST be acquired before calling this function. |
d88aee68 DG |
615 | * |
616 | * Return 0 on success else an LTTng error code. | |
617 | */ | |
618 | static int close_metadata(uint64_t chan_key) | |
619 | { | |
ea88ca2a | 620 | int ret = 0; |
d88aee68 DG |
621 | struct lttng_consumer_channel *channel; |
622 | ||
8fd623e0 | 623 | DBG("UST consumer close metadata key %" PRIu64, chan_key); |
d88aee68 DG |
624 | |
625 | channel = consumer_find_channel(chan_key); | |
626 | if (!channel) { | |
84cc9aa0 DG |
627 | /* |
628 | * This is possible if the metadata thread has issue a delete because | |
629 | * the endpoint point of the stream hung up. There is no way the | |
630 | * session daemon can know about it thus use a DBG instead of an actual | |
631 | * error. | |
632 | */ | |
633 | DBG("UST consumer close metadata %" PRIu64 " not found", chan_key); | |
d88aee68 DG |
634 | ret = LTTNG_ERR_UST_CHAN_NOT_FOUND; |
635 | goto error; | |
636 | } | |
637 | ||
ea88ca2a | 638 | pthread_mutex_lock(&consumer_data.lock); |
73811ecc DG |
639 | |
640 | if (cds_lfht_is_node_deleted(&channel->node.node)) { | |
641 | goto error_unlock; | |
642 | } | |
643 | ||
644 | if (channel->switch_timer_enabled == 1) { | |
645 | DBG("Deleting timer on metadata channel"); | |
646 | consumer_timer_switch_stop(channel); | |
647 | } | |
648 | ||
649 | if (channel->metadata_stream) { | |
ea88ca2a MD |
650 | ret = ustctl_stream_close_wakeup_fd(channel->metadata_stream->ustream); |
651 | if (ret < 0) { | |
652 | ERR("UST consumer unable to close fd of metadata (ret: %d)", ret); | |
653 | ret = LTTCOMM_CONSUMERD_ERROR_METADATA; | |
654 | goto error_unlock; | |
655 | } | |
331744e3 | 656 | } |
d88aee68 | 657 | |
ea88ca2a MD |
658 | error_unlock: |
659 | pthread_mutex_unlock(&consumer_data.lock); | |
d88aee68 DG |
660 | error: |
661 | return ret; | |
662 | } | |
663 | ||
664 | /* | |
665 | * RCU read side lock MUST be acquired before calling this function. | |
666 | * | |
667 | * Return 0 on success else an LTTng error code. | |
668 | */ | |
669 | static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key) | |
670 | { | |
671 | int ret; | |
672 | struct lttng_consumer_channel *metadata; | |
673 | ||
8fd623e0 | 674 | DBG("UST consumer setup metadata key %" PRIu64, key); |
d88aee68 DG |
675 | |
676 | metadata = consumer_find_channel(key); | |
677 | if (!metadata) { | |
678 | ERR("UST consumer push metadata %" PRIu64 " not found", key); | |
679 | ret = LTTNG_ERR_UST_CHAN_NOT_FOUND; | |
680 | goto error; | |
681 | } | |
682 | ||
683 | /* | |
684 | * Send metadata stream to relayd if one available. Availability is | |
685 | * known if the stream is still in the list of the channel. | |
686 | */ | |
687 | if (cds_list_empty(&metadata->streams.head)) { | |
688 | ERR("Metadata channel key %" PRIu64 ", no stream available.", key); | |
689 | ret = LTTCOMM_CONSUMERD_ERROR_METADATA; | |
690 | goto error; | |
691 | } | |
692 | ||
693 | /* Send metadata stream to relayd if needed. */ | |
694 | ret = send_stream_to_relayd(metadata->metadata_stream); | |
695 | if (ret < 0) { | |
696 | ret = LTTCOMM_CONSUMERD_ERROR_METADATA; | |
697 | goto error; | |
698 | } | |
699 | ||
700 | ret = send_streams_to_thread(metadata, ctx); | |
701 | if (ret < 0) { | |
702 | /* | |
703 | * If we are unable to send the stream to the thread, there is | |
704 | * a big problem so just stop everything. | |
705 | */ | |
706 | ret = LTTCOMM_CONSUMERD_FATAL; | |
707 | goto error; | |
708 | } | |
709 | /* List MUST be empty after or else it could be reused. */ | |
710 | assert(cds_list_empty(&metadata->streams.head)); | |
711 | ||
712 | ret = 0; | |
713 | ||
714 | error: | |
715 | return ret; | |
716 | } | |
717 | ||
331744e3 JD |
718 | /* |
719 | * Receive the metadata updates from the sessiond. | |
720 | */ | |
721 | int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset, | |
722 | uint64_t len, struct lttng_consumer_channel *channel) | |
723 | { | |
724 | int ret, ret_code = LTTNG_OK; | |
725 | char *metadata_str; | |
726 | ||
8fd623e0 | 727 | DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len); |
331744e3 JD |
728 | |
729 | metadata_str = zmalloc(len * sizeof(char)); | |
730 | if (!metadata_str) { | |
731 | PERROR("zmalloc metadata string"); | |
732 | ret_code = LTTCOMM_CONSUMERD_ENOMEM; | |
733 | goto end; | |
734 | } | |
735 | ||
736 | /* Receive metadata string. */ | |
737 | ret = lttcomm_recv_unix_sock(sock, metadata_str, len); | |
738 | if (ret < 0) { | |
739 | /* Session daemon is dead so return gracefully. */ | |
740 | ret_code = ret; | |
741 | goto end_free; | |
742 | } | |
743 | ||
73811ecc DG |
744 | /* |
745 | * XXX: The consumer data lock is acquired before calling metadata cache | |
746 | * write which calls push metadata that MUST be protected by the consumer | |
747 | * lock in order to be able to check the validity of the metadata stream of | |
748 | * the channel. | |
749 | * | |
750 | * Note that this will be subject to change to better fine grained locking | |
751 | * and ultimately try to get rid of this global consumer data lock. | |
752 | */ | |
753 | pthread_mutex_lock(&consumer_data.lock); | |
754 | ||
331744e3 JD |
755 | pthread_mutex_lock(&channel->metadata_cache->lock); |
756 | ret = consumer_metadata_cache_write(channel, offset, len, metadata_str); | |
757 | if (ret < 0) { | |
758 | /* Unable to handle metadata. Notify session daemon. */ | |
759 | ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA; | |
a32bd775 DG |
760 | /* |
761 | * Skip metadata flush on write error since the offset and len might | |
762 | * not have been updated which could create an infinite loop below when | |
763 | * waiting for the metadata cache to be flushed. | |
764 | */ | |
765 | pthread_mutex_unlock(&channel->metadata_cache->lock); | |
766 | pthread_mutex_unlock(&consumer_data.lock); | |
767 | goto end_free; | |
331744e3 JD |
768 | } |
769 | pthread_mutex_unlock(&channel->metadata_cache->lock); | |
73811ecc | 770 | pthread_mutex_unlock(&consumer_data.lock); |
331744e3 JD |
771 | |
772 | while (consumer_metadata_cache_flushed(channel, offset + len)) { | |
773 | DBG("Waiting for metadata to be flushed"); | |
774 | usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME); | |
775 | } | |
776 | ||
777 | end_free: | |
778 | free(metadata_str); | |
779 | end: | |
780 | return ret_code; | |
781 | } | |
782 | ||
4cbc1a04 DG |
783 | /* |
784 | * Receive command from session daemon and process it. | |
785 | * | |
786 | * Return 1 on success else a negative value or 0. | |
787 | */ | |
3bd1e081 MD |
788 | int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx, |
789 | int sock, struct pollfd *consumer_sockpoll) | |
790 | { | |
791 | ssize_t ret; | |
f50f23d9 | 792 | enum lttng_error_code ret_code = LTTNG_OK; |
3bd1e081 | 793 | struct lttcomm_consumer_msg msg; |
ffe60014 | 794 | struct lttng_consumer_channel *channel = NULL; |
3bd1e081 MD |
795 | |
796 | ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg)); | |
797 | if (ret != sizeof(msg)) { | |
173af62f DG |
798 | DBG("Consumer received unexpected message size %zd (expects %zu)", |
799 | ret, sizeof(msg)); | |
ffe60014 | 800 | lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD); |
3be74084 DG |
801 | /* |
802 | * The ret value might 0 meaning an orderly shutdown but this is ok | |
803 | * since the caller handles this. | |
804 | */ | |
489f70e9 MD |
805 | if (ret > 0) { |
806 | ret = -1; | |
807 | } | |
3bd1e081 MD |
808 | return ret; |
809 | } | |
810 | if (msg.cmd_type == LTTNG_CONSUMER_STOP) { | |
f50f23d9 DG |
811 | /* |
812 | * Notify the session daemon that the command is completed. | |
813 | * | |
814 | * On transport layer error, the function call will print an error | |
815 | * message so handling the returned code is a bit useless since we | |
816 | * return an error code anyway. | |
817 | */ | |
818 | (void) consumer_send_status_msg(sock, ret_code); | |
3bd1e081 MD |
819 | return -ENOENT; |
820 | } | |
821 | ||
3f8e211f | 822 | /* relayd needs RCU read-side lock */ |
b0b335c8 MD |
823 | rcu_read_lock(); |
824 | ||
3bd1e081 | 825 | switch (msg.cmd_type) { |
00e2e675 DG |
826 | case LTTNG_CONSUMER_ADD_RELAYD_SOCKET: |
827 | { | |
f50f23d9 | 828 | /* Session daemon status message are handled in the following call. */ |
7735ef9e DG |
829 | ret = consumer_add_relayd_socket(msg.u.relayd_sock.net_index, |
830 | msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll, | |
46e6455f | 831 | &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id); |
00e2e675 DG |
832 | goto end_nosignal; |
833 | } | |
173af62f DG |
834 | case LTTNG_CONSUMER_DESTROY_RELAYD: |
835 | { | |
a6ba4fe1 | 836 | uint64_t index = msg.u.destroy_relayd.net_seq_idx; |
173af62f DG |
837 | struct consumer_relayd_sock_pair *relayd; |
838 | ||
a6ba4fe1 | 839 | DBG("UST consumer destroying relayd %" PRIu64, index); |
173af62f DG |
840 | |
841 | /* Get relayd reference if exists. */ | |
a6ba4fe1 | 842 | relayd = consumer_find_relayd(index); |
173af62f | 843 | if (relayd == NULL) { |
3448e266 | 844 | DBG("Unable to find relayd %" PRIu64, index); |
f50f23d9 | 845 | ret_code = LTTNG_ERR_NO_CONSUMER; |
173af62f DG |
846 | } |
847 | ||
a6ba4fe1 DG |
848 | /* |
849 | * Each relayd socket pair has a refcount of stream attached to it | |
850 | * which tells if the relayd is still active or not depending on the | |
851 | * refcount value. | |
852 | * | |
853 | * This will set the destroy flag of the relayd object and destroy it | |
854 | * if the refcount reaches zero when called. | |
855 | * | |
856 | * The destroy can happen either here or when a stream fd hangs up. | |
857 | */ | |
f50f23d9 DG |
858 | if (relayd) { |
859 | consumer_flag_relayd_for_destroy(relayd); | |
860 | } | |
861 | ||
d88aee68 | 862 | goto end_msg_sessiond; |
173af62f | 863 | } |
3bd1e081 MD |
864 | case LTTNG_CONSUMER_UPDATE_STREAM: |
865 | { | |
3f8e211f | 866 | rcu_read_unlock(); |
7ad0a0cb | 867 | return -ENOSYS; |
3bd1e081 | 868 | } |
6d805429 | 869 | case LTTNG_CONSUMER_DATA_PENDING: |
53632229 | 870 | { |
3be74084 | 871 | int ret, is_data_pending; |
6d805429 | 872 | uint64_t id = msg.u.data_pending.session_id; |
ca22feea | 873 | |
6d805429 | 874 | DBG("UST consumer data pending command for id %" PRIu64, id); |
ca22feea | 875 | |
3be74084 | 876 | is_data_pending = consumer_data_pending(id); |
ca22feea DG |
877 | |
878 | /* Send back returned value to session daemon */ | |
3be74084 DG |
879 | ret = lttcomm_send_unix_sock(sock, &is_data_pending, |
880 | sizeof(is_data_pending)); | |
ca22feea | 881 | if (ret < 0) { |
3be74084 | 882 | DBG("Error when sending the data pending ret code: %d", ret); |
489f70e9 | 883 | goto error_fatal; |
ca22feea | 884 | } |
f50f23d9 DG |
885 | |
886 | /* | |
887 | * No need to send back a status message since the data pending | |
888 | * returned value is the response. | |
889 | */ | |
ca22feea | 890 | break; |
53632229 | 891 | } |
ffe60014 DG |
892 | case LTTNG_CONSUMER_ASK_CHANNEL_CREATION: |
893 | { | |
894 | int ret; | |
895 | struct ustctl_consumer_channel_attr attr; | |
896 | ||
897 | /* Create a plain object and reserve a channel key. */ | |
898 | channel = allocate_channel(msg.u.ask_channel.session_id, | |
899 | msg.u.ask_channel.pathname, msg.u.ask_channel.name, | |
900 | msg.u.ask_channel.uid, msg.u.ask_channel.gid, | |
901 | msg.u.ask_channel.relayd_id, msg.u.ask_channel.key, | |
1624d5b7 JD |
902 | (enum lttng_event_output) msg.u.ask_channel.output, |
903 | msg.u.ask_channel.tracefile_size, | |
904 | msg.u.ask_channel.tracefile_count); | |
ffe60014 DG |
905 | if (!channel) { |
906 | goto end_channel_error; | |
907 | } | |
908 | ||
909 | /* Build channel attributes from received message. */ | |
910 | attr.subbuf_size = msg.u.ask_channel.subbuf_size; | |
911 | attr.num_subbuf = msg.u.ask_channel.num_subbuf; | |
912 | attr.overwrite = msg.u.ask_channel.overwrite; | |
913 | attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval; | |
914 | attr.read_timer_interval = msg.u.ask_channel.read_timer_interval; | |
7972aab2 | 915 | attr.chan_id = msg.u.ask_channel.chan_id; |
ffe60014 DG |
916 | memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid)); |
917 | ||
918 | /* Translate the event output type to UST. */ | |
919 | switch (channel->output) { | |
920 | case LTTNG_EVENT_SPLICE: | |
921 | /* Splice not supported so fallback on mmap(). */ | |
922 | case LTTNG_EVENT_MMAP: | |
923 | default: | |
924 | attr.output = CONSUMER_CHANNEL_MMAP; | |
925 | break; | |
926 | }; | |
927 | ||
928 | /* Translate and save channel type. */ | |
929 | switch (msg.u.ask_channel.type) { | |
930 | case LTTNG_UST_CHAN_PER_CPU: | |
931 | channel->type = CONSUMER_CHANNEL_TYPE_DATA; | |
932 | attr.type = LTTNG_UST_CHAN_PER_CPU; | |
8633d6e3 MD |
933 | /* |
934 | * Set refcount to 1 for owner. Below, we will | |
935 | * pass ownership to the | |
936 | * consumer_thread_channel_poll() thread. | |
937 | */ | |
938 | channel->refcount = 1; | |
ffe60014 DG |
939 | break; |
940 | case LTTNG_UST_CHAN_METADATA: | |
941 | channel->type = CONSUMER_CHANNEL_TYPE_METADATA; | |
942 | attr.type = LTTNG_UST_CHAN_METADATA; | |
943 | break; | |
944 | default: | |
945 | assert(0); | |
946 | goto error_fatal; | |
947 | }; | |
948 | ||
949 | ret = ask_channel(ctx, sock, channel, &attr); | |
950 | if (ret < 0) { | |
951 | goto end_channel_error; | |
952 | } | |
953 | ||
fc643247 MD |
954 | if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) { |
955 | ret = consumer_metadata_cache_allocate(channel); | |
956 | if (ret < 0) { | |
957 | ERR("Allocating metadata cache"); | |
958 | goto end_channel_error; | |
959 | } | |
960 | consumer_timer_switch_start(channel, attr.switch_timer_interval); | |
961 | attr.switch_timer_interval = 0; | |
962 | } | |
963 | ||
ffe60014 DG |
964 | /* |
965 | * Add the channel to the internal state AFTER all streams were created | |
966 | * and successfully sent to session daemon. This way, all streams must | |
967 | * be ready before this channel is visible to the threads. | |
fc643247 MD |
968 | * If add_channel succeeds, ownership of the channel is |
969 | * passed to consumer_thread_channel_poll(). | |
ffe60014 DG |
970 | */ |
971 | ret = add_channel(channel, ctx); | |
972 | if (ret < 0) { | |
ea88ca2a MD |
973 | if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) { |
974 | if (channel->switch_timer_enabled == 1) { | |
975 | consumer_timer_switch_stop(channel); | |
976 | } | |
977 | consumer_metadata_cache_destroy(channel); | |
978 | } | |
ffe60014 DG |
979 | goto end_channel_error; |
980 | } | |
981 | ||
982 | /* | |
983 | * Channel and streams are now created. Inform the session daemon that | |
984 | * everything went well and should wait to receive the channel and | |
985 | * streams with ustctl API. | |
986 | */ | |
987 | ret = consumer_send_status_channel(sock, channel); | |
988 | if (ret < 0) { | |
989 | /* | |
489f70e9 | 990 | * There is probably a problem on the socket. |
ffe60014 | 991 | */ |
489f70e9 | 992 | goto error_fatal; |
ffe60014 DG |
993 | } |
994 | ||
995 | break; | |
996 | } | |
997 | case LTTNG_CONSUMER_GET_CHANNEL: | |
998 | { | |
999 | int ret, relayd_err = 0; | |
d88aee68 | 1000 | uint64_t key = msg.u.get_channel.key; |
ffe60014 | 1001 | struct lttng_consumer_channel *channel; |
ffe60014 DG |
1002 | |
1003 | channel = consumer_find_channel(key); | |
1004 | if (!channel) { | |
8fd623e0 | 1005 | ERR("UST consumer get channel key %" PRIu64 " not found", key); |
ffe60014 DG |
1006 | ret_code = LTTNG_ERR_UST_CHAN_NOT_FOUND; |
1007 | goto end_msg_sessiond; | |
1008 | } | |
1009 | ||
1010 | /* Inform sessiond that we are about to send channel and streams. */ | |
1011 | ret = consumer_send_status_msg(sock, LTTNG_OK); | |
1012 | if (ret < 0) { | |
1013 | /* Somehow, the session daemon is not responding anymore. */ | |
489f70e9 | 1014 | goto error_fatal; |
ffe60014 DG |
1015 | } |
1016 | ||
1017 | /* Send everything to sessiond. */ | |
1018 | ret = send_sessiond_channel(sock, channel, ctx, &relayd_err); | |
1019 | if (ret < 0) { | |
1020 | if (relayd_err) { | |
1021 | /* | |
1022 | * We were unable to send to the relayd the stream so avoid | |
1023 | * sending back a fatal error to the thread since this is OK | |
1024 | * and the consumer can continue its work. | |
1025 | */ | |
1026 | ret_code = LTTNG_ERR_RELAYD_CONNECT_FAIL; | |
1027 | goto end_msg_sessiond; | |
1028 | } | |
1029 | /* | |
1030 | * The communication was broken hence there is a bad state between
1031 | * the consumer and sessiond so stop everything. | |
1032 | */ | |
1033 | goto error_fatal; | |
1034 | } | |
1035 | ||
d88aee68 DG |
1036 | ret = send_streams_to_thread(channel, ctx); |
1037 | if (ret < 0) { | |
1038 | /* | |
1039 | * If we are unable to send the stream to the thread, there is | |
1040 | * a big problem so just stop everything. | |
1041 | */ | |
1042 | goto error_fatal; | |
ffe60014 | 1043 | } |
ffe60014 DG |
1044 | /* List MUST be empty after or else it could be reused. */ |
1045 | assert(cds_list_empty(&channel->streams.head)); | |
1046 | ||
d88aee68 DG |
1047 | goto end_msg_sessiond; |
1048 | } | |
1049 | case LTTNG_CONSUMER_DESTROY_CHANNEL: | |
1050 | { | |
1051 | uint64_t key = msg.u.destroy_channel.key; | |
d88aee68 | 1052 | |
a0cbdd2e MD |
1053 | /* |
1054 | * Only called if streams have not been sent to stream | |
1055 | * manager thread. However, channel has been sent to | |
1056 | * channel manager thread. | |
1057 | */ | |
1058 | notify_thread_del_channel(ctx, key); | |
d88aee68 | 1059 | goto end_msg_sessiond; |
ffe60014 | 1060 | } |
d88aee68 DG |
1061 | case LTTNG_CONSUMER_CLOSE_METADATA: |
1062 | { | |
1063 | int ret; | |
1064 | ||
1065 | ret = close_metadata(msg.u.close_metadata.key); | |
1066 | if (ret != 0) { | |
1067 | ret_code = ret; | |
1068 | } | |
1069 | ||
1070 | goto end_msg_sessiond; | |
1071 | } | |
7972aab2 DG |
1072 | case LTTNG_CONSUMER_FLUSH_CHANNEL: |
1073 | { | |
1074 | int ret; | |
1075 | ||
1076 | ret = flush_channel(msg.u.flush_channel.key); | |
1077 | if (ret != 0) { | |
1078 | ret_code = ret; | |
1079 | } | |
1080 | ||
1081 | goto end_msg_sessiond; | |
1082 | } | |
d88aee68 | 1083 | case LTTNG_CONSUMER_PUSH_METADATA: |
ffe60014 DG |
1084 | { |
1085 | int ret; | |
d88aee68 | 1086 | uint64_t len = msg.u.push_metadata.len; |
d88aee68 | 1087 | uint64_t key = msg.u.push_metadata.key; |
331744e3 | 1088 | uint64_t offset = msg.u.push_metadata.target_offset; |
ffe60014 DG |
1089 | struct lttng_consumer_channel *channel; |
1090 | ||
8fd623e0 DG |
1091 | DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, |
1092 | len); | |
ffe60014 DG |
1093 | |
1094 | channel = consumer_find_channel(key); | |
1095 | if (!channel) { | |
8fd623e0 | 1096 | ERR("UST consumer push metadata %" PRIu64 " not found", key); |
ffe60014 | 1097 | ret_code = LTTNG_ERR_UST_CHAN_NOT_FOUND; |
4a2eb0ca | 1098 | goto end_msg_sessiond; |
d88aee68 DG |
1099 | } |
1100 | ||
1101 | /* Tell session daemon we are ready to receive the metadata. */ | |
ffe60014 DG |
1102 | ret = consumer_send_status_msg(sock, LTTNG_OK); |
1103 | if (ret < 0) { | |
1104 | /* Somehow, the session daemon is not responding anymore. */ | |
d88aee68 DG |
1105 | goto error_fatal; |
1106 | } | |
1107 | ||
1108 | /* Wait for more data. */ | |
1109 | if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) { | |
489f70e9 | 1110 | goto error_fatal; |
d88aee68 DG |
1111 | } |
1112 | ||
331744e3 JD |
1113 | ret = lttng_ustconsumer_recv_metadata(sock, key, offset, |
1114 | len, channel); | |
d88aee68 | 1115 | if (ret < 0) { |
331744e3 | 1116 | /* error receiving from sessiond */ |
489f70e9 | 1117 | goto error_fatal; |
331744e3 JD |
1118 | } else { |
1119 | ret_code = ret; | |
d88aee68 DG |
1120 | goto end_msg_sessiond; |
1121 | } | |
d88aee68 DG |
1122 | } |
1123 | case LTTNG_CONSUMER_SETUP_METADATA: | |
1124 | { | |
1125 | int ret; | |
1126 | ||
1127 | ret = setup_metadata(ctx, msg.u.setup_metadata.key); | |
1128 | if (ret) { | |
1129 | ret_code = ret; | |
1130 | } | |
1131 | goto end_msg_sessiond; | |
ffe60014 | 1132 | } |
3bd1e081 MD |
1133 | default: |
1134 | break; | |
1135 | } | |
3f8e211f | 1136 | |
3bd1e081 | 1137 | end_nosignal: |
b0b335c8 | 1138 | rcu_read_unlock(); |
4cbc1a04 DG |
1139 | |
1140 | /* | |
1141 | * Return 1 to indicate success since the 0 value can be a socket | |
1142 | * shutdown during the recv() or send() call. | |
1143 | */ | |
1144 | return 1; | |
ffe60014 DG |
1145 | |
1146 | end_msg_sessiond: | |
1147 | /* | |
1148 | * The returned value here is not useful since either way we'll return 1 to | |
1149 | * the caller because the session daemon socket management is done | |
1150 | * elsewhere. Returning a negative code or 0 will shutdown the consumer. | |
1151 | */ | |
489f70e9 MD |
1152 | ret = consumer_send_status_msg(sock, ret_code); |
1153 | if (ret < 0) { | |
1154 | goto error_fatal; | |
1155 | } | |
ffe60014 DG |
1156 | rcu_read_unlock(); |
1157 | return 1; | |
1158 | end_channel_error: | |
1159 | if (channel) { | |
1160 | /* | |
1161 | * Free channel here since no one has a reference to it. We don't | |
1162 | * free after that because a stream can store this pointer. | |
1163 | */ | |
1164 | destroy_channel(channel); | |
1165 | } | |
1166 | /* We have to send a status channel message indicating an error. */ | |
1167 | ret = consumer_send_status_channel(sock, NULL); | |
1168 | if (ret < 0) { | |
1169 | /* Stop everything if session daemon can not be notified. */ | |
1170 | goto error_fatal; | |
1171 | } | |
1172 | rcu_read_unlock(); | |
1173 | return 1; | |
1174 | error_fatal: | |
1175 | rcu_read_unlock(); | |
1176 | /* This will issue a consumer stop. */ | |
1177 | return -1; | |
3bd1e081 MD |
1178 | } |
1179 | ||
ffe60014 DG |
1180 | /* |
1181 | * Wrapper over the mmap() read offset from ust-ctl library. Since this can be | |
1182 | * compiled out, we isolate it in this library. | |
1183 | */ | |
1184 | int lttng_ustctl_get_mmap_read_offset(struct lttng_consumer_stream *stream, | |
1185 | unsigned long *off) | |
3bd1e081 | 1186 | { |
ffe60014 DG |
1187 | assert(stream); |
1188 | assert(stream->ustream); | |
b5c5fc29 | 1189 | |
ffe60014 | 1190 | return ustctl_get_mmap_read_offset(stream->ustream, off); |
3bd1e081 MD |
1191 | } |
1192 | ||
ffe60014 DG |
/*
 * Wrapper over the mmap() base address from the ust-ctl library. Since this
 * can be compiled out, we isolate it in this library.
 *
 * NOTE(review): the previous comment said "read offset" — it was copied from
 * the wrapper above; this function returns the mmap base pointer.
 */
void *lttng_ustctl_get_mmap_base(struct lttng_consumer_stream *stream)
{
	assert(stream);
	assert(stream->ustream);

	return ustctl_get_mmap_base(stream->ustream);
}
1204 | ||
ffe60014 DG |
1205 | /* |
1206 | * Take a snapshot for a specific fd | |
1207 | * | |
1208 | * Returns 0 on success, < 0 on error | |
1209 | */ | |
1210 | int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream) | |
3bd1e081 | 1211 | { |
ffe60014 DG |
1212 | assert(stream); |
1213 | assert(stream->ustream); | |
1214 | ||
1215 | return ustctl_snapshot(stream->ustream); | |
3bd1e081 MD |
1216 | } |
1217 | ||
ffe60014 DG |
1218 | /* |
1219 | * Get the produced position | |
1220 | * | |
1221 | * Returns 0 on success, < 0 on error | |
1222 | */ | |
1223 | int lttng_ustconsumer_get_produced_snapshot( | |
1224 | struct lttng_consumer_stream *stream, unsigned long *pos) | |
3bd1e081 | 1225 | { |
ffe60014 DG |
1226 | assert(stream); |
1227 | assert(stream->ustream); | |
1228 | assert(pos); | |
7a57cf92 | 1229 | |
ffe60014 DG |
1230 | return ustctl_snapshot_get_produced(stream->ustream, pos); |
1231 | } | |
7a57cf92 | 1232 | |
ffe60014 DG |
1233 | /* |
1234 | * Called when the stream signal the consumer that it has hang up. | |
1235 | */ | |
1236 | void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream) | |
1237 | { | |
1238 | assert(stream); | |
1239 | assert(stream->ustream); | |
2c1dd183 | 1240 | |
ffe60014 DG |
1241 | ustctl_flush_buffer(stream->ustream, 0); |
1242 | stream->hangup_flush_done = 1; | |
1243 | } | |
ee77a7b0 | 1244 | |
ffe60014 DG |
/*
 * Release the UST resources owned by a channel object.
 *
 * The periodic switch timer (if armed) is stopped first, then the metadata
 * cache is torn down, and the underlying ust-ctl channel is destroyed last.
 * This ordering must be preserved.
 */
void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
{
	assert(chan);
	assert(chan->uchan);

	if (chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(chan);
	}
	consumer_metadata_cache_destroy(chan);
	ustctl_destroy_channel(chan->uchan);
}
1256 | ||
/*
 * Release the UST resources owned by a stream object.
 *
 * The owning channel's switch timer (if armed) is stopped before the
 * underlying ust-ctl stream is destroyed.
 */
void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
{
	assert(stream);
	assert(stream->ustream);

	if (stream->chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(stream->chan);
	}
	ustctl_destroy_stream(stream->ustream);
}
d41f73b7 MD |
1267 | |
/*
 * Consume one sub-buffer from a UST stream: reserve the next sub-buffer,
 * write its content (mmap output only) to the tracefile or the network,
 * then release it.
 *
 * Returns the number of bytes written by the mmap operation on success, or a
 * negative value on error (failed wait-fd read or no sub-buffer available).
 */
int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	unsigned long len, subbuf_size, padding;
	int err;
	long ret = 0;
	char dummy;
	struct ustctl_consumer_stream *ustream;

	assert(stream);
	assert(stream->ustream);
	assert(ctx);

	DBG2("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
			stream->name);

	/* Ease our life for what's next. */
	ustream = stream->ustream;

	/*
	 * We can consume the 1 byte written into the wait_fd by UST, unless the
	 * stream already hung up (in that case the final flush already happened
	 * and no wakeup byte is expected). Retry on EINTR.
	 */
	if (!stream->hangup_flush_done) {
		ssize_t readlen;

		do {
			readlen = read(stream->wait_fd, &dummy, 1);
		} while (readlen == -1 && errno == EINTR);
		if (readlen == -1) {
			ret = readlen;
			goto end;
		}
	}

	/* Get the next subbuffer */
	err = ustctl_get_next_subbuf(ustream);
	if (err != 0) {
		ret = err;	/* Negative ustctl error code, propagated to the caller. */
		/*
		 * This is a debug message even for single-threaded consumer,
		 * because poll() has more relaxed criteria than get subbuf,
		 * so get_subbuf may fail for short race windows where poll()
		 * would issue wakeups.
		 */
		DBG("Reserving sub buffer failed (everything is normal, "
				"it is due to concurrency) [ret: %d]", err);
		goto end;
	}
	/* Only the mmap output mode is supported for UST channels. */
	assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);
	/* Get the full padded subbuffer size */
	err = ustctl_get_padded_subbuf_size(ustream, &len);
	assert(err == 0);

	/* Get subbuffer data size (without padding) */
	err = ustctl_get_subbuf_size(ustream, &subbuf_size);
	assert(err == 0);

	/* Make sure we don't get a subbuffer size bigger than the padded */
	assert(len >= subbuf_size);

	padding = len - subbuf_size;
	/* write the subbuffer to the tracefile */
	ret = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, subbuf_size, padding);
	/*
	 * The mmap operation should write subbuf_size amount of data when network
	 * streaming or the full padding (len) size when we are _not_ streaming.
	 */
	if ((ret != subbuf_size && stream->net_seq_idx != (uint64_t) -1ULL) ||
			(ret != len && stream->net_seq_idx == (uint64_t) -1ULL)) {
		/*
		 * Display the error but continue processing to try to release the
		 * subbuffer. This is a DBG statement since any unexpected kill or
		 * signal, the application gets unregistered, relayd gets closed or
		 * anything that affects the buffer lifetime will trigger this error.
		 * So, for the sake of the user, don't print this error since it can
		 * happen and it is OK with the code flow.
		 */
		DBG("Error writing to tracefile "
				"(ret: %ld != len: %lu != subbuf_size: %lu)",
				ret, len, subbuf_size);
	}
	/* Always release the reserved sub-buffer, even after a short write. */
	err = ustctl_put_next_subbuf(ustream);
	assert(err == 0);

end:
	return ret;
}
1353 | ||
ffe60014 DG |
1354 | /* |
1355 | * Called when a stream is created. | |
fe4477ee JD |
1356 | * |
1357 | * Return 0 on success or else a negative value. | |
ffe60014 | 1358 | */ |
d41f73b7 MD |
1359 | int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream) |
1360 | { | |
fe4477ee JD |
1361 | int ret; |
1362 | ||
1363 | /* Don't create anything if this is set for streaming. */ | |
1364 | if (stream->net_seq_idx == (uint64_t) -1ULL) { | |
1365 | ret = utils_create_stream_file(stream->chan->pathname, stream->name, | |
1366 | stream->chan->tracefile_size, stream->tracefile_count_current, | |
1367 | stream->uid, stream->gid); | |
1368 | if (ret < 0) { | |
1369 | goto error; | |
1370 | } | |
1371 | stream->out_fd = ret; | |
1372 | stream->tracefile_size_current = 0; | |
1373 | } | |
1374 | ret = 0; | |
1375 | ||
1376 | error: | |
1377 | return ret; | |
d41f73b7 | 1378 | } |
ca22feea DG |
1379 | |
1380 | /* | |
1381 | * Check if data is still being extracted from the buffers for a specific | |
4e9a4686 DG |
1382 | * stream. Consumer data lock MUST be acquired before calling this function |
1383 | * and the stream lock. | |
ca22feea | 1384 | * |
6d805429 | 1385 | * Return 1 if the traced data are still getting read else 0 meaning that the |
ca22feea DG |
1386 | * data is available for trace viewer reading. |
1387 | */ | |
6d805429 | 1388 | int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream) |
ca22feea DG |
1389 | { |
1390 | int ret; | |
1391 | ||
1392 | assert(stream); | |
ffe60014 | 1393 | assert(stream->ustream); |
ca22feea | 1394 | |
6d805429 | 1395 | DBG("UST consumer checking data pending"); |
c8f59ee5 | 1396 | |
ffe60014 | 1397 | ret = ustctl_get_next_subbuf(stream->ustream); |
ca22feea DG |
1398 | if (ret == 0) { |
1399 | /* There is still data so let's put back this subbuffer. */ | |
ffe60014 | 1400 | ret = ustctl_put_subbuf(stream->ustream); |
ca22feea | 1401 | assert(ret == 0); |
6d805429 | 1402 | ret = 1; /* Data is pending */ |
4e9a4686 | 1403 | goto end; |
ca22feea DG |
1404 | } |
1405 | ||
6d805429 DG |
1406 | /* Data is NOT pending so ready to be read. */ |
1407 | ret = 0; | |
ca22feea | 1408 | |
6efae65e DG |
1409 | end: |
1410 | return ret; | |
ca22feea | 1411 | } |
d88aee68 DG |
1412 | |
1413 | /* | |
1414 | * Close every metadata stream wait fd of the metadata hash table. This | |
1415 | * function MUST be used very carefully so not to run into a race between the | |
1416 | * metadata thread handling streams and this function closing their wait fd. | |
1417 | * | |
1418 | * For UST, this is used when the session daemon hangs up. Its the metadata | |
1419 | * producer so calling this is safe because we are assured that no state change | |
1420 | * can occur in the metadata thread for the streams in the hash table. | |
1421 | */ | |
1422 | void lttng_ustconsumer_close_metadata(struct lttng_ht *metadata_ht) | |
1423 | { | |
1424 | int ret; | |
1425 | struct lttng_ht_iter iter; | |
1426 | struct lttng_consumer_stream *stream; | |
1427 | ||
1428 | assert(metadata_ht); | |
1429 | assert(metadata_ht->ht); | |
1430 | ||
1431 | DBG("UST consumer closing all metadata streams"); | |
1432 | ||
1433 | rcu_read_lock(); | |
1434 | cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, | |
1435 | node.node) { | |
1436 | int fd = stream->wait_fd; | |
1437 | ||
1438 | /* | |
1439 | * Whatever happens here we have to continue to try to close every | |
1440 | * streams. Let's report at least the error on failure. | |
1441 | */ | |
1442 | ret = ustctl_stream_close_wakeup_fd(stream->ustream); | |
1443 | if (ret) { | |
1444 | ERR("Unable to close metadata stream fd %d ret %d", fd, ret); | |
1445 | } | |
1446 | DBG("Metadata wait fd %d closed", fd); | |
1447 | } | |
1448 | rcu_read_unlock(); | |
1449 | } | |
d8ef542d MD |
1450 | |
1451 | void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream) | |
1452 | { | |
1453 | int ret; | |
1454 | ||
1455 | ret = ustctl_stream_close_wakeup_fd(stream->ustream); | |
1456 | if (ret < 0) { | |
1457 | ERR("Unable to close wakeup fd"); | |
1458 | } | |
1459 | } | |
331744e3 JD |
1460 | |
/*
 * Ask the session daemon for the channel's new metadata over the dedicated
 * metadata socket and receive it into the channel's metadata cache.
 *
 * Protocol: send a lttcomm_metadata_request_msg, receive a consumer message
 * back; LTTNG_ERR_UND means no registry exists (not an error),
 * LTTNG_CONSUMER_PUSH_METADATA carries the metadata length/key/offset which
 * are then acknowledged and received.
 *
 * Returns 0 on success (including "nothing to receive"), a negative value or
 * the short recv() size on communication error. A 0 recv size can mean an
 * orderly shutdown, which the caller handles.
 */
int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_metadata_request_msg request;
	struct lttcomm_consumer_msg msg;
	enum lttng_error_code ret_code = LTTNG_OK;
	uint64_t len, key, offset;
	int ret;

	assert(channel);
	assert(channel->metadata_cache);

	/* send the metadata request to sessiond */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER64_UST:
		request.bits_per_long = 64;
		break;
	case LTTNG_CONSUMER32_UST:
		request.bits_per_long = 32;
		break;
	default:
		/* Unknown consumer type; let sessiond decide. */
		request.bits_per_long = 0;
		break;
	}

	request.session_id = channel->session_id;
	request.uid = channel->uid;
	request.key = channel->key;
	DBG("Sending metadata request to sessiond, session %" PRIu64,
			channel->session_id);

	ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
			sizeof(request));
	if (ret < 0) {
		ERR("Asking metadata to sessiond");
		goto end;
	}

	/* Receive the metadata from sessiond */
	ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
			sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %d (expects %zu)",
				ret, sizeof(msg));
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
		/*
		 * The ret value might 0 meaning an orderly shutdown but this is ok
		 * since the caller handles this.
		 */
		goto end;
	}

	if (msg.cmd_type == LTTNG_ERR_UND) {
		/* No registry found */
		(void) consumer_send_status_msg(ctx->consumer_metadata_socket,
				ret_code);
		ret = 0;
		goto end;
	} else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
		ERR("Unexpected cmd_type received %d", msg.cmd_type);
		ret = -1;
		goto end;
	}

	len = msg.u.push_metadata.len;
	key = msg.u.push_metadata.key;
	offset = msg.u.push_metadata.target_offset;

	/* The reply must concern the channel we asked about. */
	assert(key == channel->key);
	if (len == 0) {
		DBG("No new metadata to receive for key %" PRIu64, key);
	}

	/* Tell session daemon we are ready to receive the metadata. */
	ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
			LTTNG_OK);
	if (ret < 0 || len == 0) {
		/*
		 * Somehow, the session daemon is not responding anymore or there is
		 * nothing to receive.
		 */
		goto end;
	}

	ret_code = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
			key, offset, len, channel);
	/* Best-effort status report; errors are already reflected in ret_code. */
	(void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret_code);
	ret = 0;

end:
	return ret;
}