Fix: compile without UST support
[lttng-tools.git] / src / common / ust-consumer / ust-consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <assert.h>
21 #include <lttng/ust-ctl.h>
22 #include <poll.h>
23 #include <pthread.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/mman.h>
27 #include <sys/socket.h>
28 #include <sys/stat.h>
29 #include <sys/types.h>
30 #include <inttypes.h>
31 #include <unistd.h>
32 #include <urcu/list.h>
33 #include <signal.h>
34
35 #include <common/common.h>
36 #include <common/sessiond-comm/sessiond-comm.h>
37 #include <common/relayd/relayd.h>
38 #include <common/compat/fcntl.h>
39 #include <common/consumer-metadata-cache.h>
40 #include <common/consumer-stream.h>
41 #include <common/consumer-timer.h>
42 #include <common/utils.h>
43 #include <common/index/index.h>
44
45 #include "ust-consumer.h"
46
47 extern struct lttng_consumer_global_data consumer_data;
48 extern int consumer_poll_timeout;
49 extern volatile int consumer_quit;
50
51 /*
52 * Free channel object and all streams associated with it. This MUST be used
53 * only and only if the channel has _NEVER_ been added to the global channel
54 * hash table.
55 */
56 static void destroy_channel(struct lttng_consumer_channel *channel)
57 {
58 struct lttng_consumer_stream *stream, *stmp;
59
60 assert(channel);
61
62 DBG("UST consumer cleaning stream list");
63
64 cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
65 send_node) {
66 cds_list_del(&stream->send_node);
67 ustctl_destroy_stream(stream->ustream);
68 free(stream);
69 }
70
71 /*
72 * If a channel is available meaning that was created before the streams
73 * were, delete it.
74 */
75 if (channel->uchan) {
76 lttng_ustconsumer_del_channel(channel);
77 }
78 free(channel);
79 }
80
81 /*
82 * Add channel to internal consumer state.
83 *
84 * Returns 0 on success or else a negative value.
85 */
86 static int add_channel(struct lttng_consumer_channel *channel,
87 struct lttng_consumer_local_data *ctx)
88 {
89 int ret = 0;
90
91 assert(channel);
92 assert(ctx);
93
94 if (ctx->on_recv_channel != NULL) {
95 ret = ctx->on_recv_channel(channel);
96 if (ret == 0) {
97 ret = consumer_add_channel(channel, ctx);
98 } else if (ret < 0) {
99 /* Most likely an ENOMEM. */
100 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
101 goto error;
102 }
103 } else {
104 ret = consumer_add_channel(channel, ctx);
105 }
106
107 DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);
108
109 error:
110 return ret;
111 }
112
113 /*
114 * Allocate and return a consumer channel object.
115 */
static struct lttng_consumer_channel *allocate_channel(uint64_t session_id,
		const char *pathname, const char *name, uid_t uid, gid_t gid,
		uint64_t relayd_id, uint64_t key, enum lttng_event_output output,
		uint64_t tracefile_size, uint64_t tracefile_count,
		uint64_t session_id_per_pid, unsigned int monitor,
		unsigned int live_timer_interval)
{
	assert(pathname);
	assert(name);

	/*
	 * Thin UST-side wrapper: delegates entirely to the generic consumer
	 * allocator; note the argument reordering (key first). Returns NULL on
	 * allocation failure — ownership of the channel goes to the caller.
	 */
	return consumer_allocate_channel(key, session_id, pathname, name, uid,
			gid, relayd_id, output, tracefile_size,
			tracefile_count, session_id_per_pid, monitor, live_timer_interval);
}
130
131 /*
132 * Allocate and return a consumer stream object. If _alloc_ret is not NULL, the
133 * error value if applicable is set in it else it is kept untouched.
134 *
135 * Return NULL on error else the newly allocated stream object.
136 */
static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *_alloc_ret)
{
	int alloc_ret;
	struct lttng_consumer_stream *stream = NULL;

	assert(channel);
	assert(ctx);

	/* Error details come back through alloc_ret (negative errno style). */
	stream = consumer_allocate_stream(channel->key,
			key,
			LTTNG_CONSUMER_ACTIVE_STREAM,
			channel->name,
			channel->uid,
			channel->gid,
			channel->relayd_id,
			channel->session_id,
			cpu,
			&alloc_ret,
			channel->type,
			channel->monitor);
	if (stream == NULL) {
		switch (alloc_ret) {
		case -ENOENT:
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			DBG3("Could not find channel");
			break;
		case -ENOMEM:
		case -EINVAL:
		default:
			/* Unrecoverable allocation failure; notify the sessiond. */
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			break;
		}
		goto error;
	}

	/* Back-reference to the owning channel for later lookups. */
	stream->chan = channel;

error:
	/*
	 * NOTE(review): alloc_ret is also propagated on the success path;
	 * assumes consumer_allocate_stream() writes it unconditionally (0 on
	 * success) — confirm, otherwise *_alloc_ret may be indeterminate.
	 */
	if (_alloc_ret) {
		*_alloc_ret = alloc_ret;
	}
	return stream;
}
185
186 /*
187 * Send the given stream pointer to the corresponding thread.
188 *
189 * Returns 0 on success else a negative value.
190 */
191 static int send_stream_to_thread(struct lttng_consumer_stream *stream,
192 struct lttng_consumer_local_data *ctx)
193 {
194 int ret;
195 struct lttng_pipe *stream_pipe;
196
197 /* Get the right pipe where the stream will be sent. */
198 if (stream->metadata_flag) {
199 ret = consumer_add_metadata_stream(stream);
200 if (ret) {
201 ERR("Consumer add metadata stream %" PRIu64 " failed.",
202 stream->key);
203 goto error;
204 }
205 stream_pipe = ctx->consumer_metadata_pipe;
206 } else {
207 ret = consumer_add_data_stream(stream);
208 if (ret) {
209 ERR("Consumer add stream %" PRIu64 " failed.",
210 stream->key);
211 goto error;
212 }
213 stream_pipe = ctx->consumer_data_pipe;
214 }
215
216 /*
217 * From this point on, the stream's ownership has been moved away from
218 * the channel and becomes globally visible.
219 */
220 stream->globally_visible = 1;
221
222 ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
223 if (ret < 0) {
224 ERR("Consumer write %s stream to pipe %d",
225 stream->metadata_flag ? "metadata" : "data",
226 lttng_pipe_get_writefd(stream_pipe));
227 if (stream->metadata_flag) {
228 consumer_del_stream_for_metadata(stream);
229 } else {
230 consumer_del_stream_for_data(stream);
231 }
232 }
233 error:
234 return ret;
235 }
236
237 /*
238 * Create streams for the given channel using liblttng-ust-ctl.
239 *
240 * Return 0 on success else a negative value.
241 */
242 static int create_ust_streams(struct lttng_consumer_channel *channel,
243 struct lttng_consumer_local_data *ctx)
244 {
245 int ret, cpu = 0;
246 struct ustctl_consumer_stream *ustream;
247 struct lttng_consumer_stream *stream;
248
249 assert(channel);
250 assert(ctx);
251
252 /*
253 * While a stream is available from ustctl. When NULL is returned, we've
254 * reached the end of the possible stream for the channel.
255 */
256 while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
257 int wait_fd;
258 int ust_metadata_pipe[2];
259
260 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
261 ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
262 if (ret < 0) {
263 ERR("Create ust metadata poll pipe");
264 goto error;
265 }
266 wait_fd = ust_metadata_pipe[0];
267 } else {
268 wait_fd = ustctl_stream_get_wait_fd(ustream);
269 }
270
271 /* Allocate consumer stream object. */
272 stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
273 if (!stream) {
274 goto error_alloc;
275 }
276 stream->ustream = ustream;
277 /*
278 * Store it so we can save multiple function calls afterwards since
279 * this value is used heavily in the stream threads. This is UST
280 * specific so this is why it's done after allocation.
281 */
282 stream->wait_fd = wait_fd;
283
284 /*
285 * Increment channel refcount since the channel reference has now been
286 * assigned in the allocation process above.
287 */
288 if (stream->chan->monitor) {
289 uatomic_inc(&stream->chan->refcount);
290 }
291
292 /*
293 * Order is important this is why a list is used. On error, the caller
294 * should clean this list.
295 */
296 cds_list_add_tail(&stream->send_node, &channel->streams.head);
297
298 ret = ustctl_get_max_subbuf_size(stream->ustream,
299 &stream->max_sb_size);
300 if (ret < 0) {
301 ERR("ustctl_get_max_subbuf_size failed for stream %s",
302 stream->name);
303 goto error;
304 }
305
306 /* Do actions once stream has been received. */
307 if (ctx->on_recv_stream) {
308 ret = ctx->on_recv_stream(stream);
309 if (ret < 0) {
310 goto error;
311 }
312 }
313
314 DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
315 stream->name, stream->key, stream->relayd_stream_id);
316
317 /* Set next CPU stream. */
318 channel->streams.count = ++cpu;
319
320 /* Keep stream reference when creating metadata. */
321 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
322 channel->metadata_stream = stream;
323 stream->ust_metadata_poll_pipe[0] = ust_metadata_pipe[0];
324 stream->ust_metadata_poll_pipe[1] = ust_metadata_pipe[1];
325 }
326 }
327
328 return 0;
329
330 error:
331 error_alloc:
332 return ret;
333 }
334
335 /*
336 * Create an UST channel with the given attributes and send it to the session
337 * daemon using the ust ctl API.
338 *
339 * Return 0 on success or else a negative value.
340 */
341 static int create_ust_channel(struct ustctl_consumer_channel_attr *attr,
342 struct ustctl_consumer_channel **chanp)
343 {
344 int ret;
345 struct ustctl_consumer_channel *channel;
346
347 assert(attr);
348 assert(chanp);
349
350 DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
351 "subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
352 "switch_timer_interval: %u, read_timer_interval: %u, "
353 "output: %d, type: %d", attr->overwrite, attr->subbuf_size,
354 attr->num_subbuf, attr->switch_timer_interval,
355 attr->read_timer_interval, attr->output, attr->type);
356
357 channel = ustctl_create_channel(attr);
358 if (!channel) {
359 ret = -1;
360 goto error_create;
361 }
362
363 *chanp = channel;
364
365 return 0;
366
367 error_create:
368 return ret;
369 }
370
371 /*
372 * Send a single given stream to the session daemon using the sock.
373 *
374 * Return 0 on success else a negative value.
375 */
376 static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
377 {
378 int ret;
379
380 assert(stream);
381 assert(sock >= 0);
382
383 DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
384
385 /* Send stream to session daemon. */
386 ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
387 if (ret < 0) {
388 goto error;
389 }
390
391 error:
392 return ret;
393 }
394
395 /*
396 * Send channel to sessiond.
397 *
398 * Return 0 on success or else a negative value.
399 */
static int send_sessiond_channel(int sock,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *relayd_error)
{
	int ret, ret_code = LTTNG_OK;
	struct lttng_consumer_stream *stream;

	assert(channel);
	assert(ctx);
	assert(sock >= 0);

	DBG("UST consumer sending channel %s to sessiond", channel->name);

	/* -1ULL means no relayd is attached; streams stay local in that case. */
	if (channel->relayd_id != (uint64_t) -1ULL) {
		cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
			/* Try to send the stream to the relayd if one is available. */
			ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
			if (ret < 0) {
				/*
				 * Flag that the relayd was the problem here probably due to a
				 * communication error on the socket. Keep iterating so the
				 * failure is reported once for the whole channel below.
				 */
				if (relayd_error) {
					*relayd_error = 1;
				}
				ret_code = LTTNG_ERR_RELAYD_CONNECT_FAIL;
			}
		}
	}

	/* Inform sessiond that we are about to send channel and streams. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0 || ret_code != LTTNG_OK) {
		/*
		 * Either the session daemon is not responding or the relayd died so we
		 * stop now.
		 */
		goto error;
	}

	/* Send channel to sessiond. */
	ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
	if (ret < 0) {
		goto error;
	}

	/* The sessiond owns the channel wakeup fd from now on. */
	ret = ustctl_channel_close_wakeup_fd(channel->uchan);
	if (ret < 0) {
		goto error;
	}

	/* The channel was sent successfully to the sessiond at this point. */
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		/* Send stream to session daemon. */
		ret = send_sessiond_stream(sock, stream);
		if (ret < 0) {
			goto error;
		}
	}

	/* Tell sessiond there is no more stream. */
	ret = ustctl_send_stream_to_sessiond(sock, NULL);
	if (ret < 0) {
		goto error;
	}

	DBG("UST consumer NULL stream sent to sessiond");

	return 0;

error:
	/* Normalize: a relayd-level failure is reported as a plain -1. */
	if (ret_code != LTTNG_OK) {
		ret = -1;
	}
	return ret;
}
476
477 /*
478 * Creates a channel and streams and add the channel it to the channel internal
479 * state. The created stream must ONLY be sent once the GET_CHANNEL command is
480 * received.
481 *
482 * Return 0 on success or else, a negative value is returned and the channel
483 * MUST be destroyed by consumer_del_channel().
484 */
static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
		struct lttng_consumer_channel *channel,
		struct ustctl_consumer_channel_attr *attr)
{
	int ret;

	assert(ctx);
	assert(channel);
	assert(attr);

	/*
	 * This value is still used by the kernel consumer since for the kernel,
	 * the stream ownership is not IN the consumer so we need to have the
	 * number of left stream that needs to be initialized so we can know when
	 * to delete the channel (see consumer.c).
	 *
	 * As for the user space tracer now, the consumer creates and sends the
	 * stream to the session daemon which only sends them to the application
	 * once every stream of a channel is received making this value useless
	 * because they will be added to the poll thread before the application
	 * receives them. This ensures that a stream can not hang up during
	 * initialization of a channel.
	 */
	channel->nb_init_stream_left = 0;

	/* The reply msg status is handled in the following call. */
	ret = create_ust_channel(attr, &channel->uchan);
	if (ret < 0) {
		goto end;
	}

	/* Cache the channel-level wakeup fd for the poll threads. */
	channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);

	/*
	 * For the snapshots (no monitor), we create the metadata streams
	 * on demand, not during the channel creation.
	 */
	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
		ret = 0;
		goto end;
	}

	/* Open all streams for this channel. */
	ret = create_ust_streams(channel, ctx);
	if (ret < 0) {
		goto end;
	}

end:
	return ret;
}
536
537 /*
538 * Send all stream of a channel to the right thread handling it.
539 *
540 * On error, return a negative value else 0 on success.
541 */
542 static int send_streams_to_thread(struct lttng_consumer_channel *channel,
543 struct lttng_consumer_local_data *ctx)
544 {
545 int ret = 0;
546 struct lttng_consumer_stream *stream, *stmp;
547
548 assert(channel);
549 assert(ctx);
550
551 /* Send streams to the corresponding thread. */
552 cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
553 send_node) {
554 /* Sending the stream to the thread. */
555 ret = send_stream_to_thread(stream, ctx);
556 if (ret < 0) {
557 /*
558 * If we are unable to send the stream to the thread, there is
559 * a big problem so just stop everything.
560 */
561 /* Remove node from the channel stream list. */
562 cds_list_del(&stream->send_node);
563 goto error;
564 }
565
566 /* Remove node from the channel stream list. */
567 cds_list_del(&stream->send_node);
568
569 }
570
571 error:
572 return ret;
573 }
574
575 /*
576 * Flush channel's streams using the given key to retrieve the channel.
577 *
578 * Return 0 on success else an LTTng error code.
579 */
static int flush_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer flush channel key %" PRIu64, chan_key);

	/* RCU protects the channel and stream lookups below. */
	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, flush it. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {
		/* Second argument 1: a flush that finalizes the current sub-buffer. */
		ustctl_flush_buffer(stream->ustream, 1);
	}
error:
	rcu_read_unlock();
	return ret;
}
610 /*
611 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
612 * RCU read side lock MUST be acquired before calling this function.
613 *
614 * NOTE: This function does NOT take any channel nor stream lock.
615 *
616 * Return 0 on success else LTTng error code.
617 */
static int _close_metadata(struct lttng_consumer_channel *channel)
{
	/*
	 * NOTE(review): initialized to LTTNG_OK and on failure overwritten
	 * with LTTCOMM_CONSUMERD_* codes, while the header comment above says
	 * "Return 0 on success" — confirm callers treat LTTNG_OK as success.
	 */
	int ret = LTTNG_OK;

	assert(channel);
	assert(channel->type == CONSUMER_CHANNEL_TYPE_METADATA);

	/* Stop the metadata switch timer before tearing the stream down. */
	if (channel->switch_timer_enabled == 1) {
		DBG("Deleting timer on metadata channel");
		consumer_timer_switch_stop(channel);
	}

	if (channel->metadata_stream) {
		/* Wake up any reader blocked on the stream's wakeup fd. */
		ret = ustctl_stream_close_wakeup_fd(channel->metadata_stream->ustream);
		if (ret < 0) {
			ERR("UST consumer unable to close fd of metadata (ret: %d)", ret);
			ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		}

		if (channel->monitor) {
			/* Close the read-side in consumer_del_metadata_stream */
			ret = close(channel->metadata_stream->ust_metadata_poll_pipe[1]);
			if (ret < 0) {
				PERROR("Close UST metadata write-side poll pipe");
				ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
			}
		}
	}

	return ret;
}
649
650 /*
651 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
652 * RCU read side lock MUST be acquired before calling this function.
653 *
654 * Return 0 on success else an LTTng error code.
655 */
static int close_metadata(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;

	DBG("UST consumer close metadata key %" PRIu64, chan_key);

	/* Caller holds the RCU read lock (see header comment above). */
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		/*
		 * This is possible if the metadata thread has issue a delete because
		 * the endpoint point of the stream hung up. There is no way the
		 * session daemon can know about it thus use a DBG instead of an actual
		 * error.
		 */
		DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	/* Lock ordering: global consumer data lock first, then channel lock. */
	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* The channel may be concurrently removed from the hash table; bail out. */
	if (cds_lfht_is_node_deleted(&channel->node.node)) {
		goto error_unlock;
	}

	ret = _close_metadata(channel);

error_unlock:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
error:
	return ret;
}
691
692 /*
693 * RCU read side lock MUST be acquired before calling this function.
694 *
695 * Return 0 on success else an LTTng error code.
696 */
static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
{
	int ret;
	struct lttng_consumer_channel *metadata;

	DBG("UST consumer setup metadata key %" PRIu64, key);

	metadata = consumer_find_channel(key);
	if (!metadata) {
		ERR("UST consumer push metadata %" PRIu64 " not found", key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto end;
	}

	/*
	 * In no monitor mode, the metadata channel has no stream(s) so skip the
	 * ownership transfer to the metadata thread.
	 */
	if (!metadata->monitor) {
		DBG("Metadata channel in no monitor");
		ret = 0;
		goto end;
	}

	/*
	 * Send metadata stream to relayd if one available. Availability is
	 * known if the stream is still in the list of the channel.
	 */
	if (cds_list_empty(&metadata->streams.head)) {
		ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
		ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		goto error_no_stream;
	}

	/* Send metadata stream to relayd if needed. */
	if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
		ret = consumer_send_relayd_stream(metadata->metadata_stream,
				metadata->pathname);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto error;
		}
	}

	/* Ownership of the stream moves to the metadata thread on success. */
	ret = send_streams_to_thread(metadata, ctx);
	if (ret < 0) {
		/*
		 * If we are unable to send the stream to the thread, there is
		 * a big problem so just stop everything.
		 */
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}
	/* List MUST be empty after or else it could be reused. */
	assert(cds_list_empty(&metadata->streams.head));

	ret = 0;
	goto end;

error:
	/*
	 * Delete metadata channel on error. At this point, the metadata stream can
	 * NOT be monitored by the metadata thread thus having the guarantee that
	 * the stream is still in the local stream list of the channel. This call
	 * will make sure to clean that list.
	 *
	 * NOTE(review): when send_streams_to_thread() fails it already removed
	 * the node from the send list before returning, so this cds_list_del
	 * may run on an already-unlinked node — confirm this cannot corrupt
	 * the list.
	 */
	cds_list_del(&metadata->metadata_stream->send_node);
	consumer_stream_destroy(metadata->metadata_stream, NULL);
error_no_stream:
end:
	return ret;
}
769
770 /*
771 * Snapshot the whole metadata.
772 *
773 * Returns 0 on success, < 0 on error
774 */
static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_channel *metadata_channel;
	struct lttng_consumer_stream *metadata_stream;

	assert(path);
	assert(ctx);

	DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	rcu_read_lock();

	metadata_channel = consumer_find_channel(key);
	if (!metadata_channel) {
		ERR("UST snapshot metadata channel not found for key %" PRIu64,
				key);
		ret = -1;
		goto error;
	}
	/* Snapshots only operate on non-monitored (on-demand) channels. */
	assert(!metadata_channel->monitor);

	/*
	 * Ask the sessiond if we have new metadata waiting and update the
	 * consumer metadata cache.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);
	if (ret < 0) {
		goto error;
	}

	/*
	 * The metadata stream is NOT created in no monitor mode when the channel
	 * is created on a sessiond ask channel command.
	 */
	ret = create_ust_streams(metadata_channel, ctx);
	if (ret < 0) {
		goto error;
	}

	metadata_stream = metadata_channel->metadata_stream;
	assert(metadata_stream);

	if (relayd_id != (uint64_t) -1ULL) {
		/* Stream output goes over the network to the relayd. */
		metadata_stream->net_seq_idx = relayd_id;
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error_stream;
		}
	} else {
		/* Local output: create the on-disk trace file for the stream. */
		ret = utils_create_stream_file(path, metadata_stream->name,
				metadata_stream->chan->tracefile_size,
				metadata_stream->tracefile_count_current,
				metadata_stream->uid, metadata_stream->gid, NULL);
		if (ret < 0) {
			goto error_stream;
		}
		metadata_stream->out_fd = ret;
		metadata_stream->tracefile_size_current = 0;
	}

	/* Drain the whole metadata cache; > 0 means more data is available. */
	do {
		ret = lttng_consumer_read_subbuffer(metadata_stream, ctx);
		if (ret < 0) {
			goto error_stream;
		}
	} while (ret > 0);

error_stream:
	/*
	 * Clean up the stream completely because the next snapshot will use a new
	 * metadata stream.
	 */
	cds_list_del(&metadata_stream->send_node);
	consumer_stream_destroy(metadata_stream, NULL);
	metadata_channel->metadata_stream = NULL;

error:
	rcu_read_unlock();
	return ret;
}
858
859 /*
860 * Take a snapshot of all the stream of a channel.
861 *
862 * Returns 0 on success, < 0 on error
863 */
static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
		uint64_t max_stream_size, struct lttng_consumer_local_data *ctx)
{
	int ret;
	unsigned use_relayd = 0;
	unsigned long consumed_pos, produced_pos;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;

	assert(path);
	assert(ctx);

	rcu_read_lock();

	/* -1ULL is the sentinel for "no relayd"; anything else is its id. */
	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	channel = consumer_find_channel(key);
	if (!channel) {
		ERR("UST snapshot channel not found for key %" PRIu64, key);
		ret = -1;
		goto error;
	}
	/* Snapshots only operate on non-monitored channels. */
	assert(!channel->monitor);
	DBG("UST consumer snapshot channel %" PRIu64, key);

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		/* Lock stream because we are about to change its state. */
		pthread_mutex_lock(&stream->lock);
		stream->net_seq_idx = relayd_id;

		if (use_relayd) {
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				goto error_unlock;
			}
		} else {
			/* Local output: create the per-stream trace file. */
			ret = utils_create_stream_file(path, stream->name,
					stream->chan->tracefile_size,
					stream->tracefile_count_current,
					stream->uid, stream->gid, NULL);
			if (ret < 0) {
				goto error_unlock;
			}
			stream->out_fd = ret;
			stream->tracefile_size_current = 0;

			DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
					stream->name, stream->key);
		}

		/* Finalize the current sub-buffer so the snapshot sees all data. */
		ustctl_flush_buffer(stream->ustream, 1);

		ret = lttng_ustconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking UST snapshot");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced UST snapshot position");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumerd UST snapshot position");
			goto error_unlock;
		}

		/*
		 * The original value is sent back if max stream size is larger than
		 * the possible size of the snapshot. Also, we assume that the session
		 * daemon should never send a maximum stream size that is lower than
		 * subbuffer size.
		 */
		consumed_pos = consumer_get_consumed_maxsize(consumed_pos,
				produced_pos, max_stream_size);

		/* Walk the ring buffer one sub-buffer at a time up to produced_pos. */
		while (consumed_pos < produced_pos) {
			ssize_t read_len;
			unsigned long len, padded_len;

			DBG("UST consumer taking snapshot at pos %lu", consumed_pos);

			ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("ustctl_get_subbuf snapshot");
					goto error_close_stream;
				}
				/* EAGAIN: sub-buffer not available; skip it and move on. */
				DBG("UST consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				continue;
			}

			ret = ustctl_get_subbuf_size(stream->ustream, &len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			read_len = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, len,
					padded_len - len, NULL);
			/*
			 * Relayd output strips padding (expects len); local file
			 * output keeps it (expects padded_len).
			 */
			if (use_relayd) {
				if (read_len != len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			} else {
				if (read_len != padded_len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			}

			ret = ustctl_put_subbuf(stream->ustream);
			if (ret < 0) {
				ERR("Snapshot ustctl_put_subbuf");
				goto error_close_stream;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Simply close the stream so we can use it on the next snapshot. */
		consumer_stream_close(stream);
		pthread_mutex_unlock(&stream->lock);
	}

	rcu_read_unlock();
	return 0;

	/* Error labels unwind in acquisition order: subbuf, stream, lock, RCU. */
error_put_subbuf:
	if (ustctl_put_subbuf(stream->ustream) < 0) {
		ERR("Snapshot ustctl_put_subbuf");
	}
error_close_stream:
	consumer_stream_close(stream);
error_unlock:
	pthread_mutex_unlock(&stream->lock);
error:
	rcu_read_unlock();
	return ret;
}
1016
1017 /*
1018 * Receive the metadata updates from the sessiond.
1019 */
1020 int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
1021 uint64_t len, struct lttng_consumer_channel *channel,
1022 int timer, int wait)
1023 {
1024 int ret, ret_code = LTTNG_OK;
1025 char *metadata_str;
1026
1027 DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);
1028
1029 metadata_str = zmalloc(len * sizeof(char));
1030 if (!metadata_str) {
1031 PERROR("zmalloc metadata string");
1032 ret_code = LTTCOMM_CONSUMERD_ENOMEM;
1033 goto end;
1034 }
1035
1036 /* Receive metadata string. */
1037 ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
1038 if (ret < 0) {
1039 /* Session daemon is dead so return gracefully. */
1040 ret_code = ret;
1041 goto end_free;
1042 }
1043
1044 pthread_mutex_lock(&channel->metadata_cache->lock);
1045 ret = consumer_metadata_cache_write(channel, offset, len, metadata_str);
1046 if (ret < 0) {
1047 /* Unable to handle metadata. Notify session daemon. */
1048 ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
1049 /*
1050 * Skip metadata flush on write error since the offset and len might
1051 * not have been updated which could create an infinite loop below when
1052 * waiting for the metadata cache to be flushed.
1053 */
1054 pthread_mutex_unlock(&channel->metadata_cache->lock);
1055 goto end_free;
1056 }
1057 pthread_mutex_unlock(&channel->metadata_cache->lock);
1058
1059 if (!wait) {
1060 goto end_free;
1061 }
1062 while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
1063 DBG("Waiting for metadata to be flushed");
1064 usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
1065 }
1066
1067 end_free:
1068 free(metadata_str);
1069 end:
1070 return ret_code;
1071 }
1072
1073 /*
1074 * Receive command from session daemon and process it.
1075 *
1076 * Return 1 on success else a negative value or 0.
1077 */
int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	ssize_t ret;
	enum lttng_error_code ret_code = LTTNG_OK;
	struct lttcomm_consumer_msg msg;
	/* Only set by ASK_CHANNEL_CREATION; freed on end_channel_error. */
	struct lttng_consumer_channel *channel = NULL;

	/* Read one full command message from the sessiond socket. */
	ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %zd (expects %zu)",
			ret, sizeof(msg));
		/*
		 * The ret value might 0 meaning an orderly shutdown but this is ok
		 * since the caller handles this.
		 */
		if (ret > 0) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
			ret = -1;
		}
		return ret;
	}
	if (msg.cmd_type == LTTNG_CONSUMER_STOP) {
		/*
		 * Notify the session daemon that the command is completed.
		 *
		 * On transport layer error, the function call will print an error
		 * message so handling the returned code is a bit useless since we
		 * return an error code anyway.
		 */
		(void) consumer_send_status_msg(sock, ret_code);
		return -ENOENT;
	}

	/* relayd needs RCU read-side lock */
	rcu_read_lock();

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		/* Session daemon status message are handled in the following call. */
		ret = consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
				msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
				&msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
				msg.u.relayd_sock.relayd_session_id);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("UST consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == NULL) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTNG_ERR_NO_CONSUMER;
		}

		/*
		 * Each relayd socket pair has a refcount of stream attached to it
		 * which tells if the relayd is still active or not depending on the
		 * refcount value.
		 *
		 * This will set the destroy flag of the relayd object and destroy it
		 * if the refcount reaches zero when called.
		 *
		 * The destroy can happen either here or when a stream fd hangs up.
		 */
		if (relayd) {
			consumer_flag_relayd_for_destroy(relayd);
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		/* Not supported on the UST side. */
		rcu_read_unlock();
		return -ENOSYS;
	}
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		int ret, is_data_pending;
		uint64_t id = msg.u.data_pending.session_id;

		DBG("UST consumer data pending command for id %" PRIu64, id);

		is_data_pending = consumer_data_pending(id);

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &is_data_pending,
				sizeof(is_data_pending));
		if (ret < 0) {
			DBG("Error when sending the data pending ret code: %d", ret);
			goto error_fatal;
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
	{
		int ret;
		struct ustctl_consumer_channel_attr attr;

		/* Create a plain object and reserve a channel key. */
		channel = allocate_channel(msg.u.ask_channel.session_id,
				msg.u.ask_channel.pathname, msg.u.ask_channel.name,
				msg.u.ask_channel.uid, msg.u.ask_channel.gid,
				msg.u.ask_channel.relayd_id, msg.u.ask_channel.key,
				(enum lttng_event_output) msg.u.ask_channel.output,
				msg.u.ask_channel.tracefile_size,
				msg.u.ask_channel.tracefile_count,
				msg.u.ask_channel.session_id_per_pid,
				msg.u.ask_channel.monitor,
				msg.u.ask_channel.live_timer_interval);
		if (!channel) {
			goto end_channel_error;
		}

		/*
		 * Assign UST application UID to the channel. This value is ignored for
		 * per PID buffers. This is specific to UST thus setting this after the
		 * allocation.
		 */
		channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;

		/* Build channel attributes from received message. */
		attr.subbuf_size = msg.u.ask_channel.subbuf_size;
		attr.num_subbuf = msg.u.ask_channel.num_subbuf;
		attr.overwrite = msg.u.ask_channel.overwrite;
		attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
		attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
		attr.chan_id = msg.u.ask_channel.chan_id;
		attr.output = msg.u.ask_channel.output;
		memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));

		/* Translate and save channel type. */
		switch (msg.u.ask_channel.type) {
		case LTTNG_UST_CHAN_PER_CPU:
			channel->type = CONSUMER_CHANNEL_TYPE_DATA;
			attr.type = LTTNG_UST_CHAN_PER_CPU;
			/*
			 * Set refcount to 1 for owner. Below, we will
			 * pass ownership to the
			 * consumer_thread_channel_poll() thread.
			 */
			channel->refcount = 1;
			break;
		case LTTNG_UST_CHAN_METADATA:
			channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
			attr.type = LTTNG_UST_CHAN_METADATA;
			break;
		default:
			assert(0);
			goto error_fatal;
		};

		ret = ask_channel(ctx, sock, channel, &attr);
		if (ret < 0) {
			goto end_channel_error;
		}

		if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
			/* Metadata channels use the switch timer, not the live timer. */
			ret = consumer_metadata_cache_allocate(channel);
			if (ret < 0) {
				ERR("Allocating metadata cache");
				goto end_channel_error;
			}
			consumer_timer_switch_start(channel, attr.switch_timer_interval);
			attr.switch_timer_interval = 0;
		} else {
			consumer_timer_live_start(channel,
					msg.u.ask_channel.live_timer_interval);
		}

		/*
		 * Add the channel to the internal state AFTER all streams were created
		 * and successfully sent to session daemon. This way, all streams must
		 * be ready before this channel is visible to the threads.
		 * If add_channel succeeds, ownership of the channel is
		 * passed to consumer_thread_channel_poll().
		 */
		ret = add_channel(channel, ctx);
		if (ret < 0) {
			/* Roll back timers and cache before destroying the channel. */
			if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
				if (channel->switch_timer_enabled == 1) {
					consumer_timer_switch_stop(channel);
				}
				consumer_metadata_cache_destroy(channel);
			}
			if (channel->live_timer_enabled == 1) {
				consumer_timer_live_stop(channel);
			}
			goto end_channel_error;
		}

		/*
		 * Channel and streams are now created. Inform the session daemon that
		 * everything went well and should wait to receive the channel and
		 * streams with ustctl API.
		 */
		ret = consumer_send_status_channel(sock, channel);
		if (ret < 0) {
			/*
			 * There is probably a problem on the socket.
			 */
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_GET_CHANNEL:
	{
		int ret, relayd_err = 0;
		uint64_t key = msg.u.get_channel.key;
		struct lttng_consumer_channel *channel;

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("UST consumer get channel key %" PRIu64 " not found", key);
			ret_code = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto end_msg_sessiond;
		}

		/* Send everything to sessiond. */
		ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
		if (ret < 0) {
			if (relayd_err) {
				/*
				 * We were unable to send to the relayd the stream so avoid
				 * sending back a fatal error to the thread since this is OK
				 * and the consumer can continue its work. The above call
				 * has sent the error status message to the sessiond.
				 */
				goto end_nosignal;
			}
			/*
			 * The communicaton was broken hence there is a bad state between
			 * the consumer and sessiond so stop everything.
			 */
			goto error_fatal;
		}

		/*
		 * In no monitor mode, the streams ownership is kept inside the channel
		 * so don't send them to the data thread.
		 */
		if (!channel->monitor) {
			goto end_msg_sessiond;
		}

		ret = send_streams_to_thread(channel, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error_fatal;
		}
		/* List MUST be empty after or else it could be reused. */
		assert(cds_list_empty(&channel->streams.head));
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
	{
		uint64_t key = msg.u.destroy_channel.key;

		/*
		 * Only called if streams have not been sent to stream
		 * manager thread. However, channel has been sent to
		 * channel manager thread.
		 */
		notify_thread_del_channel(ctx, key);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_METADATA:
	{
		int ret;

		ret = close_metadata(msg.u.close_metadata.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_FLUSH_CHANNEL:
	{
		int ret;

		ret = flush_channel(msg.u.flush_channel.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_PUSH_METADATA:
	{
		int ret;
		uint64_t len = msg.u.push_metadata.len;
		uint64_t key = msg.u.push_metadata.key;
		uint64_t offset = msg.u.push_metadata.target_offset;
		struct lttng_consumer_channel *channel;

		DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
				len);

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("UST consumer push metadata %" PRIu64 " not found", key);
			ret_code = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto end_msg_sessiond;
		}

		/* Tell session daemon we are ready to receive the metadata. */
		ret = consumer_send_status_msg(sock, LTTNG_OK);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_fatal;
		}

		/* Wait for more data. */
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto error_fatal;
		}

		/* Receive the metadata payload that follows the command message. */
		ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
				len, channel, 0, 1);
		if (ret < 0) {
			/* error receiving from sessiond */
			goto error_fatal;
		} else {
			ret_code = ret;
			goto end_msg_sessiond;
		}
	}
	case LTTNG_CONSUMER_SETUP_METADATA:
	{
		int ret;

		ret = setup_metadata(ctx, msg.u.setup_metadata.key);
		if (ret) {
			ret_code = ret;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		if (msg.u.snapshot_channel.metadata) {
			ret = snapshot_metadata(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					ctx);
			if (ret < 0) {
				ERR("Snapshot metadata failed");
				ret_code = LTTNG_ERR_UST_META_FAIL;
			}
		} else {
			ret = snapshot_channel(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					msg.u.snapshot_channel.max_stream_size,
					ctx);
			if (ret < 0) {
				ERR("Snapshot channel failed");
				ret_code = LTTNG_ERR_UST_CHAN_FAIL;
			}
		}

		ret = consumer_send_status_msg(sock, ret_code);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		break;
	}
	default:
		break;
	}

	/* Exit without replying to the sessiond; status was already handled. */
end_nosignal:
	rcu_read_unlock();

	/*
	 * Return 1 to indicate success since the 0 value can be a socket
	 * shutdown during the recv() or send() call.
	 */
	return 1;

	/* Reply the accumulated ret_code to the sessiond, then exit. */
end_msg_sessiond:
	/*
	 * The returned value here is not useful since either way we'll return 1 to
	 * the caller because the session daemon socket management is done
	 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
	 */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		goto error_fatal;
	}
	rcu_read_unlock();
	return 1;
	/* Channel creation failed: free it and report the error. */
end_channel_error:
	if (channel) {
		/*
		 * Free channel here since no one has a reference to it. We don't
		 * free after that because a stream can store this pointer.
		 */
		destroy_channel(channel);
	}
	/* We have to send a status channel message indicating an error. */
	ret = consumer_send_status_channel(sock, NULL);
	if (ret < 0) {
		/* Stop everything if session daemon can not be notified. */
		goto error_fatal;
	}
	rcu_read_unlock();
	return 1;
error_fatal:
	rcu_read_unlock();
	/* This will issue a consumer stop. */
	return -1;
}
1507
1508 /*
1509 * Wrapper over the mmap() read offset from ust-ctl library. Since this can be
1510 * compiled out, we isolate it in this library.
1511 */
1512 int lttng_ustctl_get_mmap_read_offset(struct lttng_consumer_stream *stream,
1513 unsigned long *off)
1514 {
1515 assert(stream);
1516 assert(stream->ustream);
1517
1518 return ustctl_get_mmap_read_offset(stream->ustream, off);
1519 }
1520
1521 /*
1522 * Wrapper over the mmap() read offset from ust-ctl library. Since this can be
1523 * compiled out, we isolate it in this library.
1524 */
1525 void *lttng_ustctl_get_mmap_base(struct lttng_consumer_stream *stream)
1526 {
1527 assert(stream);
1528 assert(stream->ustream);
1529
1530 return ustctl_get_mmap_base(stream->ustream);
1531 }
1532
1533 /*
1534 * Take a snapshot for a specific fd
1535 *
1536 * Returns 0 on success, < 0 on error
1537 */
1538 int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
1539 {
1540 assert(stream);
1541 assert(stream->ustream);
1542
1543 return ustctl_snapshot(stream->ustream);
1544 }
1545
1546 /*
1547 * Get the produced position
1548 *
1549 * Returns 0 on success, < 0 on error
1550 */
1551 int lttng_ustconsumer_get_produced_snapshot(
1552 struct lttng_consumer_stream *stream, unsigned long *pos)
1553 {
1554 assert(stream);
1555 assert(stream->ustream);
1556 assert(pos);
1557
1558 return ustctl_snapshot_get_produced(stream->ustream, pos);
1559 }
1560
1561 /*
1562 * Get the consumed position
1563 *
1564 * Returns 0 on success, < 0 on error
1565 */
1566 int lttng_ustconsumer_get_consumed_snapshot(
1567 struct lttng_consumer_stream *stream, unsigned long *pos)
1568 {
1569 assert(stream);
1570 assert(stream->ustream);
1571 assert(pos);
1572
1573 return ustctl_snapshot_get_consumed(stream->ustream, pos);
1574 }
1575
1576 void lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream,
1577 int producer)
1578 {
1579 assert(stream);
1580 assert(stream->ustream);
1581
1582 ustctl_flush_buffer(stream->ustream, producer);
1583 }
1584
1585 int lttng_ustconsumer_get_current_timestamp(
1586 struct lttng_consumer_stream *stream, uint64_t *ts)
1587 {
1588 assert(stream);
1589 assert(stream->ustream);
1590 assert(ts);
1591
1592 return ustctl_get_current_timestamp(stream->ustream, ts);
1593 }
1594
1595 /*
1596 * Called when the stream signal the consumer that it has hang up.
1597 */
1598 void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
1599 {
1600 assert(stream);
1601 assert(stream->ustream);
1602
1603 ustctl_flush_buffer(stream->ustream, 0);
1604 stream->hangup_flush_done = 1;
1605 }
1606
/*
 * Tear down the UST side of a channel: stop its switch timer (must happen
 * before the metadata cache is destroyed — see the timer locking notes in
 * consumer-timer.c), free the metadata cache, then destroy the ustctl
 * channel object.
 */
void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
{
	assert(chan);
	assert(chan->uchan);

	if (chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(chan);
	}
	consumer_metadata_cache_destroy(chan);
	ustctl_destroy_channel(chan->uchan);
}
1618
/*
 * Tear down the UST side of a stream: stop the owning channel's switch timer
 * first so it can no longer touch the stream, then destroy the ustctl stream
 * object.
 */
void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
{
	assert(stream);
	assert(stream->ustream);

	if (stream->chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(stream->chan);
	}
	ustctl_destroy_stream(stream->ustream);
}
1629
1630 /*
1631 * Populate index values of a UST stream. Values are set in big endian order.
1632 *
1633 * Return 0 on success or else a negative value.
1634 */
1635 static int get_index_values(struct lttng_packet_index *index,
1636 struct ustctl_consumer_stream *ustream)
1637 {
1638 int ret;
1639
1640 ret = ustctl_get_timestamp_begin(ustream, &index->timestamp_begin);
1641 if (ret < 0) {
1642 PERROR("ustctl_get_timestamp_begin");
1643 goto error;
1644 }
1645 index->timestamp_begin = htobe64(index->timestamp_begin);
1646
1647 ret = ustctl_get_timestamp_end(ustream, &index->timestamp_end);
1648 if (ret < 0) {
1649 PERROR("ustctl_get_timestamp_end");
1650 goto error;
1651 }
1652 index->timestamp_end = htobe64(index->timestamp_end);
1653
1654 ret = ustctl_get_events_discarded(ustream, &index->events_discarded);
1655 if (ret < 0) {
1656 PERROR("ustctl_get_events_discarded");
1657 goto error;
1658 }
1659 index->events_discarded = htobe64(index->events_discarded);
1660
1661 ret = ustctl_get_content_size(ustream, &index->content_size);
1662 if (ret < 0) {
1663 PERROR("ustctl_get_content_size");
1664 goto error;
1665 }
1666 index->content_size = htobe64(index->content_size);
1667
1668 ret = ustctl_get_packet_size(ustream, &index->packet_size);
1669 if (ret < 0) {
1670 PERROR("ustctl_get_packet_size");
1671 goto error;
1672 }
1673 index->packet_size = htobe64(index->packet_size);
1674
1675 ret = ustctl_get_stream_id(ustream, &index->stream_id);
1676 if (ret < 0) {
1677 PERROR("ustctl_get_stream_id");
1678 goto error;
1679 }
1680 index->stream_id = htobe64(index->stream_id);
1681
1682 error:
1683 return ret;
1684 }
1685
1686 /*
1687 * Write up to one packet from the metadata cache to the channel.
1688 *
1689 * Returns the number of bytes pushed in the cache, or a negative value
1690 * on error.
1691 */
1692 static
1693 int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
1694 {
1695 ssize_t write_len;
1696 int ret;
1697
1698 pthread_mutex_lock(&stream->chan->metadata_cache->lock);
1699 if (stream->chan->metadata_cache->contiguous
1700 == stream->ust_metadata_pushed) {
1701 ret = 0;
1702 goto end;
1703 }
1704
1705 write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
1706 &stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
1707 stream->chan->metadata_cache->contiguous
1708 - stream->ust_metadata_pushed);
1709 assert(write_len != 0);
1710 if (write_len < 0) {
1711 ERR("Writing one metadata packet");
1712 ret = -1;
1713 goto end;
1714 }
1715 stream->ust_metadata_pushed += write_len;
1716
1717 assert(stream->chan->metadata_cache->contiguous >=
1718 stream->ust_metadata_pushed);
1719 ret = write_len;
1720
1721 end:
1722 pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
1723 return ret;
1724 }
1725
1726
1727 /*
1728 * Sync metadata meaning request them to the session daemon and snapshot to the
1729 * metadata thread can consumer them.
1730 *
1731 * Metadata stream lock MUST be acquired.
1732 *
1733 * Return 0 if new metadatda is available, EAGAIN if the metadata stream
1734 * is empty or a negative value on error.
1735 */
1736 int lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
1737 struct lttng_consumer_stream *metadata)
1738 {
1739 int ret;
1740 int retry = 0;
1741
1742 assert(ctx);
1743 assert(metadata);
1744
1745 /*
1746 * Request metadata from the sessiond, but don't wait for the flush
1747 * because we locked the metadata thread.
1748 */
1749 ret = lttng_ustconsumer_request_metadata(ctx, metadata->chan, 0, 0);
1750 if (ret < 0) {
1751 goto end;
1752 }
1753
1754 ret = commit_one_metadata_packet(metadata);
1755 if (ret <= 0) {
1756 goto end;
1757 } else if (ret > 0) {
1758 retry = 1;
1759 }
1760
1761 ustctl_flush_buffer(metadata->ustream, 1);
1762 ret = ustctl_snapshot(metadata->ustream);
1763 if (ret < 0) {
1764 if (errno != EAGAIN) {
1765 ERR("Sync metadata, taking UST snapshot");
1766 goto end;
1767 }
1768 DBG("No new metadata when syncing them.");
1769 /* No new metadata, exit. */
1770 ret = ENODATA;
1771 goto end;
1772 }
1773
1774 /*
1775 * After this flush, we still need to extract metadata.
1776 */
1777 if (retry) {
1778 ret = EAGAIN;
1779 }
1780
1781 end:
1782 return ret;
1783 }
1784
1785 /*
1786 * Read subbuffer from the given stream.
1787 *
1788 * Stream lock MUST be acquired.
1789 *
1790 * Return 0 on success else a negative value.
1791 */
/*
 * Read one sub-buffer from the given stream, write it to the trace file (or
 * relayd) and, for data streams, write the matching packet index.
 *
 * Stream lock MUST be acquired by the caller.
 *
 * Return 0 or a positive byte count on success else a negative value.
 */
int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	unsigned long len, subbuf_size, padding;
	/* write_index is cleared for metadata streams and on short writes. */
	int err, write_index = 1;
	long ret = 0;
	char dummy;
	struct ustctl_consumer_stream *ustream;
	struct lttng_packet_index index;

	assert(stream);
	assert(stream->ustream);
	assert(ctx);

	DBG("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
			stream->name);

	/* Ease our life for what's next. */
	ustream = stream->ustream;

	/* We can consume the 1 byte written into the wait_fd by UST */
	if (stream->monitor && !stream->hangup_flush_done) {
		ssize_t readlen;

		do {
			readlen = read(stream->wait_fd, &dummy, 1);
		} while (readlen == -1 && errno == EINTR);
		if (readlen == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
			ret = readlen;
			goto end;
		}
	}

retry:
	/* Get the next subbuffer */
	err = ustctl_get_next_subbuf(ustream);
	if (err != 0) {
		/*
		 * Populate metadata info if the existing info has
		 * already been read.
		 */
		if (stream->metadata_flag) {
			/*
			 * For metadata streams, push one cached packet into the
			 * channel and retry; give up if nothing could be pushed.
			 */
			ret = commit_one_metadata_packet(stream);
			if (ret <= 0) {
				goto end;
			}
			ustctl_flush_buffer(stream->ustream, 1);
			goto retry;
		}

		ret = err;	/* ustctl_get_next_subbuf returns negative, caller expect positive. */
		/*
		 * This is a debug message even for single-threaded consumer,
		 * because poll() have more relaxed criterions than get subbuf,
		 * so get_subbuf may fail for short race windows where poll()
		 * would issue wakeups.
		 */
		DBG("Reserving sub buffer failed (everything is normal, "
				"it is due to concurrency) [ret: %d]", err);
		goto end;
	}
	assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);

	if (!stream->metadata_flag) {
		/* Data streams get a packet index entry; metadata streams do not. */
		index.offset = htobe64(stream->out_fd_offset);
		ret = get_index_values(&index, ustream);
		if (ret < 0) {
			goto end;
		}
	} else {
		write_index = 0;
	}

	/* Get the full padded subbuffer size */
	err = ustctl_get_padded_subbuf_size(ustream, &len);
	assert(err == 0);

	/* Get subbuffer data size (without padding) */
	err = ustctl_get_subbuf_size(ustream, &subbuf_size);
	assert(err == 0);

	/* Make sure we don't get a subbuffer size bigger than the padded */
	assert(len >= subbuf_size);

	padding = len - subbuf_size;
	/* write the subbuffer to the tracefile */
	ret = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, subbuf_size, padding, &index);
	/*
	 * The mmap operation should write subbuf_size amount of data when network
	 * streaming or the full padding (len) size when we are _not_ streaming.
	 */
	if ((ret != subbuf_size && stream->net_seq_idx != (uint64_t) -1ULL) ||
			(ret != len && stream->net_seq_idx == (uint64_t) -1ULL)) {
		/*
		 * Display the error but continue processing to try to release the
		 * subbuffer. This is a DBG statement since any unexpected kill or
		 * signal, the application gets unregistered, relayd gets closed or
		 * anything that affects the buffer lifetime will trigger this error.
		 * So, for the sake of the user, don't print this error since it can
		 * happen and it is OK with the code flow.
		 */
		DBG("Error writing to tracefile "
				"(ret: %ld != len: %lu != subbuf_size: %lu)",
				ret, len, subbuf_size);
		write_index = 0;
	}
	err = ustctl_put_next_subbuf(ustream);
	assert(err == 0);

	/* Write index if needed. */
	if (!write_index) {
		goto end;
	}

	if (stream->chan->live_timer_interval && !stream->metadata_flag) {
		/*
		 * In live, block until all the metadata is sent.
		 */
		err = consumer_stream_sync_metadata(ctx, stream->session_id);
		if (err < 0) {
			goto end;
		}
	}

	/* Only data streams reach here: metadata streams cleared write_index. */
	assert(!stream->metadata_flag);
	err = consumer_stream_write_index(stream, &index);
	if (err < 0) {
		goto end;
	}

end:
	return ret;
}
1925
1926 /*
1927 * Called when a stream is created.
1928 *
1929 * Return 0 on success or else a negative value.
1930 */
1931 int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
1932 {
1933 int ret;
1934
1935 assert(stream);
1936
1937 /* Don't create anything if this is set for streaming. */
1938 if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor) {
1939 ret = utils_create_stream_file(stream->chan->pathname, stream->name,
1940 stream->chan->tracefile_size, stream->tracefile_count_current,
1941 stream->uid, stream->gid, NULL);
1942 if (ret < 0) {
1943 goto error;
1944 }
1945 stream->out_fd = ret;
1946 stream->tracefile_size_current = 0;
1947
1948 if (!stream->metadata_flag) {
1949 ret = index_create_file(stream->chan->pathname,
1950 stream->name, stream->uid, stream->gid,
1951 stream->chan->tracefile_size,
1952 stream->tracefile_count_current);
1953 if (ret < 0) {
1954 goto error;
1955 }
1956 stream->index_fd = ret;
1957 }
1958 }
1959 ret = 0;
1960
1961 error:
1962 return ret;
1963 }
1964
1965 /*
1966 * Check if data is still being extracted from the buffers for a specific
1967 * stream. Consumer data lock MUST be acquired before calling this function
1968 * and the stream lock.
1969 *
1970 * Return 1 if the traced data are still getting read else 0 meaning that the
1971 * data is available for trace viewer reading.
1972 */
/*
 * Check if data is still being extracted from the buffers for a specific
 * stream. Consumer data lock MUST be acquired before calling this function
 * and the stream lock.
 *
 * Return 1 if the traced data are still getting read else 0 meaning that the
 * data is available for trace viewer reading.
 */
int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);
	assert(stream->ustream);

	DBG("UST consumer checking data pending");

	/* A hung-up stream can no longer produce data. */
	if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
		ret = 0;
		goto end;
	}

	if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
		uint64_t contiguous, pushed;

		/* Ease our life a bit. */
		contiguous = stream->chan->metadata_cache->contiguous;
		pushed = stream->ust_metadata_pushed;

		/*
		 * We can simply check whether all contiguously available data
		 * has been pushed to the ring buffer, since the push operation
		 * is performed within get_next_subbuf(), and because both
		 * get_next_subbuf() and put_next_subbuf() are issued atomically
		 * thanks to the stream lock within
		 * lttng_ustconsumer_read_subbuffer(). This basically means that
		 * whenever ust_metadata_pushed is incremented, the associated
		 * metadata has been consumed from the metadata stream.
		 */
		DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
				contiguous, pushed);
		assert(((int64_t) contiguous - pushed) >= 0);
		/*
		 * NOTE(review): the second clause below looks redundant — when
		 * contiguous != pushed the condition is already true, and when
		 * contiguous == pushed the difference is 0 so only
		 * "contiguous == 0" can fire (reporting an empty cache as
		 * pending). Intent unclear from here; confirm against upstream
		 * history before simplifying.
		 */
		if ((contiguous != pushed) ||
				(((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
			ret = 1;	/* Data is pending */
			goto end;
		}
	} else {
		/* For data streams, probe the ring buffer directly. */
		ret = ustctl_get_next_subbuf(stream->ustream);
		if (ret == 0) {
			/*
			 * There is still data so let's put back this
			 * subbuffer.
			 */
			ret = ustctl_put_subbuf(stream->ustream);
			assert(ret == 0);
			ret = 1;	/* Data is pending */
			goto end;
		}
	}

	/* Data is NOT pending so ready to be read. */
	ret = 0;

end:
	return ret;
}
2032
2033 /*
2034 * Close every metadata stream wait fd of the metadata hash table. This
2035 * function MUST be used very carefully so not to run into a race between the
2036 * metadata thread handling streams and this function closing their wait fd.
2037 *
2038 * For UST, this is used when the session daemon hangs up. Its the metadata
2039 * producer so calling this is safe because we are assured that no state change
2040 * can occur in the metadata thread for the streams in the hash table.
2041 */
/*
 * Close every metadata stream wait fd of the metadata hash table. This
 * function MUST be used very carefully so not to run into a race between the
 * metadata thread handling streams and this function closing their wait fd.
 *
 * For UST, this is used when the session daemon hangs up. Its the metadata
 * producer so calling this is safe because we are assured that no state change
 * can occur in the metadata thread for the streams in the hash table.
 */
void lttng_ustconsumer_close_metadata(struct lttng_ht *metadata_ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(metadata_ht);
	assert(metadata_ht->ht);

	DBG("UST consumer closing all metadata streams");

	/* RCU read-side lock protects the hash table iteration. */
	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
			node.node) {
		/* Channel lock serializes against the metadata timer/thread. */
		pthread_mutex_lock(&stream->chan->lock);
		/*
		 * Whatever returned value, we must continue to try to close everything
		 * so ignore it.
		 */
		(void) _close_metadata(stream->chan);
		DBG("Metadata wait fd %d and poll pipe fd %d closed", stream->wait_fd,
				stream->ust_metadata_poll_pipe[1]);
		pthread_mutex_unlock(&stream->chan->lock);

	}
	rcu_read_unlock();
}
2068
2069 void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
2070 {
2071 int ret;
2072
2073 ret = ustctl_stream_close_wakeup_fd(stream->ustream);
2074 if (ret < 0) {
2075 ERR("Unable to close wakeup fd");
2076 }
2077 }
2078
2079 /*
2080 * Please refer to consumer-timer.c before adding any lock within this
2081 * function or any of its callees. Timers have a very strict locking
2082 * semantic with respect to teardown. Failure to respect this semantic
2083 * introduces deadlocks.
2084 */
2085 int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
2086 struct lttng_consumer_channel *channel, int timer, int wait)
2087 {
2088 struct lttcomm_metadata_request_msg request;
2089 struct lttcomm_consumer_msg msg;
2090 enum lttng_error_code ret_code = LTTNG_OK;
2091 uint64_t len, key, offset;
2092 int ret;
2093
2094 assert(channel);
2095 assert(channel->metadata_cache);
2096
2097 /* send the metadata request to sessiond */
2098 switch (consumer_data.type) {
2099 case LTTNG_CONSUMER64_UST:
2100 request.bits_per_long = 64;
2101 break;
2102 case LTTNG_CONSUMER32_UST:
2103 request.bits_per_long = 32;
2104 break;
2105 default:
2106 request.bits_per_long = 0;
2107 break;
2108 }
2109
2110 request.session_id = channel->session_id;
2111 request.session_id_per_pid = channel->session_id_per_pid;
2112 /*
2113 * Request the application UID here so the metadata of that application can
2114 * be sent back. The channel UID corresponds to the user UID of the session
2115 * used for the rights on the stream file(s).
2116 */
2117 request.uid = channel->ust_app_uid;
2118 request.key = channel->key;
2119
2120 DBG("Sending metadata request to sessiond, session id %" PRIu64
2121 ", per-pid %" PRIu64 ", app UID %u and channek key %" PRIu64,
2122 request.session_id, request.session_id_per_pid, request.uid,
2123 request.key);
2124
2125 pthread_mutex_lock(&ctx->metadata_socket_lock);
2126 ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
2127 sizeof(request));
2128 if (ret < 0) {
2129 ERR("Asking metadata to sessiond");
2130 goto end;
2131 }
2132
2133 /* Receive the metadata from sessiond */
2134 ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
2135 sizeof(msg));
2136 if (ret != sizeof(msg)) {
2137 DBG("Consumer received unexpected message size %d (expects %zu)",
2138 ret, sizeof(msg));
2139 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
2140 /*
2141 * The ret value might 0 meaning an orderly shutdown but this is ok
2142 * since the caller handles this.
2143 */
2144 goto end;
2145 }
2146
2147 if (msg.cmd_type == LTTNG_ERR_UND) {
2148 /* No registry found */
2149 (void) consumer_send_status_msg(ctx->consumer_metadata_socket,
2150 ret_code);
2151 ret = 0;
2152 goto end;
2153 } else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
2154 ERR("Unexpected cmd_type received %d", msg.cmd_type);
2155 ret = -1;
2156 goto end;
2157 }
2158
2159 len = msg.u.push_metadata.len;
2160 key = msg.u.push_metadata.key;
2161 offset = msg.u.push_metadata.target_offset;
2162
2163 assert(key == channel->key);
2164 if (len == 0) {
2165 DBG("No new metadata to receive for key %" PRIu64, key);
2166 }
2167
2168 /* Tell session daemon we are ready to receive the metadata. */
2169 ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
2170 LTTNG_OK);
2171 if (ret < 0 || len == 0) {
2172 /*
2173 * Somehow, the session daemon is not responding anymore or there is
2174 * nothing to receive.
2175 */
2176 goto end;
2177 }
2178
2179 ret_code = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
2180 key, offset, len, channel, timer, wait);
2181 if (ret_code >= 0) {
2182 /*
2183 * Only send the status msg if the sessiond is alive meaning a positive
2184 * ret code.
2185 */
2186 (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret_code);
2187 }
2188 ret = 0;
2189
2190 end:
2191 pthread_mutex_unlock(&ctx->metadata_socket_lock);
2192 return ret;
2193 }