/* lttng-tools: src/common/ust-consumer/ust-consumer.c (commit: "Live timer set up") */
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <assert.h>
21 #include <lttng/ust-ctl.h>
22 #include <poll.h>
23 #include <pthread.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/mman.h>
27 #include <sys/socket.h>
28 #include <sys/stat.h>
29 #include <sys/types.h>
30 #include <inttypes.h>
31 #include <unistd.h>
32 #include <urcu/list.h>
33 #include <signal.h>
34
35 #include <common/common.h>
36 #include <common/sessiond-comm/sessiond-comm.h>
37 #include <common/relayd/relayd.h>
38 #include <common/compat/fcntl.h>
39 #include <common/consumer-metadata-cache.h>
40 #include <common/consumer-stream.h>
41 #include <common/consumer-timer.h>
42 #include <common/utils.h>
43 #include <common/index/index.h>
44
45 #include "ust-consumer.h"
46
47 extern struct lttng_consumer_global_data consumer_data;
48 extern int consumer_poll_timeout;
49 extern volatile int consumer_quit;
50
51 /*
52 * Free channel object and all streams associated with it. This MUST be used
53 * only and only if the channel has _NEVER_ been added to the global channel
54 * hash table.
55 */
56 static void destroy_channel(struct lttng_consumer_channel *channel)
57 {
58 struct lttng_consumer_stream *stream, *stmp;
59
60 assert(channel);
61
62 DBG("UST consumer cleaning stream list");
63
64 cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
65 send_node) {
66 cds_list_del(&stream->send_node);
67 ustctl_destroy_stream(stream->ustream);
68 free(stream);
69 }
70
71 /*
72 * If a channel is available meaning that was created before the streams
73 * were, delete it.
74 */
75 if (channel->uchan) {
76 lttng_ustconsumer_del_channel(channel);
77 }
78 free(channel);
79 }
80
81 /*
82 * Add channel to internal consumer state.
83 *
84 * Returns 0 on success or else a negative value.
85 */
86 static int add_channel(struct lttng_consumer_channel *channel,
87 struct lttng_consumer_local_data *ctx)
88 {
89 int ret = 0;
90
91 assert(channel);
92 assert(ctx);
93
94 if (ctx->on_recv_channel != NULL) {
95 ret = ctx->on_recv_channel(channel);
96 if (ret == 0) {
97 ret = consumer_add_channel(channel, ctx);
98 } else if (ret < 0) {
99 /* Most likely an ENOMEM. */
100 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
101 goto error;
102 }
103 } else {
104 ret = consumer_add_channel(channel, ctx);
105 }
106
107 DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);
108
109 error:
110 return ret;
111 }
112
113 /*
114 * Allocate and return a consumer channel object.
115 */
116 static struct lttng_consumer_channel *allocate_channel(uint64_t session_id,
117 const char *pathname, const char *name, uid_t uid, gid_t gid,
118 uint64_t relayd_id, uint64_t key, enum lttng_event_output output,
119 uint64_t tracefile_size, uint64_t tracefile_count,
120 uint64_t session_id_per_pid, unsigned int monitor,
121 unsigned int live_timer_interval)
122 {
123 assert(pathname);
124 assert(name);
125
126 return consumer_allocate_channel(key, session_id, pathname, name, uid,
127 gid, relayd_id, output, tracefile_size,
128 tracefile_count, session_id_per_pid, monitor, live_timer_interval);
129 }
130
131 /*
132 * Allocate and return a consumer stream object. If _alloc_ret is not NULL, the
133 * error value if applicable is set in it else it is kept untouched.
134 *
135 * Return NULL on error else the newly allocated stream object.
136 */
137 static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
138 struct lttng_consumer_channel *channel,
139 struct lttng_consumer_local_data *ctx, int *_alloc_ret)
140 {
141 int alloc_ret;
142 struct lttng_consumer_stream *stream = NULL;
143
144 assert(channel);
145 assert(ctx);
146
147 stream = consumer_allocate_stream(channel->key,
148 key,
149 LTTNG_CONSUMER_ACTIVE_STREAM,
150 channel->name,
151 channel->uid,
152 channel->gid,
153 channel->relayd_id,
154 channel->session_id,
155 cpu,
156 &alloc_ret,
157 channel->type,
158 channel->monitor);
159 if (stream == NULL) {
160 switch (alloc_ret) {
161 case -ENOENT:
162 /*
163 * We could not find the channel. Can happen if cpu hotplug
164 * happens while tearing down.
165 */
166 DBG3("Could not find channel");
167 break;
168 case -ENOMEM:
169 case -EINVAL:
170 default:
171 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
172 break;
173 }
174 goto error;
175 }
176
177 stream->chan = channel;
178
179 error:
180 if (_alloc_ret) {
181 *_alloc_ret = alloc_ret;
182 }
183 return stream;
184 }
185
186 /*
187 * Send the given stream pointer to the corresponding thread.
188 *
189 * Returns 0 on success else a negative value.
190 */
191 static int send_stream_to_thread(struct lttng_consumer_stream *stream,
192 struct lttng_consumer_local_data *ctx)
193 {
194 int ret;
195 struct lttng_pipe *stream_pipe;
196
197 /* Get the right pipe where the stream will be sent. */
198 if (stream->metadata_flag) {
199 ret = consumer_add_metadata_stream(stream);
200 if (ret) {
201 ERR("Consumer add metadata stream %" PRIu64 " failed.",
202 stream->key);
203 goto error;
204 }
205 stream_pipe = ctx->consumer_metadata_pipe;
206 } else {
207 ret = consumer_add_data_stream(stream);
208 if (ret) {
209 ERR("Consumer add stream %" PRIu64 " failed.",
210 stream->key);
211 goto error;
212 }
213 stream_pipe = ctx->consumer_data_pipe;
214 }
215
216 /*
217 * From this point on, the stream's ownership has been moved away from
218 * the channel and becomes globally visible.
219 */
220 stream->globally_visible = 1;
221
222 ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
223 if (ret < 0) {
224 ERR("Consumer write %s stream to pipe %d",
225 stream->metadata_flag ? "metadata" : "data",
226 lttng_pipe_get_writefd(stream_pipe));
227 if (stream->metadata_flag) {
228 consumer_del_stream_for_metadata(stream);
229 } else {
230 consumer_del_stream_for_data(stream);
231 }
232 }
233 error:
234 return ret;
235 }
236
237 /*
238 * Create streams for the given channel using liblttng-ust-ctl.
239 *
240 * Return 0 on success else a negative value.
241 */
242 static int create_ust_streams(struct lttng_consumer_channel *channel,
243 struct lttng_consumer_local_data *ctx)
244 {
245 int ret, cpu = 0;
246 struct ustctl_consumer_stream *ustream;
247 struct lttng_consumer_stream *stream;
248
249 assert(channel);
250 assert(ctx);
251
252 /*
253 * While a stream is available from ustctl. When NULL is returned, we've
254 * reached the end of the possible stream for the channel.
255 */
256 while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
257 int wait_fd;
258 int ust_metadata_pipe[2];
259
260 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
261 ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
262 if (ret < 0) {
263 ERR("Create ust metadata poll pipe");
264 goto error;
265 }
266 wait_fd = ust_metadata_pipe[0];
267 } else {
268 wait_fd = ustctl_stream_get_wait_fd(ustream);
269 }
270
271 /* Allocate consumer stream object. */
272 stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
273 if (!stream) {
274 goto error_alloc;
275 }
276 stream->ustream = ustream;
277 /*
278 * Store it so we can save multiple function calls afterwards since
279 * this value is used heavily in the stream threads. This is UST
280 * specific so this is why it's done after allocation.
281 */
282 stream->wait_fd = wait_fd;
283
284 /*
285 * Increment channel refcount since the channel reference has now been
286 * assigned in the allocation process above.
287 */
288 if (stream->chan->monitor) {
289 uatomic_inc(&stream->chan->refcount);
290 }
291
292 /*
293 * Order is important this is why a list is used. On error, the caller
294 * should clean this list.
295 */
296 cds_list_add_tail(&stream->send_node, &channel->streams.head);
297
298 ret = ustctl_get_max_subbuf_size(stream->ustream,
299 &stream->max_sb_size);
300 if (ret < 0) {
301 ERR("ustctl_get_max_subbuf_size failed for stream %s",
302 stream->name);
303 goto error;
304 }
305
306 /* Do actions once stream has been received. */
307 if (ctx->on_recv_stream) {
308 ret = ctx->on_recv_stream(stream);
309 if (ret < 0) {
310 goto error;
311 }
312 }
313
314 DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
315 stream->name, stream->key, stream->relayd_stream_id);
316
317 /* Set next CPU stream. */
318 channel->streams.count = ++cpu;
319
320 /* Keep stream reference when creating metadata. */
321 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
322 channel->metadata_stream = stream;
323 stream->ust_metadata_poll_pipe[0] = ust_metadata_pipe[0];
324 stream->ust_metadata_poll_pipe[1] = ust_metadata_pipe[1];
325 }
326 }
327
328 return 0;
329
330 error:
331 error_alloc:
332 return ret;
333 }
334
335 /*
336 * Create an UST channel with the given attributes and send it to the session
337 * daemon using the ust ctl API.
338 *
339 * Return 0 on success or else a negative value.
340 */
341 static int create_ust_channel(struct ustctl_consumer_channel_attr *attr,
342 struct ustctl_consumer_channel **chanp)
343 {
344 int ret;
345 struct ustctl_consumer_channel *channel;
346
347 assert(attr);
348 assert(chanp);
349
350 DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
351 "subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
352 "switch_timer_interval: %u, read_timer_interval: %u, "
353 "output: %d, type: %d", attr->overwrite, attr->subbuf_size,
354 attr->num_subbuf, attr->switch_timer_interval,
355 attr->read_timer_interval, attr->output, attr->type);
356
357 channel = ustctl_create_channel(attr);
358 if (!channel) {
359 ret = -1;
360 goto error_create;
361 }
362
363 *chanp = channel;
364
365 return 0;
366
367 error_create:
368 return ret;
369 }
370
371 /*
372 * Send a single given stream to the session daemon using the sock.
373 *
374 * Return 0 on success else a negative value.
375 */
376 static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
377 {
378 int ret;
379
380 assert(stream);
381 assert(sock >= 0);
382
383 DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
384
385 /* Send stream to session daemon. */
386 ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
387 if (ret < 0) {
388 goto error;
389 }
390
391 error:
392 return ret;
393 }
394
/*
 * Send channel to sessiond.
 *
 * If the channel is bound to a relayd, every stream is first sent to the
 * relayd; a relayd failure is recorded in *relayd_error (when non-NULL) and
 * reported to the sessiond through the status message rather than aborting
 * immediately, so the sessiond always gets an answer.
 *
 * Return 0 on success or else a negative value.
 */
static int send_sessiond_channel(int sock,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *relayd_error)
{
	int ret, ret_code = LTTNG_OK;
	struct lttng_consumer_stream *stream;

	assert(channel);
	assert(ctx);
	assert(sock >= 0);

	DBG("UST consumer sending channel %s to sessiond", channel->name);

	if (channel->relayd_id != (uint64_t) -1ULL) {
		cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
			/* Try to send the stream to the relayd if one is available. */
			ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
			if (ret < 0) {
				/*
				 * Flag that the relayd was the problem here probably due to a
				 * communicaton error on the socket.
				 */
				if (relayd_error) {
					*relayd_error = 1;
				}
				/*
				 * Keep iterating: ret_code carries the failure to the
				 * status message sent below.
				 */
				ret_code = LTTNG_ERR_RELAYD_CONNECT_FAIL;
			}
		}
	}

	/* Inform sessiond that we are about to send channel and streams. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0 || ret_code != LTTNG_OK) {
		/*
		 * Either the session daemon is not responding or the relayd died so we
		 * stop now.
		 */
		goto error;
	}

	/* Send channel to sessiond. */
	ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
	if (ret < 0) {
		goto error;
	}

	/* The consumer side no longer needs the channel wakeup fd. */
	ret = ustctl_channel_close_wakeup_fd(channel->uchan);
	if (ret < 0) {
		goto error;
	}

	/* The channel was sent successfully to the sessiond at this point. */
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		/* Send stream to session daemon. */
		ret = send_sessiond_stream(sock, stream);
		if (ret < 0) {
			goto error;
		}
	}

	/* Tell sessiond there is no more stream. */
	ret = ustctl_send_stream_to_sessiond(sock, NULL);
	if (ret < 0) {
		goto error;
	}

	DBG("UST consumer NULL stream sent to sessiond");

	return 0;

error:
	/* A relayd failure must surface as an error even if ret is >= 0. */
	if (ret_code != LTTNG_OK) {
		ret = -1;
	}
	return ret;
}
476
477 /*
478 * Creates a channel and streams and add the channel it to the channel internal
479 * state. The created stream must ONLY be sent once the GET_CHANNEL command is
480 * received.
481 *
482 * Return 0 on success or else, a negative value is returned and the channel
483 * MUST be destroyed by consumer_del_channel().
484 */
485 static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
486 struct lttng_consumer_channel *channel,
487 struct ustctl_consumer_channel_attr *attr)
488 {
489 int ret;
490
491 assert(ctx);
492 assert(channel);
493 assert(attr);
494
495 /*
496 * This value is still used by the kernel consumer since for the kernel,
497 * the stream ownership is not IN the consumer so we need to have the
498 * number of left stream that needs to be initialized so we can know when
499 * to delete the channel (see consumer.c).
500 *
501 * As for the user space tracer now, the consumer creates and sends the
502 * stream to the session daemon which only sends them to the application
503 * once every stream of a channel is received making this value useless
504 * because we they will be added to the poll thread before the application
505 * receives them. This ensures that a stream can not hang up during
506 * initilization of a channel.
507 */
508 channel->nb_init_stream_left = 0;
509
510 /* The reply msg status is handled in the following call. */
511 ret = create_ust_channel(attr, &channel->uchan);
512 if (ret < 0) {
513 goto end;
514 }
515
516 channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);
517
518 /*
519 * For the snapshots (no monitor), we create the metadata streams
520 * on demand, not during the channel creation.
521 */
522 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
523 ret = 0;
524 goto end;
525 }
526
527 /* Open all streams for this channel. */
528 ret = create_ust_streams(channel, ctx);
529 if (ret < 0) {
530 goto end;
531 }
532
533 end:
534 return ret;
535 }
536
537 /*
538 * Send all stream of a channel to the right thread handling it.
539 *
540 * On error, return a negative value else 0 on success.
541 */
542 static int send_streams_to_thread(struct lttng_consumer_channel *channel,
543 struct lttng_consumer_local_data *ctx)
544 {
545 int ret = 0;
546 struct lttng_consumer_stream *stream, *stmp;
547
548 assert(channel);
549 assert(ctx);
550
551 /* Send streams to the corresponding thread. */
552 cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
553 send_node) {
554 /* Sending the stream to the thread. */
555 ret = send_stream_to_thread(stream, ctx);
556 if (ret < 0) {
557 /*
558 * If we are unable to send the stream to the thread, there is
559 * a big problem so just stop everything.
560 */
561 /* Remove node from the channel stream list. */
562 cds_list_del(&stream->send_node);
563 goto error;
564 }
565
566 /* Remove node from the channel stream list. */
567 cds_list_del(&stream->send_node);
568
569 }
570
571 error:
572 return ret;
573 }
574
575 /*
576 * Flush channel's streams using the given key to retrieve the channel.
577 *
578 * Return 0 on success else an LTTng error code.
579 */
580 static int flush_channel(uint64_t chan_key)
581 {
582 int ret = 0;
583 struct lttng_consumer_channel *channel;
584 struct lttng_consumer_stream *stream;
585 struct lttng_ht *ht;
586 struct lttng_ht_iter iter;
587
588 DBG("UST consumer flush channel key %" PRIu64, chan_key);
589
590 rcu_read_lock();
591 channel = consumer_find_channel(chan_key);
592 if (!channel) {
593 ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
594 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
595 goto error;
596 }
597
598 ht = consumer_data.stream_per_chan_id_ht;
599
600 /* For each stream of the channel id, flush it. */
601 cds_lfht_for_each_entry_duplicate(ht->ht,
602 ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
603 &channel->key, &iter.iter, stream, node_channel_id.node) {
604 ustctl_flush_buffer(stream->ustream, 1);
605 }
606 error:
607 rcu_read_unlock();
608 return ret;
609 }
/*
 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
 * RCU read side lock MUST be acquired before calling this function.
 *
 * NOTE: This function does NOT take any channel nor stream lock.
 *
 * NOTE(review): the return value mixes LTTNG_OK with LTTCOMM_CONSUMERD_*
 * codes, and a successful close() below overwrites an earlier
 * LTTCOMM_CONSUMERD_ERROR_METADATA with 0 — callers appear to treat any
 * non-negative value the same, but this should be confirmed.
 *
 * Return 0 on success else LTTng error code.
 */
static int _close_metadata(struct lttng_consumer_channel *channel)
{
	int ret = LTTNG_OK;

	assert(channel);
	assert(channel->type == CONSUMER_CHANNEL_TYPE_METADATA);

	/* Stop the periodic metadata switch timer if one is running. */
	if (channel->switch_timer_enabled == 1) {
		DBG("Deleting timer on metadata channel");
		consumer_timer_switch_stop(channel);
	}

	if (channel->metadata_stream) {
		ret = ustctl_stream_close_wakeup_fd(channel->metadata_stream->ustream);
		if (ret < 0) {
			ERR("UST consumer unable to close fd of metadata (ret: %d)", ret);
			ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		}

		if (channel->monitor) {
			/* Close the read-side in consumer_del_metadata_stream */
			ret = close(channel->metadata_stream->ust_metadata_poll_pipe[1]);
			if (ret < 0) {
				PERROR("Close UST metadata write-side poll pipe");
				ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
			}
		}
	}

	return ret;
}
649
650 /*
651 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
652 * RCU read side lock MUST be acquired before calling this function.
653 *
654 * Return 0 on success else an LTTng error code.
655 */
656 static int close_metadata(uint64_t chan_key)
657 {
658 int ret = 0;
659 struct lttng_consumer_channel *channel;
660
661 DBG("UST consumer close metadata key %" PRIu64, chan_key);
662
663 channel = consumer_find_channel(chan_key);
664 if (!channel) {
665 /*
666 * This is possible if the metadata thread has issue a delete because
667 * the endpoint point of the stream hung up. There is no way the
668 * session daemon can know about it thus use a DBG instead of an actual
669 * error.
670 */
671 DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
672 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
673 goto error;
674 }
675
676 pthread_mutex_lock(&consumer_data.lock);
677 pthread_mutex_lock(&channel->lock);
678
679 if (cds_lfht_is_node_deleted(&channel->node.node)) {
680 goto error_unlock;
681 }
682
683 ret = _close_metadata(channel);
684
685 error_unlock:
686 pthread_mutex_unlock(&channel->lock);
687 pthread_mutex_unlock(&consumer_data.lock);
688 error:
689 return ret;
690 }
691
/*
 * Transfer ownership of the metadata channel's stream to the metadata
 * thread (and to the relayd when one is configured for the stream).
 *
 * RCU read side lock MUST be acquired before calling this function.
 *
 * NOTE(review): no channel/stream lock is taken here; confirm callers
 * serialize access to the metadata channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
{
	int ret;
	struct lttng_consumer_channel *metadata;

	DBG("UST consumer setup metadata key %" PRIu64, key);

	metadata = consumer_find_channel(key);
	if (!metadata) {
		ERR("UST consumer push metadata %" PRIu64 " not found", key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto end;
	}

	/*
	 * In no monitor mode, the metadata channel has no stream(s) so skip the
	 * ownership transfer to the metadata thread.
	 */
	if (!metadata->monitor) {
		DBG("Metadata channel in no monitor");
		ret = 0;
		goto end;
	}

	/*
	 * Send metadata stream to relayd if one available. Availability is
	 * known if the stream is still in the list of the channel.
	 */
	if (cds_list_empty(&metadata->streams.head)) {
		ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
		ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		goto error_no_stream;
	}

	/* Send metadata stream to relayd if needed. */
	if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
		ret = consumer_send_relayd_stream(metadata->metadata_stream,
				metadata->pathname);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto error;
		}
	}

	/* Ownership of the stream moves to the metadata thread here. */
	ret = send_streams_to_thread(metadata, ctx);
	if (ret < 0) {
		/*
		 * If we are unable to send the stream to the thread, there is
		 * a big problem so just stop everything.
		 */
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}
	/* List MUST be empty after or else it could be reused. */
	assert(cds_list_empty(&metadata->streams.head));

	ret = 0;
	goto end;

error:
	/*
	 * Delete metadata channel on error. At this point, the metadata stream can
	 * NOT be monitored by the metadata thread thus having the guarantee that
	 * the stream is still in the local stream list of the channel. This call
	 * will make sure to clean that list.
	 */
	cds_list_del(&metadata->metadata_stream->send_node);
	consumer_stream_destroy(metadata->metadata_stream, NULL);
error_no_stream:
end:
	return ret;
}
769
/*
 * Snapshot the whole metadata.
 *
 * Creates a temporary metadata stream (no-monitor channels have none),
 * drains the metadata cache into the relayd or a local file, then destroys
 * the stream so the next snapshot starts from a fresh one.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_channel *metadata_channel;
	struct lttng_consumer_stream *metadata_stream;

	assert(path);
	assert(ctx);

	DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	rcu_read_lock();

	metadata_channel = consumer_find_channel(key);
	if (!metadata_channel) {
		ERR("UST snapshot metadata channel not found for key %" PRIu64,
				key);
		ret = -1;
		goto error;
	}
	/* Snapshots only apply to no-monitor metadata channels. */
	assert(!metadata_channel->monitor);

	/*
	 * Ask the sessiond if we have new metadata waiting and update the
	 * consumer metadata cache.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0);
	if (ret < 0) {
		goto error;
	}

	/*
	 * The metadata stream is NOT created in no monitor mode when the channel
	 * is created on a sessiond ask channel command.
	 */
	ret = create_ust_streams(metadata_channel, ctx);
	if (ret < 0) {
		goto error;
	}

	metadata_stream = metadata_channel->metadata_stream;
	assert(metadata_stream);

	if (relayd_id != (uint64_t) -1ULL) {
		/* Stream output goes to the relayd. */
		metadata_stream->net_seq_idx = relayd_id;
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error_stream;
		}
	} else {
		/* Local output: create the on-disk trace file. */
		ret = utils_create_stream_file(path, metadata_stream->name,
				metadata_stream->chan->tracefile_size,
				metadata_stream->tracefile_count_current,
				metadata_stream->uid, metadata_stream->gid, NULL);
		if (ret < 0) {
			goto error_stream;
		}
		metadata_stream->out_fd = ret;
		metadata_stream->tracefile_size_current = 0;
	}

	pthread_mutex_lock(&metadata_channel->metadata_cache->lock);

	/* Drain until the read returns 0 (no more data) or an error. */
	do {
		ret = lttng_consumer_read_subbuffer(metadata_stream, ctx);
		if (ret < 0) {
			goto error_unlock;
		}
	} while (ret > 0);

error_unlock:
	pthread_mutex_unlock(&metadata_channel->metadata_cache->lock);

error_stream:
	/*
	 * Clean up the stream completly because the next snapshot will use a new
	 * metadata stream.
	 */
	cds_list_del(&metadata_stream->send_node);
	consumer_stream_destroy(metadata_stream, NULL);
	metadata_channel->metadata_stream = NULL;

error:
	rcu_read_unlock();
	return ret;
}
863
/*
 * Take a snapshot of all the stream of a channel.
 *
 * For each stream: attach it to the relayd or a local trace file, flush the
 * ring buffer, record the produced/consumed positions, then copy every
 * sub-buffer between those positions (capped by max_stream_size) to the
 * output, and finally close the stream so it can be reused for the next
 * snapshot.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
		uint64_t max_stream_size, struct lttng_consumer_local_data *ctx)
{
	int ret;
	unsigned use_relayd = 0;
	unsigned long consumed_pos, produced_pos;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;

	assert(path);
	assert(ctx);

	rcu_read_lock();

	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	channel = consumer_find_channel(key);
	if (!channel) {
		ERR("UST snapshot channel not found for key %" PRIu64, key);
		ret = -1;
		goto error;
	}
	/* Snapshots only apply to no-monitor channels. */
	assert(!channel->monitor);
	DBG("UST consumer snapshot channel %" PRIu64, key);

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		/* Lock stream because we are about to change its state. */
		pthread_mutex_lock(&stream->lock);
		stream->net_seq_idx = relayd_id;

		if (use_relayd) {
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				goto error_unlock;
			}
		} else {
			ret = utils_create_stream_file(path, stream->name,
					stream->chan->tracefile_size,
					stream->tracefile_count_current,
					stream->uid, stream->gid, NULL);
			if (ret < 0) {
				goto error_unlock;
			}
			stream->out_fd = ret;
			stream->tracefile_size_current = 0;

			DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
					stream->name, stream->key);
		}

		/* Push any buffered events out before sampling positions. */
		ustctl_flush_buffer(stream->ustream, 1);

		ret = lttng_ustconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking UST snapshot");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced UST snapshot position");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumerd UST snapshot position");
			goto error_unlock;
		}

		/*
		 * The original value is sent back if max stream size is larger than
		 * the possible size of the snapshot. Also, we asume that the session
		 * daemon should never send a maximum stream size that is lower than
		 * subbuffer size.
		 */
		consumed_pos = consumer_get_consumed_maxsize(consumed_pos,
				produced_pos, max_stream_size);

		/* Copy one sub-buffer per iteration until caught up. */
		while (consumed_pos < produced_pos) {
			ssize_t read_len;
			unsigned long len, padded_len;

			DBG("UST consumer taking snapshot at pos %lu", consumed_pos);

			ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("ustctl_get_subbuf snapshot");
					goto error_close_stream;
				}
				/* -EAGAIN: sub-buffer not yet available; skip it. */
				DBG("UST consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				continue;
			}

			ret = ustctl_get_subbuf_size(stream->ustream, &len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			read_len = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, len,
					padded_len - len, NULL);
			/*
			 * The relayd output strips padding (expects len bytes); the
			 * local file output keeps it (expects padded_len bytes).
			 */
			if (use_relayd) {
				if (read_len != len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			} else {
				if (read_len != padded_len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			}

			ret = ustctl_put_subbuf(stream->ustream);
			if (ret < 0) {
				ERR("Snapshot ustctl_put_subbuf");
				goto error_close_stream;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Simply close the stream so we can use it on the next snapshot. */
		consumer_stream_close(stream);
		pthread_mutex_unlock(&stream->lock);
	}

	rcu_read_unlock();
	return 0;

error_put_subbuf:
	/* Release the sub-buffer acquired above before closing. */
	if (ustctl_put_subbuf(stream->ustream) < 0) {
		ERR("Snapshot ustctl_put_subbuf");
	}
error_close_stream:
	consumer_stream_close(stream);
error_unlock:
	/* stream still points at the stream locked in the loop body. */
	pthread_mutex_unlock(&stream->lock);
error:
	rcu_read_unlock();
	return ret;
}
1021
1022 /*
1023 * Receive the metadata updates from the sessiond.
1024 */
1025 int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
1026 uint64_t len, struct lttng_consumer_channel *channel,
1027 int timer)
1028 {
1029 int ret, ret_code = LTTNG_OK;
1030 char *metadata_str;
1031
1032 DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);
1033
1034 metadata_str = zmalloc(len * sizeof(char));
1035 if (!metadata_str) {
1036 PERROR("zmalloc metadata string");
1037 ret_code = LTTCOMM_CONSUMERD_ENOMEM;
1038 goto end;
1039 }
1040
1041 /* Receive metadata string. */
1042 ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
1043 if (ret < 0) {
1044 /* Session daemon is dead so return gracefully. */
1045 ret_code = ret;
1046 goto end_free;
1047 }
1048
1049 pthread_mutex_lock(&channel->metadata_cache->lock);
1050 ret = consumer_metadata_cache_write(channel, offset, len, metadata_str);
1051 if (ret < 0) {
1052 /* Unable to handle metadata. Notify session daemon. */
1053 ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
1054 /*
1055 * Skip metadata flush on write error since the offset and len might
1056 * not have been updated which could create an infinite loop below when
1057 * waiting for the metadata cache to be flushed.
1058 */
1059 pthread_mutex_unlock(&channel->metadata_cache->lock);
1060 goto end_free;
1061 }
1062 pthread_mutex_unlock(&channel->metadata_cache->lock);
1063
1064 while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
1065 DBG("Waiting for metadata to be flushed");
1066 usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
1067 }
1068
1069 end_free:
1070 free(metadata_str);
1071 end:
1072 return ret_code;
1073 }
1074
/*
 * Receive one command message from the session daemon on the given socket
 * and process it.
 *
 * Depending on the command, a status reply is sent back to the session
 * daemon on the same socket; the socket itself is managed by the caller.
 *
 * Return 1 on success else a negative value or 0.
 */
int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	ssize_t ret;
	enum lttng_error_code ret_code = LTTNG_OK;
	struct lttcomm_consumer_msg msg;
	struct lttng_consumer_channel *channel = NULL;

	ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %zd (expects %zu)",
			ret, sizeof(msg));
		/*
		 * The ret value might 0 meaning an orderly shutdown but this is ok
		 * since the caller handles this.
		 */
		if (ret > 0) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
			ret = -1;
		}
		return ret;
	}
	if (msg.cmd_type == LTTNG_CONSUMER_STOP) {
		/*
		 * Notify the session daemon that the command is completed.
		 *
		 * On transport layer error, the function call will print an error
		 * message so handling the returned code is a bit useless since we
		 * return an error code anyway.
		 */
		(void) consumer_send_status_msg(sock, ret_code);
		return -ENOENT;
	}

	/* relayd needs RCU read-side lock */
	rcu_read_lock();

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		/* Session daemon status message are handled in the following call. */
		ret = consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
				msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
				&msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("UST consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == NULL) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTNG_ERR_NO_CONSUMER;
		}

		/*
		 * Each relayd socket pair has a refcount of stream attached to it
		 * which tells if the relayd is still active or not depending on the
		 * refcount value.
		 *
		 * This will set the destroy flag of the relayd object and destroy it
		 * if the refcount reaches zero when called.
		 *
		 * The destroy can happen either here or when a stream fd hangs up.
		 */
		if (relayd) {
			consumer_flag_relayd_for_destroy(relayd);
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		/* Not implemented for the UST consumer. */
		rcu_read_unlock();
		return -ENOSYS;
	}
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		int ret, is_data_pending;
		uint64_t id = msg.u.data_pending.session_id;

		DBG("UST consumer data pending command for id %" PRIu64, id);

		is_data_pending = consumer_data_pending(id);

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &is_data_pending,
				sizeof(is_data_pending));
		if (ret < 0) {
			DBG("Error when sending the data pending ret code: %d", ret);
			goto error_fatal;
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
	{
		int ret;
		struct ustctl_consumer_channel_attr attr;

		/* Create a plain object and reserve a channel key. */
		channel = allocate_channel(msg.u.ask_channel.session_id,
				msg.u.ask_channel.pathname, msg.u.ask_channel.name,
				msg.u.ask_channel.uid, msg.u.ask_channel.gid,
				msg.u.ask_channel.relayd_id, msg.u.ask_channel.key,
				(enum lttng_event_output) msg.u.ask_channel.output,
				msg.u.ask_channel.tracefile_size,
				msg.u.ask_channel.tracefile_count,
				msg.u.ask_channel.session_id_per_pid,
				msg.u.ask_channel.monitor,
				msg.u.ask_channel.live_timer_interval);
		if (!channel) {
			goto end_channel_error;
		}

		/*
		 * Assign UST application UID to the channel. This value is ignored for
		 * per PID buffers. This is specific to UST thus setting this after the
		 * allocation.
		 */
		channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;

		/* Build channel attributes from received message. */
		attr.subbuf_size = msg.u.ask_channel.subbuf_size;
		attr.num_subbuf = msg.u.ask_channel.num_subbuf;
		attr.overwrite = msg.u.ask_channel.overwrite;
		attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
		attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
		attr.chan_id = msg.u.ask_channel.chan_id;
		attr.output = msg.u.ask_channel.output;
		memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));

		/* Translate and save channel type. */
		switch (msg.u.ask_channel.type) {
		case LTTNG_UST_CHAN_PER_CPU:
			channel->type = CONSUMER_CHANNEL_TYPE_DATA;
			attr.type = LTTNG_UST_CHAN_PER_CPU;
			/*
			 * Set refcount to 1 for owner. Below, we will
			 * pass ownership to the
			 * consumer_thread_channel_poll() thread.
			 */
			channel->refcount = 1;
			break;
		case LTTNG_UST_CHAN_METADATA:
			channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
			attr.type = LTTNG_UST_CHAN_METADATA;
			break;
		default:
			assert(0);
			goto error_fatal;
		};

		ret = ask_channel(ctx, sock, channel, &attr);
		if (ret < 0) {
			goto end_channel_error;
		}

		if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
			ret = consumer_metadata_cache_allocate(channel);
			if (ret < 0) {
				ERR("Allocating metadata cache");
				goto end_channel_error;
			}
			/*
			 * The consumer-side switch timer takes over periodic metadata
			 * flushing; zero the attribute so the tracer side does not
			 * also arm a switch timer of its own.
			 */
			consumer_timer_switch_start(channel, attr.switch_timer_interval);
			attr.switch_timer_interval = 0;
		}

		/*
		 * Add the channel to the internal state AFTER all streams were created
		 * and successfully sent to session daemon. This way, all streams must
		 * be ready before this channel is visible to the threads.
		 * If add_channel succeeds, ownership of the channel is
		 * passed to consumer_thread_channel_poll().
		 */
		ret = add_channel(channel, ctx);
		if (ret < 0) {
			if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
				/* Undo the metadata-specific setup done above. */
				if (channel->switch_timer_enabled == 1) {
					consumer_timer_switch_stop(channel);
				}
				consumer_metadata_cache_destroy(channel);
			}
			goto end_channel_error;
		}

		/*
		 * Channel and streams are now created. Inform the session daemon that
		 * everything went well and should wait to receive the channel and
		 * streams with ustctl API.
		 */
		ret = consumer_send_status_channel(sock, channel);
		if (ret < 0) {
			/*
			 * There is probably a problem on the socket.
			 */
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_GET_CHANNEL:
	{
		int ret, relayd_err = 0;
		uint64_t key = msg.u.get_channel.key;
		struct lttng_consumer_channel *channel;

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("UST consumer get channel key %" PRIu64 " not found", key);
			ret_code = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto end_msg_sessiond;
		}

		/* Send everything to sessiond. */
		ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
		if (ret < 0) {
			if (relayd_err) {
				/*
				 * We were unable to send to the relayd the stream so avoid
				 * sending back a fatal error to the thread since this is OK
				 * and the consumer can continue its work. The above call
				 * has sent the error status message to the sessiond.
				 */
				goto end_nosignal;
			}
			/*
			 * The communication was broken hence there is a bad state between
			 * the consumer and sessiond so stop everything.
			 */
			goto error_fatal;
		}

		/*
		 * In no monitor mode, the streams ownership is kept inside the channel
		 * so don't send them to the data thread.
		 */
		if (!channel->monitor) {
			goto end_msg_sessiond;
		}

		ret = send_streams_to_thread(channel, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error_fatal;
		}
		/* List MUST be empty after or else it could be reused. */
		assert(cds_list_empty(&channel->streams.head));
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
	{
		uint64_t key = msg.u.destroy_channel.key;

		/*
		 * Only called if streams have not been sent to stream
		 * manager thread. However, channel has been sent to
		 * channel manager thread.
		 */
		notify_thread_del_channel(ctx, key);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_METADATA:
	{
		int ret;

		ret = close_metadata(msg.u.close_metadata.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_FLUSH_CHANNEL:
	{
		int ret;

		ret = flush_channel(msg.u.flush_channel.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_PUSH_METADATA:
	{
		int ret;
		uint64_t len = msg.u.push_metadata.len;
		uint64_t key = msg.u.push_metadata.key;
		uint64_t offset = msg.u.push_metadata.target_offset;
		struct lttng_consumer_channel *channel;

		DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
				len);

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("UST consumer push metadata %" PRIu64 " not found", key);
			ret_code = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto end_msg_sessiond;
		}

		/* Tell session daemon we are ready to receive the metadata. */
		ret = consumer_send_status_msg(sock, LTTNG_OK);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_fatal;
		}

		/* Wait for more data. */
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto error_fatal;
		}

		ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
				len, channel, 0);
		if (ret < 0) {
			/* error receiving from sessiond */
			goto error_fatal;
		} else {
			ret_code = ret;
			goto end_msg_sessiond;
		}
	}
	case LTTNG_CONSUMER_SETUP_METADATA:
	{
		int ret;

		ret = setup_metadata(ctx, msg.u.setup_metadata.key);
		if (ret) {
			ret_code = ret;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		if (msg.u.snapshot_channel.metadata) {
			ret = snapshot_metadata(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					ctx);
			if (ret < 0) {
				ERR("Snapshot metadata failed");
				ret_code = LTTNG_ERR_UST_META_FAIL;
			}
		} else {
			ret = snapshot_channel(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					msg.u.snapshot_channel.max_stream_size,
					ctx);
			if (ret < 0) {
				ERR("Snapshot channel failed");
				ret_code = LTTNG_ERR_UST_CHAN_FAIL;
			}
		}

		ret = consumer_send_status_msg(sock, ret_code);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		break;
	}
	default:
		break;
	}

end_nosignal:
	rcu_read_unlock();

	/*
	 * Return 1 to indicate success since the 0 value can be a socket
	 * shutdown during the recv() or send() call.
	 */
	return 1;

end_msg_sessiond:
	/*
	 * The returned value here is not useful since either way we'll return 1 to
	 * the caller because the session daemon socket management is done
	 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
	 */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		goto error_fatal;
	}
	rcu_read_unlock();
	return 1;
end_channel_error:
	if (channel) {
		/*
		 * Free channel here since no one has a reference to it. We don't
		 * free after that because a stream can store this pointer.
		 */
		destroy_channel(channel);
	}
	/* We have to send a status channel message indicating an error. */
	ret = consumer_send_status_channel(sock, NULL);
	if (ret < 0) {
		/* Stop everything if session daemon can not be notified. */
		goto error_fatal;
	}
	rcu_read_unlock();
	return 1;
error_fatal:
	rcu_read_unlock();
	/* This will issue a consumer stop. */
	return -1;
}
1502
1503 /*
1504 * Wrapper over the mmap() read offset from ust-ctl library. Since this can be
1505 * compiled out, we isolate it in this library.
1506 */
1507 int lttng_ustctl_get_mmap_read_offset(struct lttng_consumer_stream *stream,
1508 unsigned long *off)
1509 {
1510 assert(stream);
1511 assert(stream->ustream);
1512
1513 return ustctl_get_mmap_read_offset(stream->ustream, off);
1514 }
1515
1516 /*
1517 * Wrapper over the mmap() read offset from ust-ctl library. Since this can be
1518 * compiled out, we isolate it in this library.
1519 */
1520 void *lttng_ustctl_get_mmap_base(struct lttng_consumer_stream *stream)
1521 {
1522 assert(stream);
1523 assert(stream->ustream);
1524
1525 return ustctl_get_mmap_base(stream->ustream);
1526 }
1527
1528 /*
1529 * Take a snapshot for a specific fd
1530 *
1531 * Returns 0 on success, < 0 on error
1532 */
1533 int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
1534 {
1535 assert(stream);
1536 assert(stream->ustream);
1537
1538 return ustctl_snapshot(stream->ustream);
1539 }
1540
1541 /*
1542 * Get the produced position
1543 *
1544 * Returns 0 on success, < 0 on error
1545 */
1546 int lttng_ustconsumer_get_produced_snapshot(
1547 struct lttng_consumer_stream *stream, unsigned long *pos)
1548 {
1549 assert(stream);
1550 assert(stream->ustream);
1551 assert(pos);
1552
1553 return ustctl_snapshot_get_produced(stream->ustream, pos);
1554 }
1555
1556 /*
1557 * Get the consumed position
1558 *
1559 * Returns 0 on success, < 0 on error
1560 */
1561 int lttng_ustconsumer_get_consumed_snapshot(
1562 struct lttng_consumer_stream *stream, unsigned long *pos)
1563 {
1564 assert(stream);
1565 assert(stream->ustream);
1566 assert(pos);
1567
1568 return ustctl_snapshot_get_consumed(stream->ustream, pos);
1569 }
1570
/*
 * Called when the stream signals the consumer that it has hung up.
 *
 * Flushes the stream's buffer and marks the hangup flush as done so that
 * lttng_ustconsumer_read_subbuffer() stops consuming wakeup bytes from the
 * wait_fd for this stream.
 */
void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
{
	assert(stream);
	assert(stream->ustream);

	ustctl_flush_buffer(stream->ustream, 0);
	stream->hangup_flush_done = 1;
}
1582
/*
 * Release the UST resources attached to the given channel: stop its switch
 * timer if armed, destroy its metadata cache and destroy the underlying
 * ust-ctl channel object.
 */
void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
{
	assert(chan);
	assert(chan->uchan);

	/* The timer must be stopped before tearing the channel down. */
	if (chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(chan);
	}
	consumer_metadata_cache_destroy(chan);
	ustctl_destroy_channel(chan->uchan);
}
1594
/*
 * Release the UST resources attached to the given stream: stop the owning
 * channel's switch timer if armed and destroy the underlying ust-ctl stream
 * object.
 */
void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
{
	assert(stream);
	assert(stream->ustream);

	/* Stop the channel's timer before destroying the stream. */
	if (stream->chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(stream->chan);
	}
	ustctl_destroy_stream(stream->ustream);
}
1605
1606 /*
1607 * Populate index values of a UST stream. Values are set in big endian order.
1608 *
1609 * Return 0 on success or else a negative value.
1610 */
1611 static int get_index_values(struct lttng_packet_index *index,
1612 struct ustctl_consumer_stream *ustream)
1613 {
1614 int ret;
1615
1616 ret = ustctl_get_timestamp_begin(ustream, &index->timestamp_begin);
1617 if (ret < 0) {
1618 PERROR("ustctl_get_timestamp_begin");
1619 goto error;
1620 }
1621 index->timestamp_begin = htobe64(index->timestamp_begin);
1622
1623 ret = ustctl_get_timestamp_end(ustream, &index->timestamp_end);
1624 if (ret < 0) {
1625 PERROR("ustctl_get_timestamp_end");
1626 goto error;
1627 }
1628 index->timestamp_end = htobe64(index->timestamp_end);
1629
1630 ret = ustctl_get_events_discarded(ustream, &index->events_discarded);
1631 if (ret < 0) {
1632 PERROR("ustctl_get_events_discarded");
1633 goto error;
1634 }
1635 index->events_discarded = htobe64(index->events_discarded);
1636
1637 ret = ustctl_get_content_size(ustream, &index->content_size);
1638 if (ret < 0) {
1639 PERROR("ustctl_get_content_size");
1640 goto error;
1641 }
1642 index->content_size = htobe64(index->content_size);
1643
1644 ret = ustctl_get_packet_size(ustream, &index->packet_size);
1645 if (ret < 0) {
1646 PERROR("ustctl_get_packet_size");
1647 goto error;
1648 }
1649 index->packet_size = htobe64(index->packet_size);
1650
1651 ret = ustctl_get_stream_id(ustream, &index->stream_id);
1652 if (ret < 0) {
1653 PERROR("ustctl_get_stream_id");
1654 goto error;
1655 }
1656 index->stream_id = htobe64(index->stream_id);
1657
1658 error:
1659 return ret;
1660 }
1661
1662
/*
 * Read one sub-buffer from the given UST stream and write its payload out
 * through the consumer context (tracefile or network), appending an index
 * entry for data streams when an index fd is open.
 *
 * For metadata streams, when no sub-buffer is ready, any metadata still
 * sitting in the channel's cache is pushed into the ring buffer and the read
 * is retried.
 *
 * Return the number of bytes written, 0 when nothing was available, else a
 * negative value on error.
 */
int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	unsigned long len, subbuf_size, padding;
	int err, write_index = 0;
	long ret = 0;
	char dummy;
	struct ustctl_consumer_stream *ustream;
	struct lttng_packet_index index;

	assert(stream);
	assert(stream->ustream);
	assert(ctx);

	DBG("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
			stream->name);

	/* Ease our life for what's next. */
	ustream = stream->ustream;

	/* Indicate that for this stream we have to write the index. */
	if (stream->index_fd >= 0) {
		write_index = 1;
	}

	/* We can consume the 1 byte written into the wait_fd by UST */
	if (stream->monitor && !stream->hangup_flush_done) {
		ssize_t readlen;

		do {
			readlen = read(stream->wait_fd, &dummy, 1);
		} while (readlen == -1 && errno == EINTR);
		/* EAGAIN/EWOULDBLOCK only means no byte was pending. */
		if (readlen == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
			ret = readlen;
			goto end;
		}
	}

retry:
	/* Get the next subbuffer */
	err = ustctl_get_next_subbuf(ustream);
	if (err != 0) {
		/*
		 * Populate metadata info if the existing info has
		 * already been read.
		 */
		if (stream->metadata_flag) {
			ssize_t write_len;

			/* Everything cached has been pushed: nothing to read. */
			if (stream->chan->metadata_cache->contiguous
					== stream->ust_metadata_pushed) {
				ret = 0;
				goto end;
			}

			/*
			 * Push one packet of cached metadata into the ring
			 * buffer, flush it and retry reading a sub-buffer.
			 */
			write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
					&stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
					stream->chan->metadata_cache->contiguous
					- stream->ust_metadata_pushed);
			assert(write_len != 0);
			if (write_len < 0) {
				ERR("Writing one metadata packet");
				ret = -1;
				goto end;
			}
			stream->ust_metadata_pushed += write_len;
			ustctl_flush_buffer(stream->ustream, 1);
			goto retry;
		}

		ret = err; /* ustctl_get_next_subbuf returns negative, caller expect positive. */
		/*
		 * This is a debug message even for single-threaded consumer,
		 * because poll() have more relaxed criterions than get subbuf,
		 * so get_subbuf may fail for short race windows where poll()
		 * would issue wakeups.
		 */
		DBG("Reserving sub buffer failed (everything is normal, "
				"it is due to concurrency) [ret: %d]", err);
		goto end;
	}
	assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);

	/* Fill the index entry before consuming the sub-buffer. */
	if (!stream->metadata_flag && write_index) {
		index.offset = htobe64(stream->out_fd_offset);
		ret = get_index_values(&index, ustream);
		if (ret < 0) {
			goto end;
		}
	}

	/* Get the full padded subbuffer size */
	err = ustctl_get_padded_subbuf_size(ustream, &len);
	assert(err == 0);

	/* Get subbuffer data size (without padding) */
	err = ustctl_get_subbuf_size(ustream, &subbuf_size);
	assert(err == 0);

	/* Make sure we don't get a subbuffer size bigger than the padded */
	assert(len >= subbuf_size);

	padding = len - subbuf_size;
	/* write the subbuffer to the tracefile */
	ret = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, subbuf_size, padding, &index);
	/*
	 * The mmap operation should write subbuf_size amount of data when network
	 * streaming or the full padding (len) size when we are _not_ streaming.
	 */
	if ((ret != subbuf_size && stream->net_seq_idx != (uint64_t) -1ULL) ||
			(ret != len && stream->net_seq_idx == (uint64_t) -1ULL)) {
		/*
		 * Display the error but continue processing to try to release the
		 * subbuffer. This is a DBG statement since any unexpected kill or
		 * signal, the application gets unregistered, relayd gets closed or
		 * anything that affects the buffer lifetime will trigger this error.
		 * So, for the sake of the user, don't print this error since it can
		 * happen and it is OK with the code flow.
		 */
		DBG("Error writing to tracefile "
				"(ret: %ld != len: %lu != subbuf_size: %lu)",
				ret, len, subbuf_size);
		/* Don't write an index entry for a partial write. */
		write_index = 0;
	}
	err = ustctl_put_next_subbuf(ustream);
	assert(err == 0);

	/* Write index if needed. */
	if (write_index) {
		err = index_write(stream->index_fd, &index, sizeof(index));
		if (err < 0) {
			ret = -1;
			goto end;
		}
	}

end:
	return ret;
}
1802
1803 /*
1804 * Called when a stream is created.
1805 *
1806 * Return 0 on success or else a negative value.
1807 */
1808 int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
1809 {
1810 int ret;
1811
1812 assert(stream);
1813
1814 /* Don't create anything if this is set for streaming. */
1815 if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor) {
1816 ret = utils_create_stream_file(stream->chan->pathname, stream->name,
1817 stream->chan->tracefile_size, stream->tracefile_count_current,
1818 stream->uid, stream->gid, NULL);
1819 if (ret < 0) {
1820 goto error;
1821 }
1822 stream->out_fd = ret;
1823 stream->tracefile_size_current = 0;
1824
1825 if (!stream->metadata_flag) {
1826 ret = index_create_file(stream->chan->pathname,
1827 stream->name, stream->uid, stream->gid,
1828 stream->chan->tracefile_size,
1829 stream->tracefile_count_current);
1830 if (ret < 0) {
1831 goto error;
1832 }
1833 stream->index_fd = ret;
1834 }
1835 }
1836 ret = 0;
1837
1838 error:
1839 return ret;
1840 }
1841
1842 /*
1843 * Check if data is still being extracted from the buffers for a specific
1844 * stream. Consumer data lock MUST be acquired before calling this function
1845 * and the stream lock.
1846 *
1847 * Return 1 if the traced data are still getting read else 0 meaning that the
1848 * data is available for trace viewer reading.
1849 */
1850 int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
1851 {
1852 int ret;
1853
1854 assert(stream);
1855 assert(stream->ustream);
1856
1857 DBG("UST consumer checking data pending");
1858
1859 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
1860 ret = 0;
1861 goto end;
1862 }
1863
1864 if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
1865 uint64_t contiguous, pushed;
1866
1867 /* Ease our life a bit. */
1868 contiguous = stream->chan->metadata_cache->contiguous;
1869 pushed = stream->ust_metadata_pushed;
1870
1871 /*
1872 * We can simply check whether all contiguously available data
1873 * has been pushed to the ring buffer, since the push operation
1874 * is performed within get_next_subbuf(), and because both
1875 * get_next_subbuf() and put_next_subbuf() are issued atomically
1876 * thanks to the stream lock within
1877 * lttng_ustconsumer_read_subbuffer(). This basically means that
1878 * whetnever ust_metadata_pushed is incremented, the associated
1879 * metadata has been consumed from the metadata stream.
1880 */
1881 DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
1882 contiguous, pushed);
1883 assert(((int64_t) contiguous - pushed) >= 0);
1884 if ((contiguous != pushed) ||
1885 (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
1886 ret = 1; /* Data is pending */
1887 goto end;
1888 }
1889 } else {
1890 ret = ustctl_get_next_subbuf(stream->ustream);
1891 if (ret == 0) {
1892 /*
1893 * There is still data so let's put back this
1894 * subbuffer.
1895 */
1896 ret = ustctl_put_subbuf(stream->ustream);
1897 assert(ret == 0);
1898 ret = 1; /* Data is pending */
1899 goto end;
1900 }
1901 }
1902
1903 /* Data is NOT pending so ready to be read. */
1904 ret = 0;
1905
1906 end:
1907 return ret;
1908 }
1909
/*
 * Close every metadata stream wait fd of the metadata hash table. This
 * function MUST be used very carefully so not to run into a race between the
 * metadata thread handling streams and this function closing their wait fd.
 *
 * For UST, this is used when the session daemon hangs up. It's the metadata
 * producer so calling this is safe because we are assured that no state change
 * can occur in the metadata thread for the streams in the hash table.
 */
void lttng_ustconsumer_close_metadata(struct lttng_ht *metadata_ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(metadata_ht);
	assert(metadata_ht->ht);

	DBG("UST consumer closing all metadata streams");

	/* RCU read-side lock protects the lock-free hash table iteration. */
	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
			node.node) {
		/* The channel lock orders us against concurrent channel users. */
		pthread_mutex_lock(&stream->chan->lock);
		/*
		 * Whatever returned value, we must continue to try to close everything
		 * so ignore it.
		 */
		(void) _close_metadata(stream->chan);
		DBG("Metadata wait fd %d and poll pipe fd %d closed", stream->wait_fd,
				stream->ust_metadata_poll_pipe[1]);
		pthread_mutex_unlock(&stream->chan->lock);

	}
	rcu_read_unlock();
}
1945
1946 void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
1947 {
1948 int ret;
1949
1950 ret = ustctl_stream_close_wakeup_fd(stream->ustream);
1951 if (ret < 0) {
1952 ERR("Unable to close wakeup fd");
1953 }
1954 }
1955
/*
 * Ask the session daemon for the metadata of the given channel over the
 * dedicated metadata socket and receive it into the channel's metadata
 * cache.
 *
 * Please refer to consumer-timer.c before adding any lock within this
 * function or any of its callees. Timers have a very strict locking
 * semantic with respect to teardown. Failure to respect this semantic
 * introduces deadlocks.
 *
 * Return 0 on success else a negative value.
 */
int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel, int timer)
{
	struct lttcomm_metadata_request_msg request;
	struct lttcomm_consumer_msg msg;
	enum lttng_error_code ret_code = LTTNG_OK;
	uint64_t len, key, offset;
	int ret;

	assert(channel);
	assert(channel->metadata_cache);

	/* send the metadata request to sessiond */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER64_UST:
		request.bits_per_long = 64;
		break;
	case LTTNG_CONSUMER32_UST:
		request.bits_per_long = 32;
		break;
	default:
		request.bits_per_long = 0;
		break;
	}

	request.session_id = channel->session_id;
	request.session_id_per_pid = channel->session_id_per_pid;
	/*
	 * Request the application UID here so the metadata of that application can
	 * be sent back. The channel UID corresponds to the user UID of the session
	 * used for the rights on the stream file(s).
	 */
	request.uid = channel->ust_app_uid;
	request.key = channel->key;

	DBG("Sending metadata request to sessiond, session id %" PRIu64
			", per-pid %" PRIu64 ", app UID %u and channek key %" PRIu64,
			request.session_id, request.session_id_per_pid, request.uid,
			request.key);

	/* The metadata socket lock covers the whole request/reply exchange. */
	pthread_mutex_lock(&ctx->metadata_socket_lock);
	ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
			sizeof(request));
	if (ret < 0) {
		ERR("Asking metadata to sessiond");
		goto end;
	}

	/* Receive the metadata from sessiond */
	ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
			sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %d (expects %zu)",
			ret, sizeof(msg));
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
		/*
		 * The ret value might 0 meaning an orderly shutdown but this is ok
		 * since the caller handles this.
		 */
		goto end;
	}

	if (msg.cmd_type == LTTNG_ERR_UND) {
		/* No registry found */
		(void) consumer_send_status_msg(ctx->consumer_metadata_socket,
				ret_code);
		ret = 0;
		goto end;
	} else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
		ERR("Unexpected cmd_type received %d", msg.cmd_type);
		ret = -1;
		goto end;
	}

	len = msg.u.push_metadata.len;
	key = msg.u.push_metadata.key;
	offset = msg.u.push_metadata.target_offset;

	/* The reply must concern the channel we asked about. */
	assert(key == channel->key);
	if (len == 0) {
		DBG("No new metadata to receive for key %" PRIu64, key);
	}

	/* Tell session daemon we are ready to receive the metadata. */
	ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
			LTTNG_OK);
	if (ret < 0 || len == 0) {
		/*
		 * Somehow, the session daemon is not responding anymore or there is
		 * nothing to receive.
		 */
		goto end;
	}

	ret_code = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
			key, offset, len, channel, timer);
	if (ret_code >= 0) {
		/*
		 * Only send the status msg if the sessiond is alive meaning a positive
		 * ret code.
		 */
		(void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret_code);
	}
	ret = 0;

end:
	pthread_mutex_unlock(&ctx->metadata_socket_lock);
	return ret;
}
This page took 0.108876 seconds and 6 git commands to generate.