relayd: Implement custom EfficiOS session clear
src/bin/lttng-relayd/stream.c (lttng-tools.git)
/*
 * Copyright (C) 2013 - Julien Desfossez <jdesfossez@efficios.com>
 *                      David Goulet <dgoulet@efficios.com>
 *               2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2 only, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _LGPL_SOURCE
#include <common/common.h>
#include <common/utils.h>
#include <common/defaults.h>
#include <urcu/rculist.h>
#include <sys/stat.h>

#include "lttng-relayd.h"
#include "index.h"
#include "stream.h"
#include "viewer-stream.h"

/*
 * Should be called with the RCU read-side lock held.
 *
 * The reference is only taken if the stream's refcount has not already
 * reached zero, so a stream that is being released cannot be revived.
 */
bool stream_get(struct relay_stream *stream)
{
	bool has_ref = false;

	pthread_mutex_lock(&stream->reflock);
	if (stream->ref.refcount != 0) {
		has_ref = true;
		urcu_ref_get(&stream->ref);
	}
	pthread_mutex_unlock(&stream->reflock);

	return has_ref;
}

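/*
 * Usage sketch (illustrative only): look up a stream by id, use it, and
 * put the reference once done.
 *
 *	struct relay_stream *stream = stream_get_by_id(id);
 *
 *	if (stream) {
 *		... use the stream (taking stream->lock as needed) ...
 *		stream_put(stream);
 *	}
 */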
/*
 * Get a stream from the streams hash table by stream id. Return the
 * stream if found, else NULL. A reference is taken on the returned
 * stream; stream_put() must be called on it once the caller is done.
 */
struct relay_stream *stream_get_by_id(uint64_t stream_id)
{
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;
	struct relay_stream *stream = NULL;

	rcu_read_lock();
	lttng_ht_lookup(relay_streams_ht, &stream_id, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (!node) {
		DBG("Relay stream %" PRIu64 " not found", stream_id);
		goto end;
	}
	stream = caa_container_of(node, struct relay_stream, node);
	if (!stream_get(stream)) {
		stream = NULL;
	}
end:
	rcu_read_unlock();
	return stream;
}

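/*
 * Caller-side sketch (illustrative only): path_name and channel_name must
 * be heap-allocated and are adopted by the stream, so the caller must not
 * free them, even when stream creation fails.
 *
 *	char *path = strdup(session_output_path);
 *	char *chan = strdup(channel_name);
 *	struct relay_stream *stream;
 *
 *	stream = stream_create(trace, handle, path, chan,
 *			tracefile_size, tracefile_count);
 *	if (!stream) {
 *		... path and chan have already been freed ...
 *	}
 */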
/*
 * We keep ownership of path_name and channel_name: they are freed when the
 * stream is released (or immediately if the allocation fails).
 */
struct relay_stream *stream_create(struct ctf_trace *trace,
	uint64_t stream_handle, char *path_name,
	char *channel_name, uint64_t tracefile_size,
	uint64_t tracefile_count)
{
	int ret;
	struct relay_stream *stream = NULL;
	struct relay_session *session = trace->session;

	stream = zmalloc(sizeof(struct relay_stream));
	if (stream == NULL) {
		PERROR("relay stream zmalloc");
		goto error_no_alloc;
	}

	stream->stream_handle = stream_handle;
	stream->prev_seq = -1ULL;
	stream->prev_index_seq = -1ULL;
	stream->last_net_seq_num = -1ULL;
	stream->ctf_stream_id = -1ULL;
	stream->tracefile_size = tracefile_size;
	stream->tracefile_count = tracefile_count;
	stream->path_name = path_name;
	stream->channel_name = channel_name;
	stream->beacon_ts_end = -1ULL;
	lttng_ht_node_init_u64(&stream->node, stream->stream_handle);
	pthread_mutex_init(&stream->lock, NULL);
	pthread_mutex_init(&stream->reflock, NULL);
	urcu_ref_init(&stream->ref);
	ctf_trace_get(trace);
	stream->trace = trace;

	stream->indexes_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!stream->indexes_ht) {
		ERR("Cannot create indexes_ht");
		ret = -1;
		goto end;
	}

	ret = utils_mkdir_recursive(stream->path_name, S_IRWXU | S_IRWXG,
			-1, -1);
	if (ret < 0) {
		ERR("relay creating output directory");
		goto end;
	}

	/*
	 * No need to use the run_as API here: whatever credentials we
	 * receive, the relayd uses its own credentials for the stream files.
	 */
	ret = utils_create_stream_file(stream->path_name, stream->channel_name,
			stream->tracefile_size, 0, -1, -1, NULL);
	if (ret < 0) {
		ERR("Create output file");
		goto end;
	}
	stream->stream_fd = stream_fd_create(ret);
	if (!stream->stream_fd) {
		if (close(ret)) {
			PERROR("Error closing file %d", ret);
		}
		ret = -1;
		goto end;
	}
	stream->tfa = tracefile_array_create(stream->tracefile_count);
	if (!stream->tfa) {
		ret = -1;
		goto end;
	}
	if (stream->tracefile_size) {
		DBG("Tracefile %s/%s_0 created", stream->path_name, stream->channel_name);
	} else {
		DBG("Tracefile %s/%s created", stream->path_name, stream->channel_name);
	}

	if (!strncmp(stream->channel_name, DEFAULT_METADATA_NAME, LTTNG_NAME_MAX)) {
		stream->is_metadata = 1;
	}

	stream->in_recv_list = true;

	/*
	 * Add the stream to the recv list of the session. Once the end-stream
	 * message is received, all session streams are published.
	 */
	pthread_mutex_lock(&session->recv_list_lock);
	cds_list_add_rcu(&stream->recv_node, &session->recv_list);
	session->stream_count++;
	pthread_mutex_unlock(&session->recv_list_lock);

	/*
	 * Add the stream to both the ctf_trace object and the global stream
	 * hash table, since the data side of the relayd has no concept of
	 * session.
	 */
	lttng_ht_add_unique_u64(relay_streams_ht, &stream->node);
	stream->in_stream_ht = true;

	DBG("Relay new stream added %s with ID %" PRIu64, stream->channel_name,
			stream->stream_handle);
	ret = 0;

end:
	if (ret) {
		if (stream->stream_fd) {
			stream_fd_put(stream->stream_fd);
			stream->stream_fd = NULL;
		}
		stream_put(stream);
		stream = NULL;
	}
	return stream;

error_no_alloc:
	/*
	 * path_name and channel_name need to be freed explicitly here
	 * because we cannot rely on stream_put().
	 */
	free(path_name);
	free(channel_name);
	return NULL;
}

/*
 * Called with the session lock held.
 */
void stream_publish(struct relay_stream *stream)
{
	struct relay_session *session;

	pthread_mutex_lock(&stream->lock);
	if (stream->published) {
		goto unlock;
	}

	session = stream->trace->session;

	pthread_mutex_lock(&session->recv_list_lock);
	if (stream->in_recv_list) {
		cds_list_del_rcu(&stream->recv_node);
		stream->in_recv_list = false;
	}
	pthread_mutex_unlock(&session->recv_list_lock);

	pthread_mutex_lock(&stream->trace->stream_list_lock);
	cds_list_add_rcu(&stream->stream_node, &stream->trace->stream_list);
	pthread_mutex_unlock(&stream->trace->stream_list_lock);

	stream->published = true;
unlock:
	pthread_mutex_unlock(&stream->lock);
}

/*
 * The stream must be protected either by holding the stream lock, or by
 * virtue of being called from stream_release() (the final reference put),
 * in which case the reflock guarantees that a single thread accesses it.
 */
static void stream_unpublish(struct relay_stream *stream)
{
	if (stream->in_stream_ht) {
		struct lttng_ht_iter iter;
		int ret;

		iter.iter.node = &stream->node.node;
		ret = lttng_ht_del(relay_streams_ht, &iter);
		assert(!ret);
		stream->in_stream_ht = false;
	}
	if (stream->published) {
		pthread_mutex_lock(&stream->trace->stream_list_lock);
		cds_list_del_rcu(&stream->stream_node);
		pthread_mutex_unlock(&stream->trace->stream_list_lock);
		stream->published = false;
	}
}

static void stream_destroy(struct relay_stream *stream)
{
	if (stream->indexes_ht) {
		/*
		 * lttng_ht_destroy() is called from the call_rcu worker
		 * thread, so we do not hold the RCU read-side lock while
		 * calling it.
		 */
		lttng_ht_destroy(stream->indexes_ht);
	}
	if (stream->tfa) {
		tracefile_array_destroy(stream->tfa);
	}
	free(stream->path_name);
	free(stream->channel_name);
	free(stream);
}

static void stream_destroy_rcu(struct rcu_head *rcu_head)
{
	struct relay_stream *stream =
		caa_container_of(rcu_head, struct relay_stream, rcu_node);

	stream_destroy(stream);
}

/*
 * No need to take stream->lock since this is only called on the final
 * stream_put(), which ensures that a single thread may act on the stream.
 *
 * At that point, the object is also protected by the reflock, which
 * guarantees that no other thread may share ownership of this stream.
 */
static void stream_release(struct urcu_ref *ref)
{
	struct relay_stream *stream =
		caa_container_of(ref, struct relay_stream, ref);
	struct relay_session *session;

	session = stream->trace->session;

	DBG("Releasing stream id %" PRIu64, stream->stream_handle);

	pthread_mutex_lock(&session->recv_list_lock);
	session->stream_count--;
	if (stream->in_recv_list) {
		cds_list_del_rcu(&stream->recv_node);
		stream->in_recv_list = false;
	}
	pthread_mutex_unlock(&session->recv_list_lock);

	stream_unpublish(stream);

	if (stream->stream_fd) {
		stream_fd_put(stream->stream_fd);
		stream->stream_fd = NULL;
	}
	if (stream->index_file) {
		lttng_index_file_put(stream->index_file);
		stream->index_file = NULL;
	}
	if (stream->trace) {
		ctf_trace_put(stream->trace);
		stream->trace = NULL;
	}

	call_rcu(&stream->rcu_node, stream_destroy_rcu);
}

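/*
 * Release a reference previously taken with stream_get() (or the initial
 * reference from stream_create()). The final put unpublishes the stream,
 * puts its file descriptors and trace reference, and frees the memory
 * after an RCU grace period.
 */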
void stream_put(struct relay_stream *stream)
{
	DBG("stream put for stream id %" PRIu64, stream->stream_handle);
	/*
	 * Hold the RCU read-side lock to ensure that stream->reflock still
	 * exists when we unlock it below.
	 */
	rcu_read_lock();
	/*
	 * The stream reflock ensures that concurrent test and update of the
	 * stream refcount are atomic.
	 */
	pthread_mutex_lock(&stream->reflock);
	assert(stream->ref.refcount != 0);
	/*
	 * Wait until we have processed all the stream packets before
	 * actually putting our last stream reference.
	 */
	DBG("stream put stream id %" PRIu64 " refcount %d",
			stream->stream_handle,
			(int) stream->ref.refcount);
	urcu_ref_put(&stream->ref, stream_release);
	pthread_mutex_unlock(&stream->reflock);
	rcu_read_unlock();
}

void try_stream_close(struct relay_stream *stream)
{
	bool session_aborted;
	struct relay_session *session = stream->trace->session;

	DBG("Trying to close stream %" PRIu64, stream->stream_handle);

	pthread_mutex_lock(&session->lock);
	session_aborted = session->aborted;
	pthread_mutex_unlock(&session->lock);

	pthread_mutex_lock(&stream->lock);
	/*
	 * Can be called concurrently by connection close and reception of the
	 * last pending data.
	 */
	if (stream->closed) {
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it is already marked as closed", stream->stream_handle);
		return;
	}

	stream->close_requested = true;

	if (stream->last_net_seq_num == -1ULL) {
		/*
		 * Handle connection close without an explicit stream close
		 * command.
		 *
		 * We can be clever about indexes partially received in
		 * cases where we received the data socket part, but not
		 * the control socket part: since we're currently closing
		 * the stream on behalf of the control socket, we *know*
		 * there won't be any more control information for this
		 * socket. Therefore, we can destroy all indexes for
		 * which we have received only the file descriptor (from
		 * the data socket). Since the data and control parts of a
		 * packet are sent in that order, this takes care of
		 * consumerd crashes occurring between sending the data and
		 * the control information for a packet.
		 */
		relay_index_close_partial_fd(stream);
		/*
		 * Use the highest net_seq_num we currently have pending
		 * as end-of-stream indicator. Leave last_net_seq_num
		 * at -1ULL if we cannot find any index.
		 */
		stream->last_net_seq_num = relay_index_find_last(stream);
		/* Fall through into the next check. */
	}

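	/*
	 * The signed cast below acts as a wraparound-safe
	 * "prev_seq < last_net_seq_num" check: for example, prev_seq = 5 and
	 * last_net_seq_num = 7 yield a difference of -2, meaning data is
	 * still pending, and the comparison stays correct even if the
	 * sequence counters wrap.
	 */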
	if (stream->last_net_seq_num != -1ULL &&
			((int64_t) (stream->prev_seq - stream->last_net_seq_num)) < 0
			&& !session_aborted) {
		/*
		 * Don't close since we still have data pending. This
		 * handles cases where an explicit close command has
		 * been received for this stream, and cases where the
		 * connection has been closed, and we are awaiting
		 * index information from the data socket. It is
		 * therefore expected that all the index fd information
		 * we need has already been received on the control
		 * socket. Matching index information from the data
		 * socket should be Expected Soon(TM).
		 *
		 * TODO: We should implement a timer to garbage collect
		 * streams after a timeout, to be resilient against a
		 * consumerd implementation that would not match this
		 * expected behavior.
		 */
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it still has data pending", stream->stream_handle);
		return;
	}
	/*
	 * We received all the indexes we can expect.
	 */
	stream_unpublish(stream);
	stream->closed = true;
	/* Relay indexes are only used by the "consumer/sessiond" end. */
	relay_index_close_all(stream);
	pthread_mutex_unlock(&stream->lock);
	DBG("Succeeded in closing stream %" PRIu64, stream->stream_handle);
	stream_put(stream);
}

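/*
 * Debugging helper: dump every index still held in the stream's index
 * hash table, along with its stream, trace and session identifiers.
 */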
static void print_stream_indexes(struct relay_stream *stream)
{
	struct lttng_ht_iter iter;
	struct relay_index *index;

	rcu_read_lock();
	cds_lfht_for_each_entry(stream->indexes_ht->ht, &iter.iter, index,
			index_n.node) {
		DBG("index %p net_seq_num %" PRIu64 " refcount %ld"
				" stream %" PRIu64 " trace %" PRIu64
				" session %" PRIu64,
				index,
				index->index_n.key,
				stream->ref.refcount,
				index->stream->stream_handle,
				index->stream->trace->id,
				index->stream->trace->session->id);
	}
	rcu_read_unlock();
}

void print_relay_streams(void)
{
	struct lttng_ht_iter iter;
	struct relay_stream *stream;

	if (!relay_streams_ht) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
			node.node) {
		if (!stream_get(stream)) {
			continue;
		}
		DBG("stream %p refcount %ld stream %" PRIu64 " trace %" PRIu64
				" session %" PRIu64,
				stream,
				stream->ref.refcount,
				stream->stream_handle,
				stream->trace->id,
				stream->trace->session->id);
		print_stream_indexes(stream);
		stream_put(stream);
	}
	rcu_read_unlock();
}

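/*
 * Unlink every data tracefile of a stream operating in tracefile rotation
 * mode. Used by the stream clear path; missing files (ENOENT) are not
 * considered an error.
 */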
static int relay_unlink_stream_files_rotation(struct relay_stream *stream)
{
	uint64_t tracefile_size = stream->tracefile_size;
	uint64_t tracefile_count = stream->tracefile_count;
	uint64_t count;
	int ret;

	/*
	 * If the channel is configured to have an open-ended number of
	 * tracefiles, use the current tracefile count as upper bound.
	 */
	if (!tracefile_count) {
		tracefile_count = stream->tracefile_count_current + 1;
	}

	/*
	 * Try to unlink each tracefile of this stream. It may not exist, in
	 * which case ENOENT is fine.
	 */
	for (count = 0; count < tracefile_count; count++) {
		ret = utils_unlink_stream_file(stream->path_name, stream->channel_name,
				tracefile_size, count, -1, -1, NULL);
		if (ret < 0 && errno != ENOENT) {
			return -1;
		}
	}
	return 0;
}

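/*
 * Unlink every index file of a stream operating in tracefile rotation
 * mode. Missing files (ENOENT) are not considered an error.
 */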
static int relay_unlink_index_files_rotation(struct relay_stream *stream)
{
	uint64_t tracefile_size = stream->tracefile_size;
	uint64_t tracefile_count = stream->tracefile_count;
	uint64_t count;
	int ret;

	/*
	 * If the channel is configured to have an open-ended number of
	 * tracefiles, use the current tracefile count as upper bound.
	 */
	if (!tracefile_count) {
		tracefile_count = stream->tracefile_count_current + 1;
	}

	/*
	 * Try to unlink each index file of this stream. It may not exist, in
	 * which case ENOENT is fine.
	 */
	for (count = 0; count < tracefile_count; count++) {
		if (stream->index_file) {
			ret = lttng_index_file_unlink(stream->path_name, stream->channel_name,
					-1, -1, tracefile_size, count);
			if (ret < 0 && errno != ENOENT) {
				return -1;
			}
		}
	}
	return 0;
}

static int relay_unlink_stream_files(struct relay_stream *stream)
{
	int ret;

	ret = utils_unlink_stream_file(stream->path_name, stream->channel_name,
			stream->tracefile_size, 0, -1, -1, NULL);
	if (ret < 0 && errno != ENOENT) {
		return -1;
	}
	return 0;
}

static int relay_unlink_index_files(struct relay_stream *stream)
{
	int ret;

	ret = lttng_index_file_unlink(stream->path_name, stream->channel_name,
			-1, -1, stream->tracefile_size, 0);
	if (ret < 0 && errno != ENOENT) {
		return -1;
	}
	return 0;
}

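/*
 * Attempt to perform the on-disk part of a stream clear.
 *
 * Once every index received so far lies at or before the index clear
 * position, the current index file reference is put, the on-disk index
 * files are unlinked, and the tracefile array is reset. Once the data side
 * has also caught up to the data clear position, the current tracefile is
 * closed, the on-disk data files are unlinked, and a fresh stream file is
 * created. Returns 0 on success, a negative value on error.
 */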
int try_stream_clear_index_data(struct relay_stream *stream)
{
	int ret = 0;

	DBG("try stream clear for handle %" PRIu64 " recv %" PRIu64 " clear_pos_idx %" PRIu64 " clear_pos_data %" PRIu64,
			stream->stream_handle, stream->index_received_seqcount,
			stream->clear_position_index_seqcount,
			stream->clear_position_data_seqcount);
	if (!stream->index_received_seqcount) {
		return 0;
	}
	if (stream->index_received_seqcount <= stream->clear_position_index_seqcount) {
		/*
		 * Put the reference on the current index file. A new index
		 * file will be created upon reception of the next index data
		 * beyond the clear position.
		 */
		if (stream->index_file) {
			lttng_index_file_put(stream->index_file);
			if (stream->tracefile_size > 0) {
				ret = relay_unlink_index_files_rotation(stream);
			} else {
				ret = relay_unlink_index_files(stream);
			}
			if (ret) {
				return ret;
			}
			stream->index_file = NULL;
		}
		tracefile_array_reset(stream->tfa);
	}
	if (stream->index_received_seqcount == stream->clear_position_data_seqcount) {
		ret = close(stream->stream_fd->fd);
		if (ret < 0) {
			PERROR("Closing tracefile");
			return -1;
		}
		stream->stream_fd->fd = -1;
		stream->tracefile_size_current = 0;

		if (stream->tracefile_size > 0) {
			ret = relay_unlink_stream_files_rotation(stream);
		} else {
			ret = relay_unlink_stream_files(stream);
		}
		if (ret) {
			return ret;
		}

		/* Create new files. */
		ret = utils_create_stream_file(stream->path_name, stream->channel_name,
				stream->tracefile_size, 0, -1, -1, NULL);
		if (ret < 0) {
			ERR("Create output file");
			return -1;
		}
		stream->stream_fd->fd = ret;
	}
	return 0;
}

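/*
 * Clear a stream as part of a session clear: discard the stream's data and
 * index files so that tracing restarts from the clear position. Metadata
 * streams are left untouched. Returns 0 on success, a negative value on
 * error.
 */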
int stream_clear(struct relay_stream *stream)
{
	int ret = 0;

	pthread_mutex_lock(&stream->lock);

	if (stream->is_metadata) {
		/* Do not clear metadata streams. */
		goto end;
	}

	/*
	 * Clear index and data for all packets up to and including the clear
	 * position index seqcount.
	 *
	 * Clearing the index is straightforward: we can remove the entire
	 * on-disk index for this stream because the control port is an
	 * ordered protocol. We may also have in-flight indexes within the
	 * indexes_ht (pending data reception). We need to mark those so they
	 * get discarded (along with their associated data content) upon
	 * reception of the matching data.
	 *
	 * Clearing the data: because data is written directly into the output
	 * files, we need to carefully handle cases where the index or data
	 * position is ahead of the other.
	 *
	 * In tracefile rotation mode, we need to move the seq_tail to the
	 * head position.
	 */

	/*
	 * If the data received is beyond the indexes received, unlink the
	 * data immediately and discard indexes as they arrive (up to the
	 * clear position).
	 *
	 * If the indexes received are beyond the data, we will reach the sync
	 * point when the indexes are received, so it will be safe to unlink
	 * the data and index files at that point.
	 *
	 * Clear the index and data file(s) immediately if we are already at
	 * the clear position (no in-flight indexes).
	 */
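	/*
	 * Illustrative example: with 10 indexes received and 2 in flight
	 * while the data side is ahead (prev_seq > prev_index_seq), the data
	 * clear position is set to 10 and the index clear position to 12;
	 * when the data side is behind, both clear positions are set to 12.
	 */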
	DBG("stream clear for handle %" PRIu64 " prev_seq %" PRIu64 " prev_index_seq %" PRIu64 " indexes in flight %d",
			stream->stream_handle, stream->prev_seq, stream->prev_index_seq,
			stream->indexes_in_flight);
	if (stream->prev_seq > stream->prev_index_seq) {
		stream->clear_position_data_seqcount = stream->index_received_seqcount;
		stream->clear_position_index_seqcount = stream->index_received_seqcount +
				stream->indexes_in_flight;
	} else if (stream->prev_seq < stream->prev_index_seq) {
		stream->clear_position_data_seqcount = stream->index_received_seqcount +
				stream->indexes_in_flight;
		stream->clear_position_index_seqcount = stream->index_received_seqcount +
				stream->indexes_in_flight;
	} else {
		assert(stream->indexes_in_flight == 0);
		stream->clear_position_data_seqcount = stream->index_received_seqcount;
		stream->clear_position_index_seqcount = stream->index_received_seqcount;
	}
	ret = try_stream_clear_index_data(stream);
	if (ret) {
		goto end;
	}

end:
	pthread_mutex_unlock(&stream->lock);
	return ret;
}