Fix: Possible memory leak when multiple config files are loaded
[lttng-tools.git] / src / common / consumer-stream.c
/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2013 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2 only, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#include <assert.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <unistd.h>

#include <common/common.h>
#include <common/index/index.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>

#include "consumer-stream.h"
/*
 * RCU call to free stream. MUST only be used with call_rcu().
 */
static void free_stream_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_stream *stream =
		caa_container_of(node, struct lttng_consumer_stream, node);

	pthread_mutex_destroy(&stream->lock);
	free(stream);
}

/*
 * Close stream on the relayd side. This call can destroy a relayd if the
 * conditions are met.
 *
 * A RCU read side lock MUST be acquired if the relayd object was looked up in
 * a hash table before calling this.
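 *
 * A typical caller therefore looks like what consumer_stream_close() does
 * below: take rcu_read_lock(), look the relayd up with consumer_find_relayd()
 * using the stream's net_seq_idx, call this function, then rcu_read_unlock().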
 */
void consumer_stream_relayd_close(struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd)
{
	int ret;

	assert(stream);
	assert(relayd);

	if (stream->sent_to_relayd) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);
	}

	/* Closing a stream requires locking the control socket. */
	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	ret = relayd_send_close_stream(&relayd->control_sock,
			stream->relayd_stream_id,
			stream->next_net_seq_num - 1);
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	if (ret < 0) {
		DBG("Unable to close stream on the relayd. Continuing");
		/*
		 * Continue here. There is nothing we can do for the relayd.
		 * Chances are that the relayd has closed the socket so we just
		 * continue cleaning up.
		 */
	}

	/* If both conditions are met, destroy the relayd. */
	if (uatomic_read(&relayd->refcount) == 0 &&
			uatomic_read(&relayd->destroy_flag)) {
		consumer_destroy_relayd(relayd);
	}
	stream->net_seq_idx = (uint64_t) -1ULL;
	stream->sent_to_relayd = 0;
}

/*
 * Close stream's file descriptors and, if needed, close stream also on the
 * relayd side.
 *
 * The consumer data lock MUST be acquired.
 * The stream lock MUST be acquired.
 */
void consumer_stream_close(struct lttng_consumer_stream *stream)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap");
			}
		}

		if (stream->wait_fd >= 0) {
			ret = close(stream->wait_fd);
			if (ret) {
				PERROR("close");
			}
			stream->wait_fd = -1;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
	{
		/*
		 * Special case for the metadata since the wait fd is an internal pipe
		 * polled in the metadata thread.
		 */
		if (stream->metadata_flag && stream->chan->monitor) {
			int rpipe = stream->ust_metadata_poll_pipe[0];

			/*
			 * This will stop the channel timer, if there is one, and close the
			 * write side of the metadata poll pipe.
			 */
			lttng_ustconsumer_close_metadata(stream->chan);
			if (rpipe >= 0) {
				ret = close(rpipe);
				if (ret < 0) {
					PERROR("closing metadata pipe read side");
				}
				stream->ust_metadata_poll_pipe[0] = -1;
			}
		}
		break;
	}
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* Close output fd. Could be a socket or local file at this point. */
	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
		stream->out_fd = -1;
	}

	if (stream->index_fd >= 0) {
		ret = close(stream->index_fd);
		if (ret) {
			PERROR("close stream index_fd");
		}
		stream->index_fd = -1;
	}

	/* Check and cleanup relayd if needed. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}

/*
 * Delete the stream from all possible hash tables.
 *
 * The consumer data lock MUST be acquired.
 * The stream lock MUST be acquired.
 */
void consumer_stream_delete(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(stream);
	/* Must never be called for a stream that is not in monitor mode. */
	assert(stream->chan->monitor);

	rcu_read_lock();

	if (ht) {
		iter.iter.node = &stream->node.node;
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}

	/* Delete from stream per channel ID hash table. */
	iter.iter.node = &stream->node_channel_id.node;
	/*
	 * The return value is of no importance. Even if the node is NOT in the
	 * hash table, we keep going since this may have been called by a code
	 * path that did not add the stream to any (or all) hash tables. The same
	 * goes for the next ht del call.
	 */
	(void) lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);

	/* Delete from the global stream list. */
	iter.iter.node = &stream->node_session_id.node;
	/* See the previous ht del on why we ignore the returned value. */
	(void) lttng_ht_del(consumer_data.stream_list_ht, &iter);

	rcu_read_unlock();

	if (!stream->metadata_flag) {
		/* Decrement the stream count of the global consumer data. */
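		/*
		 * Note (an assumption about code outside this file): stream_count is
		 * presumably what the data polling thread uses to size and rebuild
		 * its poll set, so it must stay consistent with the hash table
		 * deletions performed above.
		 */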
		assert(consumer_data.stream_count > 0);
		consumer_data.stream_count--;
	}
}

/*
 * Free the given stream within a RCU call.
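 *
 * The actual freeing is deferred to free_stream_rcu() (see above), which runs
 * only after an RCU grace period: the rcu_head passed to call_rcu() is the one
 * embedded in stream->node, hence the container_of() chain in the callback.
 * Callers should therefore not touch the stream once this returns.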
 */
void consumer_stream_free(struct lttng_consumer_stream *stream)
{
	assert(stream);

	call_rcu(&stream->node.head, free_stream_rcu);
}

/*
 * Destroy the stream's tracer-side buffers.
 */
void consumer_stream_destroy_buffers(struct lttng_consumer_stream *stream)
{
	assert(stream);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}

/*
 * Destroy and close an already created stream.
 */
static void destroy_close_stream(struct lttng_consumer_stream *stream)
{
	assert(stream);

	DBG("Consumer stream destroy monitored key: %" PRIu64, stream->key);

	/* Destroy tracer buffers of the stream. */
	consumer_stream_destroy_buffers(stream);
	/* Close down everything, including the relayd if there is one. */
	consumer_stream_close(stream);
}

/*
 * Decrement the stream's channel refcount and, if it drops to 0, return the
 * channel pointer so the caller can destroy it; otherwise return NULL.
 */
static struct lttng_consumer_channel *unref_channel(
		struct lttng_consumer_stream *stream)
{
	struct lttng_consumer_channel *free_chan = NULL;

	assert(stream);
	assert(stream->chan);

	/* Update refcount of channel and see if we need to destroy it. */
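	/*
	 * Note (an assumption about the channel fields): the channel is only
	 * reclaimable once its stream refcount drops to zero AND
	 * nb_init_stream_left has reached zero, i.e. no stream the channel is
	 * still expected to hand off remains pending. Both are checked here.
	 */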
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		free_chan = stream->chan;
	}

	return free_chan;
}

/*
 * Destroy a stream completely. This will delete, close and free the stream.
 * Once it returns, the stream is NO longer usable. Its channel may get
 * destroyed if the conditions are met for a monitored stream.
 *
 * This MUST be called WITHOUT the consumer data and stream lock acquired if
 * the stream is in _monitor_ mode; otherwise it does not matter.
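 *
 * A minimal usage sketch (assumed; the real call sites live in the consumer
 * threads, not in this file), for a stream that is done being consumed:
 *
 *	consumer_stream_destroy(stream, ht);
 *
 * where ht may be NULL, in which case the deletion from the main stream hash
 * table is simply skipped during cleanup.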
 */
void consumer_stream_destroy(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	assert(stream);

	/* Stream is in monitor mode. */
	if (stream->monitor) {
		struct lttng_consumer_channel *free_chan = NULL;

		/*
		 * This means that the stream was successfully removed from the
		 * streams list of the channel and sent to the right thread managing
		 * this stream, thus making it globally visible.
		 */
		if (stream->globally_visible) {
			pthread_mutex_lock(&consumer_data.lock);
			pthread_mutex_lock(&stream->chan->lock);
			pthread_mutex_lock(&stream->lock);
			/* Remove every reference of the stream in the consumer. */
			consumer_stream_delete(stream, ht);

			destroy_close_stream(stream);

			/* Update channel's refcount of the stream. */
			free_chan = unref_channel(stream);

			/* Indicates that the consumer data state MUST be updated after this. */
			consumer_data.need_update = 1;

			pthread_mutex_unlock(&stream->lock);
			pthread_mutex_unlock(&stream->chan->lock);
			pthread_mutex_unlock(&consumer_data.lock);
		} else {
			/*
			 * If the stream is not visible globally, this needs to be done
			 * outside of the consumer data lock section.
			 */
			free_chan = unref_channel(stream);
		}

		if (free_chan) {
			consumer_del_channel(free_chan);
		}
	} else {
		destroy_close_stream(stream);
	}

	/* Free stream within a RCU call. */
	consumer_stream_free(stream);
}

/*
 * Write the index of a specific stream either to the relayd or to local disk.
 *
 * Return 0 on success or else a negative value.
 */
int consumer_stream_write_index(struct lttng_consumer_stream *stream,
		struct ctf_packet_index *index)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(index);

	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_index(&relayd->control_sock, index,
				stream->relayd_stream_id, stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	} else {
		ssize_t size_ret;

		size_ret = index_write(stream->index_fd, index,
				sizeof(struct ctf_packet_index));
		/*
		 * Cast to ssize_t so a negative return from index_write() is not
		 * silently converted to a large unsigned value by the comparison.
		 */
		if (size_ret < (ssize_t) sizeof(struct ctf_packet_index)) {
			ret = -1;
		} else {
			ret = 0;
		}
	}
	if (ret < 0) {
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}

/*
 * Synchronize the metadata using a given session ID. A successful acquisition
 * of a metadata stream will trigger a request to the session daemon and a
 * snapshot so the metadata thread can consume it.
 *
 * This function call is a rendez-vous point between the metadata thread and
 * the data thread.
 *
 * Return 0 on success or else a negative value.
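 *
 * A hedged usage sketch (the actual call site is not part of this file; it is
 * assumed to sit in the data consuming path):
 *
 *	ret = consumer_stream_sync_metadata(ctx, stream->session_id);
 *	if (ret < 0) {
 *		goto error;
 *	}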
 */
int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx,
		uint64_t session_id)
{
	int ret;
	struct lttng_consumer_stream *metadata = NULL, *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;

	assert(ctx);

	/* Ease our life a bit. */
	ht = consumer_data.stream_list_ht;

	rcu_read_lock();

	/* Search for the metadata stream associated with the given session id. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&session_id, lttng_ht_seed), ht->match_fct,
			&session_id, &iter.iter, stream, node_session_id.node) {
		if (stream->metadata_flag) {
			metadata = stream;
			break;
		}
	}
	if (!metadata) {
		ret = 0;
		goto end_unlock_rcu;
	}

	/*
	 * In UST, since we have to write the metadata from the cache packet
	 * by packet, we might need to start this procedure multiple times
	 * until all the metadata from the cache has been extracted.
	 */
	do {
		/*
		 * Steps:
		 * - Lock the metadata stream.
		 * - Check if the metadata stream node was deleted before locking:
		 *   if yes, release and return success.
		 * - Check if new metadata is ready (flush + snapshot pos):
		 *   if nothing, release and return.
		 * - Lock the metadata_rdv_lock.
		 * - Unlock the metadata stream.
		 * - cond_wait on metadata_rdv to wait for the wakeup from the
		 *   metadata thread.
		 * - Unlock the metadata_rdv_lock.
		 */
		pthread_mutex_lock(&metadata->lock);

		/*
		 * There is a possibility that we were able to acquire a reference on
		 * the stream from the RCU hash table but, between then and now, the
		 * node might have been deleted just before the lock was acquired.
		 * Thus, after locking, we make sure the metadata node has not been
		 * deleted, which would mean the buffers are closed.
		 *
		 * In that case, there is no need to sync the metadata, hence
		 * returning a success code.
		 */
		ret = cds_lfht_is_node_deleted(&metadata->node.node);
		if (ret) {
			ret = 0;
			goto end_unlock_mutex;
		}

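		/*
		 * Return convention of the per-domain sync functions, inferred from
		 * the checks below (an assumption, not verified against their
		 * implementations): 0 when there is nothing left to do, EAGAIN when
		 * more metadata remains in the cache, ENODATA when nothing new is
		 * available, and a negative value on error.
		 */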
		switch (ctx->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Empty the metadata cache and flush the current stream.
			 */
			ret = lttng_kconsumer_sync_metadata(metadata);
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Ask the sessiond if we have new metadata waiting and update the
			 * consumer metadata cache.
			 */
			ret = lttng_ustconsumer_sync_metadata(ctx, metadata);
			break;
		default:
			assert(0);
			ret = -1;
			break;
		}
		/*
		 * Error or no new metadata, we exit here.
		 */
		if (ret <= 0 || ret == ENODATA) {
			goto end_unlock_mutex;
		}

		/*
		 * At this point, new metadata has been flushed, so we wait on the
		 * rendez-vous point for the metadata thread to wake us up once it
		 * has finished consuming it, and then continue execution.
		 */

		pthread_mutex_lock(&metadata->metadata_rdv_lock);

		/*
		 * Release metadata stream lock so the metadata thread can process it.
		 */
		pthread_mutex_unlock(&metadata->lock);

		/*
		 * Wait on the rendez-vous point. Once woken up, it means the metadata
		 * was consumed and thus synchronization is achieved.
		 */
		pthread_cond_wait(&metadata->metadata_rdv, &metadata->metadata_rdv_lock);
		pthread_mutex_unlock(&metadata->metadata_rdv_lock);
	} while (ret == EAGAIN);

	ret = 0;
	goto end_unlock_rcu;

end_unlock_mutex:
	pthread_mutex_unlock(&metadata->lock);
end_unlock_rcu:
	rcu_read_unlock();
	return ret;
}