ust consumer: fix double close
[lttng-tools.git] / liblttng-consumer / lttng-consumer.c
/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; only version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define _GNU_SOURCE
#include <assert.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include <lttng-kernel-ctl.h>
#include <lttng-sessiond-comm.h>
#include <lttng/lttng-consumer.h>
#include <lttng/lttng-kconsumer.h>
#include <lttng/lttng-ustconsumer.h>
#include <lttngerr.h>

struct lttng_consumer_global_data consumer_data = {
	.stream_list.head = CDS_LIST_HEAD_INIT(consumer_data.stream_list.head),
	.channel_list.head = CDS_LIST_HEAD_INIT(consumer_data.channel_list.head),
	.stream_count = 0,
	.need_update = 1,
	.type = LTTNG_CONSUMER_UNKNOWN,
};

/* timeout parameter, to control the polling thread grace period. */
int consumer_poll_timeout = -1;

/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by lttng_consumer_thread_receive_fds() when it notices that all
 * fds have hung up. Also updated by the signal handler
 * (lttng_consumer_should_exit()). Read by the polling threads.
 */
volatile int consumer_quit = 0;

/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *consumer_find_stream(int key)
{
	struct lttng_consumer_stream *iter;

	cds_list_for_each_entry(iter, &consumer_data.stream_list.head, list) {
		if (iter->key == key) {
			DBG("Found stream key %d", key);
			return iter;
		}
	}
	return NULL;
}

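/*
 * Find a channel by key. As with consumer_find_stream(), the
 * consumer_data.lock must be held during this call.
 */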
static struct lttng_consumer_channel *consumer_find_channel(int key)
{
	struct lttng_consumer_channel *iter;

	cds_list_for_each_entry(iter, &consumer_data.channel_list.head, list) {
		if (iter->key == key) {
			DBG("Found channel key %d", key);
			return iter;
		}
	}
	return NULL;
}

/*
 * Remove a stream from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream)
{
	int ret;
	struct lttng_consumer_channel *free_chan = NULL;

	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				perror("munmap");
			}
		}
		break;
	case LTTNG_CONSUMER_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	cds_list_del(&stream->list);
	if (consumer_data.stream_count <= 0) {
		goto end;
	}
	consumer_data.stream_count--;
	if (!stream) {
		goto end;
	}
	if (stream->out_fd >= 0) {
		close(stream->out_fd);
	}
	if (stream->wait_fd >= 0) {
		close(stream->wait_fd);
	}
	if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd) {
		close(stream->shm_fd);
	}
	if (!--stream->chan->refcount)
		free_chan = stream->chan;
	free(stream);
end:
	consumer_data.need_update = 1;
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan)
		consumer_del_channel(free_chan);
}

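/*
 * Allocate and initialize a stream object for the channel identified by
 * channel_key. The stream is not added to the global stream list; use
 * consumer_add_stream() for that. Returns NULL on error.
 */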
struct lttng_consumer_stream *consumer_allocate_stream(
		int channel_key, int stream_key,
		int shm_fd, int wait_fd,
		enum lttng_consumer_stream_state state,
		uint64_t mmap_len,
		enum lttng_event_output output,
		const char *path_name)
{
	struct lttng_consumer_stream *stream;
	int ret;

	stream = malloc(sizeof(*stream));
	if (stream == NULL) {
		perror("malloc struct lttng_consumer_stream");
		goto end;
	}
	stream->chan = consumer_find_channel(channel_key);
	if (!stream->chan) {
		ERR("Unable to find channel key %d", channel_key);
		free(stream);
		stream = NULL;
		goto end;
	}
	stream->chan->refcount++;
	stream->key = stream_key;
	stream->shm_fd = shm_fd;
	stream->wait_fd = wait_fd;
	stream->out_fd = -1;
	stream->out_fd_offset = 0;
	stream->state = state;
	stream->mmap_len = mmap_len;
	stream->mmap_base = NULL;
	stream->output = output;
	strncpy(stream->path_name, path_name, PATH_MAX - 1);
	stream->path_name[PATH_MAX - 1] = '\0';

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER_UST:
		ret = lttng_ustconsumer_allocate_stream(stream);
		if (ret) {
			free(stream);
			return NULL;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}
	DBG("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, out_fd %d)",
			stream->path_name, stream->key,
			stream->shm_fd,
			stream->wait_fd,
			(unsigned long long) stream->mmap_len,
			stream->out_fd);
end:
	return stream;
}

/*
 * Add a stream to the global list protected by a mutex.
 */
int consumer_add_stream(struct lttng_consumer_stream *stream)
{
	int ret = 0;

	pthread_mutex_lock(&consumer_data.lock);
	/* Check if it already exists */
	if (consumer_find_stream(stream->key)) {
		ret = -1;
		goto end;
	}
	cds_list_add(&stream->list, &consumer_data.stream_list.head);
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER_UST:
		/* Streams are in CPU number order (we rely on this) */
		stream->cpu = stream->chan->nr_streams++;
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

end:
	pthread_mutex_unlock(&consumer_data.lock);
	return ret;
}

/*
 * Update a stream according to what we just received.
 */
void consumer_change_stream_state(int stream_key,
		enum lttng_consumer_stream_state state)
{
	struct lttng_consumer_stream *stream;

	pthread_mutex_lock(&consumer_data.lock);
	stream = consumer_find_stream(stream_key);
	if (stream) {
		stream->state = state;
	}
	consumer_data.need_update = 1;
	pthread_mutex_unlock(&consumer_data.lock);
}

/*
 * Remove a channel from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;

	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	cds_list_del(&channel->list);
	if (channel->mmap_base != NULL) {
		ret = munmap(channel->mmap_base, channel->mmap_len);
		if (ret != 0) {
			perror("munmap");
		}
	}
	if (channel->wait_fd >= 0) {
		close(channel->wait_fd);
	}
	if (channel->shm_fd >= 0 && channel->wait_fd != channel->shm_fd) {
		close(channel->shm_fd);
	}
	free(channel);
end:
	pthread_mutex_unlock(&consumer_data.lock);
}

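/*
 * Allocate and initialize a channel object. The channel is not added to the
 * global channel list; use consumer_add_channel() for that. Returns NULL on
 * error.
 */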
struct lttng_consumer_channel *consumer_allocate_channel(
		int channel_key,
		int shm_fd, int wait_fd,
		uint64_t mmap_len,
		uint64_t max_sb_size)
{
	struct lttng_consumer_channel *channel;
	int ret;

	channel = malloc(sizeof(*channel));
	if (channel == NULL) {
		perror("malloc struct lttng_consumer_channel");
		goto end;
	}
	channel->key = channel_key;
	channel->shm_fd = shm_fd;
	channel->wait_fd = wait_fd;
	channel->mmap_len = mmap_len;
	channel->max_sb_size = max_sb_size;
	channel->refcount = 0;
	channel->nr_streams = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		channel->mmap_base = NULL;
		channel->mmap_len = 0;
		break;
	case LTTNG_CONSUMER_UST:
		ret = lttng_ustconsumer_allocate_channel(channel);
		if (ret) {
			free(channel);
			return NULL;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}
	DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
			channel->key,
			channel->shm_fd,
			channel->wait_fd,
			(unsigned long long) channel->mmap_len,
			(unsigned long long) channel->max_sb_size);
end:
	return channel;
}

/*
 * Add a channel to the global list protected by a mutex.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel)
{
	int ret = 0;

	pthread_mutex_lock(&consumer_data.lock);
	/* Check if it already exists */
	if (consumer_find_channel(channel->key)) {
		ret = -1;
		goto end;
	}
	cds_list_add(&channel->list, &consumer_data.channel_list.head);
end:
	pthread_mutex_unlock(&consumer_data.lock);
	return ret;
}
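
/*
 * Illustrative sketch (not part of the original source): typical registration
 * order when the session daemon hands us the file descriptors for a channel
 * and one of its streams. The keys, fds and parameters are hypothetical; real
 * values come from lttng_consumer_recv_cmd().
 */
#if 0
static int example_register(int channel_key, int stream_key,
		int chan_shm_fd, int chan_wait_fd,
		int stream_shm_fd, int stream_wait_fd,
		uint64_t mmap_len, uint64_t max_sb_size,
		enum lttng_event_output output, const char *path_name)
{
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;

	/* The channel must exist before any of its streams is allocated. */
	channel = consumer_allocate_channel(channel_key, chan_shm_fd,
			chan_wait_fd, mmap_len, max_sb_size);
	if (channel == NULL || consumer_add_channel(channel) < 0) {
		return -1;
	}

	/* consumer_allocate_stream() looks the channel up by key. */
	stream = consumer_allocate_stream(channel_key, stream_key,
			stream_shm_fd, stream_wait_fd,
			LTTNG_CONSUMER_ACTIVE_STREAM, mmap_len, output, path_name);
	if (stream == NULL || consumer_add_stream(stream) < 0) {
		return -1;
	}
	/* The polling thread picks the new stream up on its next update. */
	return 0;
}
#endif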

/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
int consumer_update_poll_array(
		struct lttng_consumer_local_data *ctx, struct pollfd **pollfd,
		struct lttng_consumer_stream **local_stream)
{
	struct lttng_consumer_stream *iter;
	int i = 0;

	DBG("Updating poll fd array");
	cds_list_for_each_entry(iter, &consumer_data.stream_list.head, list) {
		if (iter->state != LTTNG_CONSUMER_ACTIVE_STREAM) {
			continue;
		}
		DBG("Active FD %d", iter->wait_fd);
		(*pollfd)[i].fd = iter->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = iter;
		i++;
	}

	/*
	 * Insert the consumer_poll_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FDs.
	 */
	(*pollfd)[i].fd = ctx->consumer_poll_pipe[0];
	(*pollfd)[i].events = POLLIN;
	return i;
}

/*
 * Poll on the should_quit pipe and the command socket.
 *
 * Returns -1 on error or if we should quit, 0 if data is available on the
 * command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		perror("Poll error");
		goto exit;
	}
	if (consumer_sockpoll[0].revents == POLLIN) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}

/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(
		struct lttng_consumer_local_data *ctx, int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}

/*
 * Send a return code to the session daemon.
 *
 * If the socket is not defined, we return 0; it is not a fatal error.
 */
int lttng_consumer_send_error(
		struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}

/*
 * Close all the tracefiles and stream fds; should be called when all
 * instances are destroyed.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_consumer_stream *iter, *tmp;
	struct lttng_consumer_channel *citer, *ctmp;

	/*
	 * Close all outfds. Called when there are no more threads running
	 * (after joining on the threads), so there is no need to protect the
	 * list iteration with a mutex.
	 */
	cds_list_for_each_entry_safe(iter, tmp,
			&consumer_data.stream_list.head, list) {
		consumer_del_stream(iter);
	}
	cds_list_for_each_entry_safe(citer, ctmp,
			&consumer_data.channel_list.head, list) {
		consumer_del_channel(citer);
	}
}

/*
 * Called from the signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	int ret;

	consumer_quit = 1;
	ret = write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 0) {
		perror("write consumer quit");
	}
}

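/*
 * Flush to disk the pages belonging to the sub-buffer preceding orig_offset
 * and hint the kernel that they will not be re-accessed soon, to limit page
 * cache usage.
 */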
void lttng_consumer_sync_trace_file(
		struct lttng_consumer_stream *stream, off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * sub-buffer prior to the one we just wrote.
	 * We don't care about error values, as these are just hints and ways
	 * to limit the amount of page cache used.
	 */
	if (orig_offset < stream->chan->max_sb_size) {
		return;
	}
	sync_file_range(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED: we won't re-access the data in the near future
	 * after we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size, POSIX_FADV_DONTNEED);
}

/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for the signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument: this function is called when data is
 * available on a buffer. The function is responsible for doing the
 * kernctl_get_next_subbuf, reading the data with mmap or splice depending on
 * the buffer configuration, and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		int (*buffer_ready)(struct lttng_consumer_stream *stream),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(int stream_key, uint32_t state))
{
	int ret, i;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = malloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		perror("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ret = pipe(ctx->consumer_poll_pipe);
	if (ret < 0) {
		perror("Error creating poll pipe");
		goto error_poll_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		perror("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		perror("Error creating thread pipe");
		goto error_thread_pipe;
	}

	return ctx;

error_thread_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_should_quit[i]);
		assert(!err);
	}
error_quit_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_poll_pipe[i]);
		assert(!err);
	}
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}

/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	close(ctx->consumer_error_socket);
	close(ctx->consumer_thread_pipe[0]);
	close(ctx->consumer_thread_pipe[1]);
	close(ctx->consumer_poll_pipe[0]);
	close(ctx->consumer_poll_pipe[1]);
	close(ctx->consumer_should_quit[0]);
	close(ctx->consumer_should_quit[1]);
	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
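
/*
 * Illustrative sketch (not part of the original source): how a consumer
 * binary is expected to drive this API. It creates a context with its
 * callbacks, spawns the receive and poll threads, and tears everything down
 * once they exit. The callback bodies and the socket path are hypothetical
 * placeholders.
 */
#if 0
static int example_buffer_ready(struct lttng_consumer_stream *stream)
{
	/* A real consumer reads the ready sub-buffer here. */
	return 0;
}

static int example_recv_channel(struct lttng_consumer_channel *channel)
{
	return 0;
}

static int example_recv_stream(struct lttng_consumer_stream *stream)
{
	return 0;
}

static int example_update_stream(int stream_key, uint32_t state)
{
	return 0;
}

static int example_main(void)
{
	struct lttng_consumer_local_data *ctx;
	pthread_t recv_thread, poll_thread;

	ctx = lttng_consumer_create(LTTNG_CONSUMER_UST,
			example_buffer_ready, example_recv_channel,
			example_recv_stream, example_update_stream);
	if (ctx == NULL) {
		return -1;
	}
	lttng_consumer_set_command_sock_path(ctx, "/tmp/example-consumer.sock");

	/* One thread receives fds from the session daemon, the other polls them. */
	pthread_create(&recv_thread, NULL, lttng_consumer_thread_receive_fds, ctx);
	pthread_create(&poll_thread, NULL, lttng_consumer_thread_poll_fds, ctx);

	pthread_join(recv_thread, NULL);
	pthread_join(poll_thread, NULL);

	/* All threads are joined: close remaining streams/channels, free ctx. */
	lttng_consumer_cleanup();
	lttng_consumer_destroy(ctx);
	return 0;
}
#endif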

/*
 * Mmap the ring buffer, read it and write the data to the tracefile.
 *
 * Returns the number of bytes written.
 */
int lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_read_subbuffer_mmap(ctx, stream, len);
	case LTTNG_CONSUMER_UST:
		return lttng_ustconsumer_on_read_subbuffer_mmap(ctx, stream, len);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * Returns the number of bytes spliced.
 */
int lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_read_subbuffer_splice(ctx, stream, len);
	case LTTNG_CONSUMER_UST:
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Take a snapshot for a specific fd.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(ctx, stream);
	case LTTNG_CONSUMER_UST:
		return lttng_ustconsumer_take_snapshot(ctx, stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Get the produced position.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_get_produced_snapshot(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(ctx, stream, pos);
	case LTTNG_CONSUMER_UST:
		return lttng_ustconsumer_get_produced_snapshot(ctx, stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

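/*
 * Receive and process a command from the session daemon on the given socket,
 * dispatching to the kernel or UST consumer implementation.
 */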
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * This thread polls the fds in the stream list to consume the data and write
 * it to the tracefile if necessary.
 */
void *lttng_consumer_thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL;
	/* local view of consumer_data.stream_count */
	int nb_fd = 0;
	char tmp;
	int tmp2;
	struct lttng_consumer_local_data *ctx = data;

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * The stream list has been updated; we need to update our
		 * local array as well.
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_stream != NULL) {
				free(local_stream);
				local_stream = NULL;
			}

			/* allocate for all fds + 1 for the consumer_poll_pipe */
			pollfd = malloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				perror("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the consumer_poll_pipe */
			local_stream = malloc((consumer_data.stream_count + 1) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				perror("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = consumer_update_poll_array(ctx, &pollfd, local_stream);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* poll on the array of fds */
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			perror("Poll error");
			lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/* No FDs left and consumer_quit was set: end the thread */
		if (nb_fd == 0 && consumer_quit == 1) {
			goto end;
		}

		/*
		 * If the consumer_poll_pipe triggered poll go
		 * directly to the beginning of the loop to update the
		 * array. We want to prioritize array update over
		 * low-priority reads.
		 */
		if (pollfd[nb_fd].revents == POLLIN) {
			DBG("consumer_poll_pipe wake up");
			tmp2 = read(ctx->consumer_poll_pipe[0], &tmp, 1);
			if (tmp2 < 0) {
				perror("read consumer poll pipe");
			}
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			switch (pollfd[i].revents) {
			case POLLERR:
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				consumer_del_stream(local_stream[i]);
				num_hup++;
				break;
			case POLLHUP:
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				consumer_del_stream(local_stream[i]);
				num_hup++;
				break;
			case POLLNVAL:
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				consumer_del_stream(local_stream[i]);
				num_hup++;
				break;
			case POLLPRI:
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				ret = ctx->on_buffer_ready(local_stream[i]);
				/* it's ok to have an unavailable sub-buffer */
				if (ret == EAGAIN) {
					ret = 0;
				}
				break;
			}
		}

		/* If every buffer FD has hung up, we end the read loop here */
		if (nb_fd > 0 && num_hup == nb_fd) {
			DBG("every buffer FD has hung up\n");
			if (consumer_quit == 1) {
				goto end;
			}
			continue;
		}

		/* Take care of low priority channels. */
		if (high_prio == 0) {
			for (i = 0; i < nb_fd; i++) {
				if (pollfd[i].revents == POLLIN) {
					DBG("Normal read on fd %d", pollfd[i].fd);
					ret = ctx->on_buffer_ready(local_stream[i]);
					/* it's ok to have an unavailable sub-buffer */
					if (ret == EAGAIN) {
						ret = 0;
					}
				}
			}
		}
	}
end:
	DBG("polling thread exiting");
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_stream != NULL) {
		free(local_stream);
		local_stream = NULL;
	}
	return NULL;
}

/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *lttng_consumer_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	/*
	 * Structure to poll for incoming data on the communication socket;
	 * avoids making the socket blocking.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* prepare the FDs to poll: the should_quit pipe and the client socket */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		WARN("On accept");
		goto end;
	}
	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret < 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			goto end;
		}
		DBG("received fds on sock");
	}
end:
	DBG("consumer_thread_receive_fds exiting");

	/*
	 * When all fds have hung up, the polling thread
	 * can exit cleanly.
	 */
	consumer_quit = 1;

	/*
	 * 2s of grace period. If no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT;

	/* wake up the polling thread */
	ret = write(ctx->consumer_poll_pipe[1], "4", 1);
	if (ret < 0) {
		perror("poll pipe write");
	}
	return NULL;
}