/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; only version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/list.h>

#include "kernelctl.h"
#include "lttkconsumerd.h"
#include "lttngerr.h"

static
struct kconsumerd_global_data {
	/*
	 * kconsumerd_data.lock protects kconsumerd_data.fd_list,
	 * kconsumerd_data.fds_count, and kconsumerd_data.need_update.
	 * It ensures that fds_count always matches the number of items
	 * in fd_list, and that every list update triggers an fd_array
	 * update: a list modification and the setting of the need_update
	 * flag must be done atomically, and so must the flag read,
	 * fd array rebuild and flag clear.
	 */
	pthread_mutex_t lock;
	/*
	 * Number of elements in the list below. Protected by
	 * kconsumerd_data.lock.
	 */
	unsigned int fds_count;
	/*
	 * List of FDs. Protected by kconsumerd_data.lock.
	 */
	struct kconsumerd_fd_list fd_list;
	/*
	 * Flag specifying if the local array of FDs needs update in the
	 * poll function. Protected by kconsumerd_data.lock.
	 */
	unsigned int need_update;
} kconsumerd_data = {
	.fd_list.head = CDS_LIST_HEAD_INIT(kconsumerd_data.fd_list.head),
	.fds_count = 0,
	.need_update = 1,
};

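/*
 * Illustration of the locking discipline described above (no extra
 * functionality, just the pattern every writer of this shared state in
 * this file follows):
 *
 *	pthread_mutex_lock(&kconsumerd_data.lock);
 *	// add to / remove from kconsumerd_data.fd_list
 *	// adjust kconsumerd_data.fds_count accordingly
 *	kconsumerd_data.need_update = 1;
 *	pthread_mutex_unlock(&kconsumerd_data.lock);
 *
 * The polling thread later takes the same lock, rebuilds its local fd
 * array and clears need_update within a single critical section.
 */
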
/* timeout parameter, to control the polling thread grace period */
static int kconsumerd_poll_timeout = -1;

/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by kconsumerd_thread_receive_fds when it notices that all
 * fds have hung up. Also updated by the signal handler
 * (kconsumerd_should_exit()). Read by the polling threads.
 */
static volatile int kconsumerd_quit = 0;

/*
 * kconsumerd_set_error_socket
 *
 * Set the error socket
 */
void kconsumerd_set_error_socket(struct kconsumerd_local_data *ctx, int sock)
{
	ctx->kconsumerd_error_socket = sock;
}

/*
 * kconsumerd_set_command_socket_path
 *
 * Set the command socket path
 */
void kconsumerd_set_command_socket_path(struct kconsumerd_local_data *ctx,
		char *sock)
{
	ctx->kconsumerd_command_sock_path = sock;
}

/*
 * kconsumerd_find_session_fd
 *
 * Find a session fd in the global list.
 * The kconsumerd_data.lock must be held during this call.
 *
 * Return 1 if found, else 0.
 */
static int kconsumerd_find_session_fd(int fd)
{
	struct kconsumerd_fd *iter;

	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->sessiond_fd == fd) {
			DBG("Duplicate session fd %d", fd);
			return 1;
		}
	}

	return 0;
}

/*
 * kconsumerd_del_fd
 *
 * Remove a fd from the global list protected by a mutex
 */
static void kconsumerd_del_fd(struct kconsumerd_fd *lcf)
{
	int ret;

	pthread_mutex_lock(&kconsumerd_data.lock);
	cds_list_del(&lcf->list);
	if (kconsumerd_data.fds_count > 0) {
		kconsumerd_data.fds_count--;
		if (lcf != NULL) {
			if (lcf->mmap_base != NULL) {
				ret = munmap(lcf->mmap_base, lcf->mmap_len);
				if (ret != 0) {
					perror("munmap");
				}
			}
			if (lcf->out_fd != 0) {
				close(lcf->out_fd);
			}
			close(lcf->consumerd_fd);
			free(lcf);
			lcf = NULL;
		}
	}
	kconsumerd_data.need_update = 1;
	pthread_mutex_unlock(&kconsumerd_data.lock);
}

/*
 * kconsumerd_add_fd
 *
 * Add a fd to the global list protected by a mutex
 */
static int kconsumerd_add_fd(struct lttcomm_kconsumerd_msg *buf, int consumerd_fd)
{
	struct kconsumerd_fd *tmp_fd;
	int ret = 0;

	pthread_mutex_lock(&kconsumerd_data.lock);
	/* Check if it already exists */
	ret = kconsumerd_find_session_fd(buf->fd);
	if (ret == 1) {
		goto end;
	}

	tmp_fd = malloc(sizeof(struct kconsumerd_fd));
	if (tmp_fd == NULL) {
		perror("malloc kconsumerd_fd");
		ret = -1;
		goto end;
	}
	tmp_fd->sessiond_fd = buf->fd;
	tmp_fd->consumerd_fd = consumerd_fd;
	tmp_fd->state = buf->state;
	tmp_fd->max_sb_size = buf->max_sb_size;
	tmp_fd->out_fd = 0;
	tmp_fd->out_fd_offset = 0;
	tmp_fd->mmap_len = 0;
	tmp_fd->mmap_base = NULL;
	tmp_fd->output = buf->output;
	strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);
	tmp_fd->path_name[PATH_MAX - 1] = '\0';

	/* Opening the tracefile in write mode */
	if (tmp_fd->path_name != NULL) {
		ret = open(tmp_fd->path_name,
				O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU|S_IRWXG|S_IRWXO);
		if (ret < 0) {
			ERR("Opening %s", tmp_fd->path_name);
			perror("open");
			free(tmp_fd);
			goto end;
		}
		tmp_fd->out_fd = ret;
		DBG("Adding %s (%d, %d, %d)", tmp_fd->path_name,
				tmp_fd->sessiond_fd, tmp_fd->consumerd_fd, tmp_fd->out_fd);
	}

	if (tmp_fd->output == LTTNG_EVENT_MMAP) {
		/* get the len of the mmap region */
		ret = kernctl_get_mmap_len(tmp_fd->consumerd_fd, &tmp_fd->mmap_len);
		if (ret != 0) {
			ret = errno;
			perror("kernctl_get_mmap_len");
			goto error_cleanup;
		}

		tmp_fd->mmap_base = mmap(NULL, tmp_fd->mmap_len,
				PROT_READ, MAP_PRIVATE, tmp_fd->consumerd_fd, 0);
		if (tmp_fd->mmap_base == MAP_FAILED) {
			perror("Error mmaping");
			ret = -1;
			goto error_cleanup;
		}
	}

	cds_list_add(&tmp_fd->list, &kconsumerd_data.fd_list.head);
	kconsumerd_data.fds_count++;
	kconsumerd_data.need_update = 1;
	goto end;

error_cleanup:
	/* Don't leak the tracefile fd or the struct on a partial setup. */
	if (tmp_fd->out_fd != 0) {
		close(tmp_fd->out_fd);
	}
	free(tmp_fd);

end:
	pthread_mutex_unlock(&kconsumerd_data.lock);
	return ret;
}

/*
 * kconsumerd_change_fd_state
 *
 * Update a fd according to what we just received
 */
static void kconsumerd_change_fd_state(int sessiond_fd,
		enum kconsumerd_fd_state state)
{
	struct kconsumerd_fd *iter;

	pthread_mutex_lock(&kconsumerd_data.lock);
	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->sessiond_fd == sessiond_fd) {
			iter->state = state;
			break;
		}
	}
	kconsumerd_data.need_update = 1;
	pthread_mutex_unlock(&kconsumerd_data.lock);
}

/*
 * kconsumerd_update_poll_array
 *
 * Allocate the pollfd structure and the local view of the out fds
 * to avoid doing a lookup in the linked list and concurrency issues
 * when writing is needed.
 * Returns the number of fds in the structures.
 * Called with kconsumerd_data.lock held.
 */
static int kconsumerd_update_poll_array(struct kconsumerd_local_data *ctx,
		struct pollfd **pollfd, struct kconsumerd_fd **local_kconsumerd_fd)
{
	struct kconsumerd_fd *iter;
	int i = 0;

	DBG("Updating poll fd array");
	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->state == ACTIVE_FD) {
			DBG("Active FD %d", iter->consumerd_fd);
			(*pollfd)[i].fd = iter->consumerd_fd;
			(*pollfd)[i].events = POLLIN | POLLPRI;
			local_kconsumerd_fd[i] = iter;
			i++;
		}
	}

	/*
	 * Insert the kconsumerd_poll_pipe at the end of the array and don't
	 * increment i, so nb_fd is the number of real FDs. The caller must
	 * therefore have allocated fds_count + 1 entries in both arrays.
	 */
	(*pollfd)[i].fd = ctx->kconsumerd_poll_pipe[0];
	(*pollfd)[i].events = POLLIN;
	return i;
}

/*
 * kconsumerd_on_read_subbuffer_mmap
 *
 * mmap the ring buffer, read it and write the data to the tracefile.
 * Returns the number of bytes written
 */
int kconsumerd_on_read_subbuffer_mmap(struct kconsumerd_local_data *ctx,
		struct kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	unsigned long mmap_offset;
	char *padding = NULL;
	long ret = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	/* get the offset inside the fd to mmap */
	ret = kernctl_get_mmap_read_offset(fd, &mmap_offset);
	if (ret != 0) {
		ret = errno;
		perror("kernctl_get_mmap_read_offset");
		goto end;
	}

	while (len > 0) {
		ret = write(outfd, kconsumerd_fd->mmap_base + mmap_offset, len);
		/*
		 * Check for write errors before the length comparison: len is
		 * unsigned, so comparing a negative ret against it would
		 * wrongly take the "fully written" branch.
		 */
		if (ret < 0) {
			ret = errno;
			perror("Error in file write");
			goto end;
		} else if (ret >= len) {
			len = 0;
		} else {
			/* Partial write: move forward inside the sub-buffer. */
			len -= ret;
			mmap_offset += ret;
		}
		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);

		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED: we won't re-access the data in the near
		 * future after we write it.
		 *
		 * We need to call fadvise again after the file grows because the
		 * kernel does not seem to apply fadvise to non-existing parts of the
		 * file.
		 *
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete, because the dirty page writeback semantic is not well
		 * defined, so doing it earlier could be expected to lower streaming
		 * throughput.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}
	goto end;

end:
	if (padding != NULL) {
		free(padding);
	}
	return ret;
}

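/*
 * Note on the page cache strategy shared by the two subbuffer read paths
 * above and below: once a full sub-buffer has been written, the pages of
 * the *previous* sub-buffer are flushed and dropped. As a worked example
 * (sizes are illustrative only), with max_sb_size = 1 MiB and a tracefile
 * that already holds 4 MiB, the range [3 MiB, 4 MiB) is written back
 * synchronously via sync_file_range() and then evicted with
 * posix_fadvise(POSIX_FADV_DONTNEED), so the page cache is not filled
 * with trace data that will never be re-read by the consumer.
 */
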
/*
 * kconsumerd_on_read_subbuffer_splice
 *
 * Splice the data from the ring buffer to the tracefile.
 * Returns the number of bytes spliced
 */
int kconsumerd_on_read_subbuffer_splice(struct kconsumerd_local_data *ctx,
		struct kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	long ret = 0;
	loff_t offset = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	while (len > 0) {
		DBG("splice chan to pipe offset %lu (fd : %d)",
				(unsigned long)offset, fd);
		ret = splice(fd, &offset, ctx->kconsumerd_thread_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe ret %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in relay splice");
			goto splice_error;
		}

		ret = splice(ctx->kconsumerd_thread_pipe[0], NULL, outfd, NULL, ret,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice pipe to file %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in file splice");
			goto splice_error;
		}
		if (ret >= len) {
			len = 0;
		} else {
			/* Partial splice: account for what actually reached the file. */
			len -= ret;
		}
		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);
		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED: we won't re-access the data in the near
		 * future after we write it.
		 *
		 * We need to call fadvise again after the file grows because the
		 * kernel does not seem to apply fadvise to non-existing parts of the
		 * file.
		 *
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete, because the dirty page writeback semantic is not well
		 * defined, so doing it earlier could be expected to lower streaming
		 * throughput.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}
	goto end;

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EBADF:
		kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_EBADF);
		break;
	case EINVAL:
		kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	return ret;
}

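/*
 * Note on kconsumerd_on_read_subbuffer_splice(): the data makes two hops,
 * channel fd -> kconsumerd_thread_pipe -> tracefile, because splice(2)
 * requires at least one end of each transfer to be a pipe. The per-context
 * pipe created in kconsumerd_create() serves as that intermediate, so the
 * data never has to be copied through user space.
 */
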
/*
 * kconsumerd_poll_socket
 *
 * Poll on the should_quit pipe and the command socket.
 * Return -1 on error or if the caller should exit, 0 if data is
 * available on the command socket.
 */
int kconsumerd_poll_socket(struct pollfd *kconsumerd_sockpoll)
{
	int num_rdy;

	num_rdy = poll(kconsumerd_sockpoll, 2, -1);
	if (num_rdy == -1) {
		perror("Poll error");
		goto exit;
	}
	if (kconsumerd_sockpoll[0].revents == POLLIN) {
		DBG("kconsumerd_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}

/*
 * kconsumerd_consumerd_recv_fd
 *
 * Receives an array of file descriptors and the associated
 * structures describing each fd (path name).
 * Returns the size of received data
 */
static int kconsumerd_consumerd_recv_fd(struct kconsumerd_local_data *ctx,
		int sfd, struct pollfd *kconsumerd_sockpoll, int size,
		enum kconsumerd_command cmd_type)
{
	struct iovec iov[1];
	int ret = 0, i, tmp2;
	struct cmsghdr *cmsg;
	int nb_fd;
	char recv_fd[CMSG_SPACE(sizeof(int))];
	struct lttcomm_kconsumerd_msg lkm;

	/* the number of fds we are about to receive */
	nb_fd = size / sizeof(struct lttcomm_kconsumerd_msg);

	/*
	 * nb_fd is the number of fds we receive. One fd per recvmsg.
	 */
	for (i = 0; i < nb_fd; i++) {
		struct msghdr msg = { 0 };

		/* Prepare to receive the structures */
		iov[0].iov_base = &lkm;
		iov[0].iov_len = sizeof(lkm);
		msg.msg_iov = iov;
		msg.msg_iovlen = 1;

		msg.msg_control = recv_fd;
		msg.msg_controllen = sizeof(recv_fd);

		DBG("Waiting to receive fd");
		if (kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) {
			goto end;
		}

		if ((ret = recvmsg(sfd, &msg, 0)) < 0) {
			perror("recvmsg");
			continue;
		}

		if (ret != (size / nb_fd)) {
			ERR("Received only %d bytes, expected %d", ret, size / nb_fd);
			kconsumerd_send_error(ctx, KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}

		cmsg = CMSG_FIRSTHDR(&msg);
		if (!cmsg) {
			ERR("Invalid control message header");
			ret = -1;
			kconsumerd_send_error(ctx, KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}

		/* if we received fds */
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
			switch (cmd_type) {
			case ADD_STREAM:
				DBG("kconsumerd_add_fd %s (%d)", lkm.path_name,
						((int *) CMSG_DATA(cmsg))[0]);
				ret = kconsumerd_add_fd(&lkm, ((int *) CMSG_DATA(cmsg))[0]);
				if (ret < 0) {
					kconsumerd_send_error(ctx, KCONSUMERD_OUTFD_ERROR);
					goto end;
				}
				break;
			case UPDATE_STREAM:
				kconsumerd_change_fd_state(lkm.fd, lkm.state);
				break;
			default:
				break;
			}
			/* signal the poll thread */
			tmp2 = write(ctx->kconsumerd_poll_pipe[1], "4", 1);
			if (tmp2 < 0) {
				perror("write kconsumerd poll");
			}
		} else {
			ERR("Didn't receive any fd");
			kconsumerd_send_error(ctx, KCONSUMERD_ERROR_RECV_FD);
			ret = -1;
			goto end;
		}
	}

end:
	return ret;
}

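/*
 * For reference, the wire format expected above is: the command header
 * announces payload_size bytes, i.e. nb_fd consecutive
 * struct lttcomm_kconsumerd_msg, and each of them arrives in its own
 * sendmsg() carrying exactly one fd as SCM_RIGHTS ancillary data. A
 * minimal sketch of such a sender (illustrative only, not the actual
 * ltt-sessiond code; "lkm" is the message, "stream_fd" the fd to pass):
 *
 *	struct iovec iov = { .iov_base = &lkm, .iov_len = sizeof(lkm) };
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	struct msghdr msg = { 0 };
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &stream_fd, sizeof(int));
 *	sendmsg(sock, &msg, 0);
 */
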
/*
 * kconsumerd_thread_poll_fds
 *
 * This thread polls the fds in kconsumerd_data.fd_list to consume the data
 * and write it to tracefile if necessary.
 */
void *kconsumerd_thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the fds */
	struct kconsumerd_fd **local_kconsumerd_fd = NULL;
	/* local view of kconsumerd_data.fds_count */
	int nb_fd = 0;
	char tmp;
	int tmp2;
	struct kconsumerd_local_data *ctx = data;

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * If the fd_list has been updated, we need to update our
		 * local array as well.
		 */
		pthread_mutex_lock(&kconsumerd_data.lock);
		if (kconsumerd_data.need_update) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_kconsumerd_fd != NULL) {
				free(local_kconsumerd_fd);
				local_kconsumerd_fd = NULL;
			}

			/* allocate for all fds + 1 for the kconsumerd_poll_pipe */
			pollfd = malloc((kconsumerd_data.fds_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				perror("pollfd malloc");
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the kconsumerd_poll_pipe */
			local_kconsumerd_fd = malloc((kconsumerd_data.fds_count + 1) *
					sizeof(struct kconsumerd_fd *));
			if (local_kconsumerd_fd == NULL) {
				perror("local_kconsumerd_fd malloc");
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}
			ret = kconsumerd_update_poll_array(ctx, &pollfd, local_kconsumerd_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				kconsumerd_send_error(ctx, KCONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}
			nb_fd = ret;
			kconsumerd_data.need_update = 0;
		}
		pthread_mutex_unlock(&kconsumerd_data.lock);

		/* poll on the array of fds */
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, kconsumerd_poll_timeout);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			perror("Poll error");
			kconsumerd_send_error(ctx, KCONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/* No FDs left and kconsumerd_quit is set: exit the thread */
		if (nb_fd == 0 && kconsumerd_quit == 1) {
			goto end;
		}

		/*
		 * If the kconsumerd_poll_pipe triggered poll, go
		 * directly to the beginning of the loop to update the
		 * array. We want to prioritize array update over
		 * low-priority reads.
		 */
		if (pollfd[nb_fd].revents == POLLIN) {
			DBG("kconsumerd_poll_pipe wake up");
			tmp2 = read(ctx->kconsumerd_poll_pipe[0], &tmp, 1);
			if (tmp2 < 0) {
				perror("read kconsumerd poll");
			}
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			switch (pollfd[i].revents) {
			case POLLERR:
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLHUP:
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLNVAL:
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLPRI:
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				ret = ctx->on_buffer_ready(local_kconsumerd_fd[i]);
				/* it's ok to have an unavailable sub-buffer */
				if (ret == EAGAIN) {
					ret = 0;
				}
				break;
			}
		}

		/* If every buffer FD has hung up, we end the read loop here */
		if (nb_fd > 0 && num_hup == nb_fd) {
			DBG("every buffer FD has hung up\n");
			if (kconsumerd_quit == 1) {
				goto end;
			}
			continue;
		}

		/* Take care of low priority channels. */
		if (high_prio == 0) {
			for (i = 0; i < nb_fd; i++) {
				if (pollfd[i].revents == POLLIN) {
					DBG("Normal read on fd %d", pollfd[i].fd);
					ret = ctx->on_buffer_ready(local_kconsumerd_fd[i]);
					/* it's ok to have an unavailable subbuffer */
					if (ret == EAGAIN) {
						ret = 0;
					}
				}
			}
		}
	}
end:
	DBG("polling thread exiting");
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_kconsumerd_fd != NULL) {
		free(local_kconsumerd_fd);
		local_kconsumerd_fd = NULL;
	}
	return NULL;
}

/*
 * kconsumerd_create
 *
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for the signal handler)
 * - create the thread pipe (for splice)
 * Takes a function pointer as argument; this function is called when data is
 * available on a buffer. It is responsible for doing the
 * kernctl_get_next_subbuf, reading the data with mmap or splice depending on
 * the buffer configuration, and then kernctl_put_next_subbuf at the end.
 * Returns a pointer to the new context or NULL on error.
 */
struct kconsumerd_local_data *kconsumerd_create(
		int (*buffer_ready)(struct kconsumerd_fd *kconsumerd_fd))
{
	int ret;
	struct kconsumerd_local_data *ctx;

	ctx = malloc(sizeof(struct kconsumerd_local_data));
	if (ctx == NULL) {
		perror("allocating context");
		goto end;
	}

	ctx->on_buffer_ready = buffer_ready;

	ret = pipe(ctx->kconsumerd_poll_pipe);
	if (ret < 0) {
		perror("Error creating poll pipe");
		free(ctx);
		ctx = NULL;
		goto end;
	}

	ret = pipe(ctx->kconsumerd_should_quit);
	if (ret < 0) {
		perror("Error creating recv pipe");
		free(ctx);
		ctx = NULL;
		goto end;
	}

	ret = pipe(ctx->kconsumerd_thread_pipe);
	if (ret < 0) {
		perror("Error creating thread pipe");
		free(ctx);
		ctx = NULL;
		goto end;
	}

end:
	return ctx;
}

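/*
 * A minimal usage sketch of this library (illustrative only: error handling
 * is omitted, and my_buffer_ready, error_sock and command_sock_path are
 * placeholder names supplied by the caller):
 *
 *	struct kconsumerd_local_data *ctx;
 *	pthread_t recv_thread, poll_thread;
 *
 *	ctx = kconsumerd_create(my_buffer_ready);
 *	kconsumerd_set_error_socket(ctx, error_sock);
 *	kconsumerd_set_command_socket_path(ctx, command_sock_path);
 *	pthread_create(&recv_thread, NULL, kconsumerd_thread_receive_fds, ctx);
 *	pthread_create(&poll_thread, NULL, kconsumerd_thread_poll_fds, ctx);
 *	// ... run until STOP is received or kconsumerd_should_exit() is called
 *	pthread_join(recv_thread, NULL);
 *	pthread_join(poll_thread, NULL);
 *	kconsumerd_destroy(ctx);
 *	kconsumerd_cleanup();
 */
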
/*
 * kconsumerd_destroy
 *
 * Close all fds associated with the instance and free the context
 */
void kconsumerd_destroy(struct kconsumerd_local_data *ctx)
{
	close(ctx->kconsumerd_error_socket);
	close(ctx->kconsumerd_thread_pipe[0]);
	close(ctx->kconsumerd_thread_pipe[1]);
	close(ctx->kconsumerd_poll_pipe[0]);
	close(ctx->kconsumerd_poll_pipe[1]);
	close(ctx->kconsumerd_should_quit[0]);
	close(ctx->kconsumerd_should_quit[1]);
	unlink(ctx->kconsumerd_command_sock_path);
	free(ctx);
	ctx = NULL;
}

/*
 * kconsumerd_thread_receive_fds
 *
 * This thread listens on the consumerd socket and
 * receives the file descriptors from ltt-sessiond
 */
void *kconsumerd_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	struct lttcomm_kconsumerd_header tmp;
	/*
	 * Structures to poll for incoming data on the communication socket;
	 * avoids blocking on the socket.
	 */
	struct pollfd kconsumerd_sockpoll[2];
	struct kconsumerd_local_data *ctx = data;

	DBG("Creating command socket %s", ctx->kconsumerd_command_sock_path);
	unlink(ctx->kconsumerd_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->kconsumerd_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to ltt-sessiond");
	ret = kconsumerd_send_error(ctx, KCONSUMERD_COMMAND_SOCK_READY);
	if (ret < 0) {
		ERR("Error sending ready command to ltt-sessiond");
		goto end;
	}

	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* prepare the FDs to poll: the client socket and the should_quit pipe */
	kconsumerd_sockpoll[0].fd = ctx->kconsumerd_should_quit[0];
	kconsumerd_sockpoll[0].events = POLLIN | POLLPRI;
	kconsumerd_sockpoll[1].fd = client_socket;
	kconsumerd_sockpoll[1].events = POLLIN | POLLPRI;

	if (kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		WARN("On accept");
		goto end;
	}
	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* update the polling structure to poll on the established socket */
	kconsumerd_sockpoll[1].fd = sock;
	kconsumerd_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		if (kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming fds on sock");

		/* We first get the number of fds we are about to receive */
		ret = lttcomm_recv_unix_sock(sock, &tmp,
				sizeof(struct lttcomm_kconsumerd_header));
		if (ret <= 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (tmp.cmd_type == STOP) {
			DBG("Received STOP command");
			goto end;
		}
		if (kconsumerd_quit) {
			DBG("kconsumerd_thread_receive_fds received quit from signal");
			goto end;
		}

		/* we received a command to add or update fds */
		ret = kconsumerd_consumerd_recv_fd(ctx, sock, kconsumerd_sockpoll,
				tmp.payload_size, tmp.cmd_type);
		if (ret < 0) {
			ERR("Error receiving the FDs, exiting");
			goto end;
		}
		DBG("received fds on sock");
	}

end:
	DBG("kconsumerd_thread_receive_fds exiting");

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	kconsumerd_quit = 1;

	/*
	 * 2s of grace period, if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	kconsumerd_poll_timeout = KCONSUMERD_POLL_GRACE_PERIOD;

	/* wake up the polling thread */
	ret = write(ctx->kconsumerd_poll_pipe[1], "4", 1);
	if (ret < 0) {
		perror("poll pipe write");
	}
	return NULL;
}

/*
 * kconsumerd_cleanup
 *
 * Close all the tracefiles and stream fds, should be called when all
 * instances are destroyed.
 */
void kconsumerd_cleanup(void)
{
	struct kconsumerd_fd *iter, *tmp;

	/*
	 * close all outfd. Called when there are no more threads
	 * running (after joining on the threads), no need to protect
	 * list iteration with mutex.
	 */
	cds_list_for_each_entry_safe(iter, tmp, &kconsumerd_data.fd_list.head, list) {
		kconsumerd_del_fd(iter);
	}
}

/*
 * kconsumerd_should_exit
 *
 * Called from signal handler.
 */
void kconsumerd_should_exit(struct kconsumerd_local_data *ctx)
{
	int ret;
	kconsumerd_quit = 1;
	ret = write(ctx->kconsumerd_should_quit[1], "4", 1);
	if (ret < 0) {
		perror("write kconsumerd quit");
	}
}

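/*
 * Note on kconsumerd_should_exit(): a pipe write is used here (the
 * self-pipe trick) because write(2) is async-signal-safe and can be
 * called from a signal handler, unlike pthread condition variables or
 * most locking primitives. The receiving thread polls
 * kconsumerd_should_quit[0] in kconsumerd_poll_socket().
 */
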
/*
 * kconsumerd_send_error
 *
 * Send a return code to ltt-sessiond
 */
int kconsumerd_send_error(struct kconsumerd_local_data *ctx, enum lttcomm_return_code cmd)
{
	if (ctx->kconsumerd_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->kconsumerd_error_socket, &cmd,
				sizeof(enum lttcomm_return_code));
	}

	return 0;
}