[lttng-tools.git] / liblttngkconsumerd / lttngkconsumerd.c
/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; only version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include <lttng-kernel-ctl.h>
#include <lttng-sessiond-comm.h>
#include <lttng/lttng-kconsumerd.h>
#include <lttngerr.h>

static struct lttng_kconsumerd_global_data {
	/*
	 * kconsumerd_data.lock protects kconsumerd_data.fd_list,
	 * kconsumerd_data.fds_count, and kconsumerd_data.need_update. It ensures
	 * the count matches the number of items in the fd_list, and that list
	 * updates *always* trigger an fd_array update: the list update and the
	 * kconsumerd_data.need_update flag update must therefore be atomic, and
	 * so must the flag read, fd array update and flag clear.
	 */
	pthread_mutex_t lock;
	/*
	 * Number of elements in the list below. Protected by kconsumerd_data.lock.
	 */
	unsigned int fds_count;
	/*
	 * List of FDs. Protected by kconsumerd_data.lock.
	 */
	struct lttng_kconsumerd_fd_list fd_list;
	/*
	 * Flag specifying if the local array of FDs needs update in the poll
	 * function. Protected by kconsumerd_data.lock.
	 */
	unsigned int need_update;
} kconsumerd_data = {
	.fd_list.head = CDS_LIST_HEAD_INIT(kconsumerd_data.fd_list.head),
	.fds_count = 0,
	.need_update = 1,
};

/* Timeout parameter, to control the polling thread grace period. */
static int kconsumerd_poll_timeout = -1;

/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by kconsumerd_thread_receive_fds when it notices that all fds
 * have hung up. Also updated by the signal handler
 * (lttng_kconsumerd_should_exit()). Read by the polling threads.
 */
static volatile int kconsumerd_quit = 0;

/*
 * Find a session fd in the global list. The kconsumerd_data.lock must be
 * locked during this call.
 *
 * Return 1 if found, else 0.
 */
static int kconsumerd_find_session_fd(int fd)
{
	struct lttng_kconsumerd_fd *iter;

	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->sessiond_fd == fd) {
			DBG("Duplicate session fd %d", fd);
			return 1;
		}
	}

	return 0;
}

/*
 * Remove a fd from the global list protected by a mutex.
 */
static void kconsumerd_del_fd(struct lttng_kconsumerd_fd *lcf)
{
	int ret;

	pthread_mutex_lock(&kconsumerd_data.lock);
	cds_list_del(&lcf->list);
	if (kconsumerd_data.fds_count > 0) {
		kconsumerd_data.fds_count--;
		if (lcf != NULL) {
			if (lcf->mmap_base != NULL) {
				ret = munmap(lcf->mmap_base, lcf->mmap_len);
				if (ret != 0) {
					perror("munmap");
				}
			}
			if (lcf->out_fd != 0) {
				close(lcf->out_fd);
			}
			close(lcf->consumerd_fd);
			free(lcf);
			lcf = NULL;
		}
	}
	kconsumerd_data.need_update = 1;
	pthread_mutex_unlock(&kconsumerd_data.lock);
}

/*
 * Allocate a struct lttng_kconsumerd_fd from the information received on
 * the receiving socket.
 */
struct lttng_kconsumerd_fd *kconsumerd_allocate_fd(
		struct lttcomm_kconsumerd_msg *buf,
		int consumerd_fd)
{
	struct lttng_kconsumerd_fd *tmp_fd;

	tmp_fd = malloc(sizeof(struct lttng_kconsumerd_fd));
	if (tmp_fd == NULL) {
		perror("malloc struct lttng_kconsumerd_fd");
		goto end;
	}

	tmp_fd->sessiond_fd = buf->fd;
	tmp_fd->consumerd_fd = consumerd_fd;
	tmp_fd->state = buf->state;
	tmp_fd->max_sb_size = buf->max_sb_size;
	tmp_fd->out_fd = 0;
	tmp_fd->out_fd_offset = 0;
	tmp_fd->mmap_len = 0;
	tmp_fd->mmap_base = NULL;
	tmp_fd->output = buf->output;
	strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);
	tmp_fd->path_name[PATH_MAX - 1] = '\0';
	DBG("Allocated %s (sessiond_fd %d, consumerd_fd %d, out_fd %d)",
			tmp_fd->path_name, tmp_fd->sessiond_fd,
			tmp_fd->consumerd_fd, tmp_fd->out_fd);

end:
	return tmp_fd;
}

/*
 * Add a fd to the global list protected by a mutex.
 */
static int kconsumerd_add_fd(struct lttng_kconsumerd_fd *tmp_fd)
{
	int ret;

	pthread_mutex_lock(&kconsumerd_data.lock);
	/* Check if it already exists */
	ret = kconsumerd_find_session_fd(tmp_fd->sessiond_fd);
	if (ret == 1) {
		goto end;
	}
	cds_list_add(&tmp_fd->list, &kconsumerd_data.fd_list.head);
	kconsumerd_data.fds_count++;
	kconsumerd_data.need_update = 1;

end:
	pthread_mutex_unlock(&kconsumerd_data.lock);
	return ret;
}

/*
 * Update a fd according to what we just received.
 */
static void kconsumerd_change_fd_state(int sessiond_fd,
		enum lttng_kconsumerd_fd_state state)
{
	struct lttng_kconsumerd_fd *iter;

	pthread_mutex_lock(&kconsumerd_data.lock);
	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->sessiond_fd == sessiond_fd) {
			iter->state = state;
			break;
		}
	}
	kconsumerd_data.need_update = 1;
	pthread_mutex_unlock(&kconsumerd_data.lock);
}

/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with kconsumerd_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int kconsumerd_update_poll_array(
		struct lttng_kconsumerd_local_data *ctx, struct pollfd **pollfd,
		struct lttng_kconsumerd_fd **local_kconsumerd_fd)
{
	struct lttng_kconsumerd_fd *iter;
	int i = 0;

	DBG("Updating poll fd array");
	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->state == ACTIVE_FD) {
			DBG("Active FD %d", iter->consumerd_fd);
			(*pollfd)[i].fd = iter->consumerd_fd;
			(*pollfd)[i].events = POLLIN | POLLPRI;
			local_kconsumerd_fd[i] = iter;
			i++;
		}
	}

	/*
	 * Insert the kconsumerd_poll_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FDs.
	 */
	(*pollfd)[i].fd = ctx->kconsumerd_poll_pipe[0];
	(*pollfd)[i].events = POLLIN;
	return i;
}

/*
 * Receive an array of file descriptors and the associated structures
 * describing each fd (path name).
 *
 * Returns the size of received data.
 */
static int kconsumerd_consumerd_recv_fd(
		struct lttng_kconsumerd_local_data *ctx, int sfd,
		struct pollfd *kconsumerd_sockpoll, int size,
		enum lttng_kconsumerd_command cmd_type)
{
	struct iovec iov[1];
	int ret = 0, i, j, tmp2;
	struct cmsghdr *cmsg;
	int nb_fd;
	char recv_fd[CMSG_SPACE(sizeof(int))];
	struct lttcomm_kconsumerd_msg lkm;
	struct lttng_kconsumerd_fd *new_fd;
	union {
		unsigned char vc[4];
		int vi;
	} tmp;

	/* The number of fds we are about to receive */
	nb_fd = size / sizeof(struct lttcomm_kconsumerd_msg);

	/*
	 * nb_fd is the number of fds we receive. One fd per recvmsg.
	 */
	for (i = 0; i < nb_fd; i++) {
		struct msghdr msg = { 0 };

		/* Prepare to receive the structures */
		iov[0].iov_base = &lkm;
		iov[0].iov_len = sizeof(lkm);
		msg.msg_iov = iov;
		msg.msg_iovlen = 1;

		msg.msg_control = recv_fd;
		msg.msg_controllen = sizeof(recv_fd);

		DBG("Waiting to receive fd");
		if (lttng_kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) {
			goto end;
		}

		if ((ret = recvmsg(sfd, &msg, 0)) < 0) {
			perror("recvmsg");
			continue;
		}

		if (ret != (size / nb_fd)) {
			ERR("Received only %d bytes, expected %d", ret, size / nb_fd);
			lttng_kconsumerd_send_error(ctx, KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}

		cmsg = CMSG_FIRSTHDR(&msg);
		if (!cmsg) {
			ERR("Invalid control message header");
			ret = -1;
			lttng_kconsumerd_send_error(ctx, KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}

		/* If we received fds */
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
			switch (cmd_type) {
			case ADD_STREAM:
				for (j = 0; j < sizeof(int); j++)
					tmp.vc[j] = CMSG_DATA(cmsg)[j];
				DBG("kconsumerd_add_fd %s (%d)", lkm.path_name, tmp.vi);
				new_fd = kconsumerd_allocate_fd(&lkm, tmp.vi);
				if (new_fd == NULL) {
					lttng_kconsumerd_send_error(ctx, KCONSUMERD_OUTFD_ERROR);
					goto end;
				}

				if (ctx->on_recv_fd != NULL) {
					ret = ctx->on_recv_fd(new_fd);
					if (ret == 0) {
						kconsumerd_add_fd(new_fd);
					} else if (ret < 0) {
						goto end;
					}
				} else {
					kconsumerd_add_fd(new_fd);
				}
				break;
			case UPDATE_STREAM:
				if (ctx->on_update_fd != NULL) {
					ret = ctx->on_update_fd(lkm.fd, lkm.state);
					if (ret == 0) {
						kconsumerd_change_fd_state(lkm.fd, lkm.state);
					} else if (ret < 0) {
						goto end;
					}
				} else {
					kconsumerd_change_fd_state(lkm.fd, lkm.state);
				}
				break;
			default:
				break;
			}
			/* Signal the poll thread */
			tmp2 = write(ctx->kconsumerd_poll_pipe[1], "4", 1);
			if (tmp2 < 0) {
				perror("write kconsumerd poll");
			}
		} else {
			ERR("Didn't receive any fd");
			lttng_kconsumerd_send_error(ctx, KCONSUMERD_ERROR_RECV_FD);
			ret = -1;
			goto end;
		}
	}

end:
	return ret;
}
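
/*
 * For reference, a minimal sketch of the sending side of this fd-passing
 * protocol (illustrative only: the real sender lives in the session
 * daemon's lttcomm code, and this helper name is hypothetical). One fd is
 * passed per sendmsg() in an SCM_RIGHTS control message, with the
 * lttcomm_kconsumerd_msg structure as the regular payload:
 *
 *	static int send_one_fd(int sock, struct lttcomm_kconsumerd_msg *lkm,
 *			int fd)
 *	{
 *		struct msghdr msg = { 0 };
 *		struct iovec iov[1];
 *		char ctrl[CMSG_SPACE(sizeof(int))];
 *		struct cmsghdr *cmsg;
 *
 *		iov[0].iov_base = lkm;
 *		iov[0].iov_len = sizeof(*lkm);
 *		msg.msg_iov = iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = ctrl;
 *		msg.msg_controllen = sizeof(ctrl);
 *
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = SOL_SOCKET;
 *		cmsg->cmsg_type = SCM_RIGHTS;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
 *
 *		return sendmsg(sock, &msg, 0);
 *	}
 */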

/*
 * Set the error socket.
 */
void lttng_kconsumerd_set_error_sock(
		struct lttng_kconsumerd_local_data *ctx, int sock)
{
	ctx->kconsumerd_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_kconsumerd_set_command_sock_path(
		struct lttng_kconsumerd_local_data *ctx, char *sock)
{
	ctx->kconsumerd_command_sock_path = sock;
}

/*
 * Flush to disk the pages of the sub-buffer preceding the one that ends at
 * orig_offset, then drop them from the page cache.
 */
static void lttng_kconsumerd_sync_trace_file(
		struct lttng_kconsumerd_fd *kconsumerd_fd, off_t orig_offset)
{
	int outfd = kconsumerd_fd->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);
		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED: we won't re-access data in a near future
		 * after we write it.
		 *
		 * We need to call fadvise again after the file grows because the
		 * kernel does not seem to apply fadvise to non-existing parts of
		 * the file.
		 *
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not well
		 * defined. So it can be expected to lead to lower throughput in
		 * streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}
}

/*
 * Mmap the ring buffer, read it and write the data to the tracefile.
 *
 * Returns the number of bytes written.
 */
int lttng_kconsumerd_on_read_subbuffer_mmap(
		struct lttng_kconsumerd_local_data *ctx,
		struct lttng_kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	unsigned long mmap_offset;
	long ret = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	/* Get the offset inside the fd to mmap */
	ret = kernctl_get_mmap_read_offset(fd, &mmap_offset);
	if (ret != 0) {
		ret = errno;
		perror("kernctl_get_mmap_read_offset");
		goto end;
	}

	while (len > 0) {
		ret = write(outfd, kconsumerd_fd->mmap_base + mmap_offset, len);
		if (ret < 0) {
			ret = errno;
			perror("Error in file write");
			goto end;
		}
		/* Handle partial writes: advance inside the mapping as well */
		len -= ret;
		mmap_offset += ret;
		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}

	lttng_kconsumerd_sync_trace_file(kconsumerd_fd, orig_offset);

end:
	return ret;
}

/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * Returns the number of bytes spliced.
 */
int lttng_kconsumerd_on_read_subbuffer_splice(
		struct lttng_kconsumerd_local_data *ctx,
		struct lttng_kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	long ret = 0;
	loff_t offset = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	while (len > 0) {
		DBG("splice chan to pipe offset %lu (fd : %d)",
				(unsigned long)offset, fd);
		ret = splice(fd, &offset, ctx->kconsumerd_thread_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe ret %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in relay splice");
			goto splice_error;
		}

		ret = splice(ctx->kconsumerd_thread_pipe[0], NULL, outfd, NULL, ret,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice pipe to file %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in file splice");
			goto splice_error;
		}
		len -= ret;
		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}
	lttng_kconsumerd_sync_trace_file(kconsumerd_fd, orig_offset);

	goto end;

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EBADF:
		lttng_kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_EBADF);
		break;
	case EINVAL:
		lttng_kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	return ret;
}
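
/*
 * Illustrative sketch of an on_buffer_ready callback for
 * lttng_kconsumerd_create() (defined below), driving the two read paths
 * above. This is not part of the library: the file-scope ctx, the
 * LTTNG_EVENT_SPLICE comparison and the use of max_sb_size as the read
 * length are simplifying assumptions of this example.
 *
 *	static struct lttng_kconsumerd_local_data *ctx;
 *
 *	static int on_buffer_ready(struct lttng_kconsumerd_fd *kfd)
 *	{
 *		int err;
 *
 *		err = kernctl_get_next_subbuf(kfd->consumerd_fd);
 *		if (err != 0) {
 *			return errno;	// EAGAIN: no full sub-buffer available
 *		}
 *		if (kfd->output == LTTNG_EVENT_SPLICE) {
 *			err = lttng_kconsumerd_on_read_subbuffer_splice(ctx, kfd,
 *					kfd->max_sb_size);
 *		} else {
 *			err = lttng_kconsumerd_on_read_subbuffer_mmap(ctx, kfd,
 *					kfd->max_sb_size);
 *		}
 *		kernctl_put_next_subbuf(kfd->consumerd_fd);
 *		return err < 0 ? err : 0;
 *	}
 */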

/*
 * Take a snapshot for a specific fd.
 *
 * Returns 0 on success, a positive errno value on error.
 */
int lttng_kconsumerd_take_snapshot(struct lttng_kconsumerd_local_data *ctx,
		struct lttng_kconsumerd_fd *kconsumerd_fd)
{
	int ret = 0;
	int infd = kconsumerd_fd->consumerd_fd;

	ret = kernctl_snapshot(infd);
	if (ret != 0) {
		ret = errno;
		perror("Getting sub-buffer snapshot.");
	}

	return ret;
}

/*
 * Get the produced position.
 *
 * Returns 0 on success, a positive errno value on error.
 */
int lttng_kconsumerd_get_produced_snapshot(
		struct lttng_kconsumerd_local_data *ctx,
		struct lttng_kconsumerd_fd *kconsumerd_fd,
		unsigned long *pos)
{
	int ret;
	int infd = kconsumerd_fd->consumerd_fd;

	ret = kernctl_snapshot_get_produced(infd, pos);
	if (ret != 0) {
		ret = errno;
		perror("kernctl_snapshot_get_produced");
	}

	return ret;
}

/*
 * Poll on the should_quit pipe and the command socket. Returns -1 if an
 * error occurred or the caller should quit, 0 if data is available on the
 * command socket.
 */
int lttng_kconsumerd_poll_socket(struct pollfd *kconsumerd_sockpoll)
{
	int num_rdy;

	num_rdy = poll(kconsumerd_sockpoll, 2, -1);
	if (num_rdy == -1) {
		perror("Poll error");
		goto exit;
	}
	if (kconsumerd_sockpoll[0].revents == POLLIN) {
		DBG("kconsumerd_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}

/*
 * This thread polls the fds in the fd_list to consume the data and write it
 * to the tracefile if necessary.
 */
void *lttng_kconsumerd_thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the fds */
	struct lttng_kconsumerd_fd **local_kconsumerd_fd = NULL;
	/* local view of kconsumerd_data.fds_count */
	int nb_fd = 0;
	char tmp;
	int tmp2;
	struct lttng_kconsumerd_local_data *ctx = data;

	local_kconsumerd_fd = malloc(sizeof(struct lttng_kconsumerd_fd));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * The fd_list has been updated, we need to update our local array
		 * as well.
		 */
		pthread_mutex_lock(&kconsumerd_data.lock);
		if (kconsumerd_data.need_update) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_kconsumerd_fd != NULL) {
				free(local_kconsumerd_fd);
				local_kconsumerd_fd = NULL;
			}

			/* Allocate for all fds + 1 for the kconsumerd_poll_pipe */
			pollfd = malloc((kconsumerd_data.fds_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				perror("pollfd malloc");
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}

			/* Allocate for all fds + 1 for the kconsumerd_poll_pipe */
			local_kconsumerd_fd = malloc((kconsumerd_data.fds_count + 1) *
					sizeof(struct lttng_kconsumerd_fd));
			if (local_kconsumerd_fd == NULL) {
				perror("local_kconsumerd_fd malloc");
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}
			ret = kconsumerd_update_poll_array(ctx, &pollfd, local_kconsumerd_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_kconsumerd_send_error(ctx, KCONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}
			nb_fd = ret;
			kconsumerd_data.need_update = 0;
		}
		pthread_mutex_unlock(&kconsumerd_data.lock);

		/* poll on the array of fds */
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, kconsumerd_poll_timeout);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			perror("Poll error");
			lttng_kconsumerd_send_error(ctx, KCONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/* No FDs and the quit flag is set: clean up and exit the thread */
		if (nb_fd == 0 && kconsumerd_quit == 1) {
			goto end;
		}

		/*
		 * If the kconsumerd_poll_pipe triggered poll, go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array updates over low-priority reads.
		 */
		if (pollfd[nb_fd].revents == POLLIN) {
			DBG("kconsumerd_poll_pipe wake up");
			tmp2 = read(ctx->kconsumerd_poll_pipe[0], &tmp, 1);
			if (tmp2 < 0) {
				perror("read kconsumerd poll");
			}
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			switch (pollfd[i].revents) {
			case POLLERR:
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLHUP:
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLNVAL:
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLPRI:
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				ret = ctx->on_buffer_ready(local_kconsumerd_fd[i]);
				/* it's ok to have an unavailable sub-buffer */
				if (ret == EAGAIN) {
					ret = 0;
				}
				break;
			}
		}

		/* If every buffer FD has hung up, we end the read loop here */
		if (nb_fd > 0 && num_hup == nb_fd) {
			DBG("every buffer FD has hung up\n");
			if (kconsumerd_quit == 1) {
				goto end;
			}
			continue;
		}

		/* Take care of low priority channels. */
		if (high_prio == 0) {
			for (i = 0; i < nb_fd; i++) {
				if (pollfd[i].revents == POLLIN) {
					DBG("Normal read on fd %d", pollfd[i].fd);
					ret = ctx->on_buffer_ready(local_kconsumerd_fd[i]);
					/* it's ok to have an unavailable sub-buffer */
					if (ret == EAGAIN) {
						ret = 0;
					}
				}
			}
		}
	}
end:
	DBG("polling thread exiting");
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_kconsumerd_fd != NULL) {
		free(local_kconsumerd_fd);
		local_kconsumerd_fd = NULL;
	}
	return NULL;
}

/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument: this function is called when data
 * is available on a buffer. The function is responsible for doing the
 * kernctl_get_next_subbuf, reading the data with mmap or splice depending
 * on the buffer configuration, and then the kernctl_put_next_subbuf at the
 * end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_kconsumerd_local_data *lttng_kconsumerd_create(
		int (*buffer_ready)(struct lttng_kconsumerd_fd *kconsumerd_fd),
		int (*recv_fd)(struct lttng_kconsumerd_fd *kconsumerd_fd),
		int (*update_fd)(int sessiond_fd, uint32_t state))
{
	int ret, i;
	struct lttng_kconsumerd_local_data *ctx;

	ctx = malloc(sizeof(struct lttng_kconsumerd_local_data));
	if (ctx == NULL) {
		perror("allocating context");
		goto error;
	}

	ctx->kconsumerd_error_socket = -1;
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_fd = recv_fd;
	ctx->on_update_fd = update_fd;

	ret = pipe(ctx->kconsumerd_poll_pipe);
	if (ret < 0) {
		perror("Error creating poll pipe");
		goto error_poll_pipe;
	}

	ret = pipe(ctx->kconsumerd_should_quit);
	if (ret < 0) {
		perror("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->kconsumerd_thread_pipe);
	if (ret < 0) {
		perror("Error creating thread pipe");
		goto error_thread_pipe;
	}

	return ctx;

error_thread_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->kconsumerd_should_quit[i]);
		assert(!err);
	}
error_quit_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->kconsumerd_poll_pipe[i]);
		assert(!err);
	}
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
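
/*
 * Typical wiring, sketched (illustrative only: the real setup lives in the
 * kconsumerd binary; command_sock_path, error_sock and on_buffer_ready are
 * placeholders, and error handling is omitted):
 *
 *	struct lttng_kconsumerd_local_data *ctx;
 *	pthread_t fd_thread, poll_thread;
 *
 *	ctx = lttng_kconsumerd_create(on_buffer_ready, NULL, NULL);
 *	lttng_kconsumerd_set_command_sock_path(ctx, command_sock_path);
 *	lttng_kconsumerd_set_error_sock(ctx, error_sock);
 *	pthread_create(&fd_thread, NULL,
 *			lttng_kconsumerd_thread_receive_fds, (void *) ctx);
 *	pthread_create(&poll_thread, NULL,
 *			lttng_kconsumerd_thread_poll_fds, (void *) ctx);
 *	...
 *	pthread_join(fd_thread, NULL);
 *	pthread_join(poll_thread, NULL);
 *	lttng_kconsumerd_destroy(ctx);
 *	lttng_kconsumerd_cleanup();
 */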

/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_kconsumerd_destroy(struct lttng_kconsumerd_local_data *ctx)
{
	close(ctx->kconsumerd_error_socket);
	close(ctx->kconsumerd_thread_pipe[0]);
	close(ctx->kconsumerd_thread_pipe[1]);
	close(ctx->kconsumerd_poll_pipe[0]);
	close(ctx->kconsumerd_poll_pipe[1]);
	close(ctx->kconsumerd_should_quit[0]);
	close(ctx->kconsumerd_should_quit[1]);
	unlink(ctx->kconsumerd_command_sock_path);
	free(ctx);
	ctx = NULL;
}

/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *lttng_kconsumerd_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	struct lttcomm_kconsumerd_header tmp;
	/*
	 * Structure to poll for incoming data on the communication socket;
	 * avoids blocking reads on the socket.
	 */
	struct pollfd kconsumerd_sockpoll[2];
	struct lttng_kconsumerd_local_data *ctx = data;

	DBG("Creating command socket %s", ctx->kconsumerd_command_sock_path);
	unlink(ctx->kconsumerd_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->kconsumerd_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to ltt-sessiond");
	ret = lttng_kconsumerd_send_error(ctx, KCONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to ltt-sessiond");
		goto end;
	}

	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* prepare the FDs to poll: the client socket and the should_quit pipe */
	kconsumerd_sockpoll[0].fd = ctx->kconsumerd_should_quit[0];
	kconsumerd_sockpoll[0].events = POLLIN | POLLPRI;
	kconsumerd_sockpoll[1].fd = client_socket;
	kconsumerd_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		WARN("On accept");
		goto end;
	}
	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* update the polling structure to poll on the established socket */
	kconsumerd_sockpoll[1].fd = sock;
	kconsumerd_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		if (lttng_kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming fds on sock");

		/* We first get the number of fds we are about to receive */
		ret = lttcomm_recv_unix_sock(sock, &tmp,
				sizeof(struct lttcomm_kconsumerd_header));
		if (ret <= 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (tmp.cmd_type == STOP) {
			DBG("Received STOP command");
			goto end;
		}
		if (kconsumerd_quit) {
			DBG("kconsumerd_thread_receive_fds received quit from signal");
			goto end;
		}

		/* we received a command to add or update fds */
		ret = kconsumerd_consumerd_recv_fd(ctx, sock, kconsumerd_sockpoll,
				tmp.payload_size, tmp.cmd_type);
		if (ret < 0) {
			ERR("Receiving the FD, exiting");
			goto end;
		}
		DBG("received fds on sock");
	}

end:
	DBG("kconsumerd_thread_receive_fds exiting");

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	kconsumerd_quit = 1;

	/*
	 * 2s of grace period, if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	kconsumerd_poll_timeout = LTTNG_KCONSUMERD_POLL_GRACE_PERIOD;

	/* wake up the polling thread */
	ret = write(ctx->kconsumerd_poll_pipe[1], "4", 1);
	if (ret < 0) {
		perror("poll pipe write");
	}
	return NULL;
}

/*
 * Close all the tracefiles and stream fds; should be called when all
 * instances are destroyed.
 */
void lttng_kconsumerd_cleanup(void)
{
	struct lttng_kconsumerd_fd *iter, *tmp;

	/*
	 * Close all outfd. Called when there are no more threads running
	 * (after joining on the threads), so no need to protect list iteration
	 * with a mutex.
	 */
	cds_list_for_each_entry_safe(iter, tmp,
			&kconsumerd_data.fd_list.head, list) {
		kconsumerd_del_fd(iter);
	}
}

/*
 * Called from the signal handler.
 */
void lttng_kconsumerd_should_exit(struct lttng_kconsumerd_local_data *ctx)
{
	int ret;

	kconsumerd_quit = 1;
	ret = write(ctx->kconsumerd_should_quit[1], "4", 1);
	if (ret < 0) {
		perror("write kconsumerd quit");
	}
}

/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0; this is not a fatal error.
 */
int lttng_kconsumerd_send_error(
		struct lttng_kconsumerd_local_data *ctx, int cmd)
{
	if (ctx->kconsumerd_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->kconsumerd_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}