static char command_sock_path[PATH_MAX]; /* Global command socket path */
static char error_sock_path[PATH_MAX]; /* Global error path */
+/*
+ * del_fd
+ *
+ * Remove a fd from the global list protected by a mutex,
+ * close both file descriptors and free the struct.
+ */
+static void del_fd(struct ltt_kconsumerd_fd *lcf)
+{
+	/* Guard before any dereference; a NULL check after using
+	 * lcf->consumerd_fd and lcf->list would be too late. */
+	if (lcf == NULL) {
+		return;
+	}
+	DBG("Removing %d", lcf->consumerd_fd);
+	pthread_mutex_lock(&kconsumerd_lock_fds);
+	cds_list_del(&lcf->list);
+	if (fds_count > 0) {
+		fds_count--;
+	}
+	DBG("Removed ltt_kconsumerd_fd");
+	/*
+	 * Always release the fds and memory once the node is unlinked;
+	 * skipping this when fds_count was already 0 would leak both
+	 * file descriptors and the struct.
+	 */
+	close(lcf->out_fd);
+	close(lcf->consumerd_fd);
+	free(lcf);
+	pthread_mutex_unlock(&kconsumerd_lock_fds);
+}
+
/*
* cleanup
*
*/
static void cleanup()
{
+ struct ltt_kconsumerd_fd *iter;
+
+ /* remove the socket file */
unlink(command_sock_path);
+
+ /* unblock the threads */
+ WARN("Terminating the threads before exiting");
+ pthread_cancel(threads[0]);
+ pthread_cancel(threads[1]);
+
+ /* close all outfd */
+ cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
+ del_fd(iter);
+ }
}
-/* send_error
+/*
+ * send_error
*
* send return code to ltt-sessiond
*/
}
}
-/*
- * cleanup_kconsumerd_fd
- *
- * Close the FDs and frees a ltt_kconsumerd_fd struct
- */
-static void cleanup_kconsumerd_fd(struct ltt_kconsumerd_fd *lcf)
-{
- if (lcf != NULL) {
- close(lcf->out_fd);
- close(lcf->consumerd_fd);
- free(lcf);
- lcf = NULL;
- }
-}
-
/*
* add_fd
*
return ret;
}
-/*
- * del_fd
- *
- * Remove a fd from the global list protected by a mutex
- */
-static void del_fd(struct ltt_kconsumerd_fd *lcf)
-{
- pthread_mutex_lock(&kconsumerd_lock_fds);
- cds_list_del(&lcf->list);
- if (fds_count > 0) {
- fds_count--;
- DBG("Removed ltt_kconsumerd_fd");
- cleanup_kconsumerd_fd(lcf);
- }
- pthread_mutex_unlock(&kconsumerd_lock_fds);
-}
-
-/*
- * close_outfds
- *
- * Close all fds in the previous fd_list
- * Must be used with kconsumerd_lock_fds lock held
- */
-static void close_outfds()
-{
- struct ltt_kconsumerd_fd *iter;
- cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
- del_fd(iter);
- }
-}
/*
* sighandler
*/
static void sighandler(int sig)
{
- /* unblock the threads */
- pthread_cancel(threads[0]);
- pthread_cancel(threads[1]);
-
- close_outfds();
cleanup();
return;
SPLICE_F_MOVE | SPLICE_F_MORE);
DBG("splice chan to pipe ret %ld", ret);
if (ret < 0) {
+ ret = errno;
perror("Error in relay splice");
- goto write_end;
+ goto splice_error;
}
ret = splice(thread_pipe[0], NULL, outfd, NULL, ret,
SPLICE_F_MOVE | SPLICE_F_MORE);
DBG("splice pipe to file %ld", ret);
if (ret < 0) {
+ ret = errno;
perror("Error in file splice");
- goto write_end;
+ goto splice_error;
}
if (ret >= len) {
len = 0;
SYNC_FILE_RANGE_WRITE);
kconsumerd_fd->out_fd_offset += ret;
}
-write_end:
+
/*
* This does a blocking write-and-wait on any page that belongs to the
* subbuffer prior to the one we just wrote.
posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
}
+ goto end;
+
+splice_error:
+ /* send the appropriate error description to sessiond */
+ switch(ret) {
+ case EBADF:
+ send_error(KCONSUMERD_SPLICE_EBADF);
+ break;
+ case EINVAL:
+ send_error(KCONSUMERD_SPLICE_EINVAL);
+ break;
+ case ENOMEM:
+ send_error(KCONSUMERD_SPLICE_ENOMEM);
+ break;
+ case ESPIPE:
+ send_error(KCONSUMERD_SPLICE_ESPIPE);
+ break;
+ }
+
+end:
return ret;
}
long ret = 0;
int infd = kconsumerd_fd->consumerd_fd;
- DBG("In read_subbuffer");
+ DBG("In read_subbuffer (infd : %d)", infd);
/* Get the next subbuffer */
err = kernctl_get_next_subbuf(infd);
if (err != 0) {
goto error;
}
+ /* Blocking call, waiting for transmission */
+ sock = lttcomm_accept_unix_sock(client_socket);
+ if (sock <= 0) {
+ WARN("On accept, retrying");
+ goto error;
+ }
while (1) {
- /* Blocking call, waiting for transmission */
- sock = lttcomm_accept_unix_sock(client_socket);
- if (sock <= 0) {
- continue;
- }
-
/* We first get the number of fd we are about to receive */
ret = lttcomm_recv_unix_sock(sock, &tmp,
sizeof(struct lttcomm_kconsumerd_header));
- if (ret < 0) {
- ERR("Receiving the lttcomm_kconsumerd_header");
- continue;
+ if (ret <= 0) {
+ ERR("Receiving the lttcomm_kconsumerd_header, exiting");
+ goto error;
}
ret = consumerd_recv_fd(sock, tmp.payload_size, tmp.cmd_type);
- if (ret < 0) {
- continue;
+ if (ret <= 0) {
+ ERR("Receiving the FD, exiting");
+ goto error;
}
}
DBG("Inside for each");
if (iter->state == ACTIVE_FD) {
DBG("Active FD %d", iter->consumerd_fd);
- pollfd[i]->fd = iter->consumerd_fd;
- pollfd[i]->events = POLLIN | POLLPRI;
+ (*pollfd)[i].fd = iter->consumerd_fd;
+ (*pollfd)[i].events = POLLIN | POLLPRI;
local_kconsumerd_fd[i] = iter;
i++;
} else if (iter->state == DELETE_FD) {
int num_rdy, num_hup, high_prio, ret, i;
struct pollfd *pollfd = NULL;
/* local view of the fds */
- struct ltt_kconsumerd_fd *local_kconsumerd_fd = NULL;
+ struct ltt_kconsumerd_fd **local_kconsumerd_fd = NULL;
/* local view of fds_count */
int nb_fd = 0;
goto end;
}
+ local_kconsumerd_fd = malloc(sizeof(struct ltt_kconsumerd_fd));
+
while (1) {
high_prio = 0;
num_hup = 0;
* local array as well
*/
if (update_fd_array) {
- ret = update_poll_array(&pollfd, &local_kconsumerd_fd);
+ ret = update_poll_array(&pollfd, local_kconsumerd_fd);
if (ret < 0) {
ERR("Error in allocating pollfd or local_outfds");
send_error(KCONSUMERD_POLL_ERROR);
case POLLPRI:
DBG("Urgent read on fd %d", pollfd[i].fd);
high_prio = 1;
- ret = read_subbuffer(&local_kconsumerd_fd[i]);
+ ret = read_subbuffer(local_kconsumerd_fd[i]);
/* it's ok to have an unavailable sub-buffer (FIXME : is it ?) */
if (ret == EAGAIN) {
ret = 0;
if (nb_fd > 0 && num_hup == nb_fd) {
DBG("every buffer FD has hung up\n");
send_error(KCONSUMERD_POLL_HUP);
- continue;
+ goto end;
}
/* Take care of low priority channels. */
switch(pollfd[i].revents) {
case POLLIN:
DBG("Normal read on fd %d", pollfd[i].fd);
- ret = read_subbuffer(&local_kconsumerd_fd[i]);
+ ret = read_subbuffer(local_kconsumerd_fd[i]);
/* it's ok to have an unavailable subbuffer (FIXME : is it ?) */
if (ret == EAGAIN) {
ret = 0;
free(local_kconsumerd_fd);
local_kconsumerd_fd = NULL;
}
+ cleanup();
return NULL;
}