static DEFINE_MUTEX(sessions_mutex);
static struct kmem_cache *event_cache;
-static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
-static void lttng_session_sync_enablers(struct lttng_session *session);
-static void lttng_enabler_destroy(struct lttng_enabler *enabler);
+static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
+static void lttng_session_sync_event_enablers(struct lttng_session *session);
+static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
static void _lttng_event_destroy(struct lttng_event *event);
static void _lttng_channel_destroy(struct lttng_channel *chan);
struct lttng_channel *chan, *tmpchan;
struct lttng_event *event, *tmpevent;
struct lttng_metadata_stream *metadata_stream;
- struct lttng_enabler *enabler, *tmpenabler;
+ struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
int ret;
mutex_lock(&sessions_mutex);
WARN_ON(ret);
}
synchronize_trace(); /* Wait for in-flight events to complete */
- list_for_each_entry_safe(enabler, tmpenabler,
+ list_for_each_entry(chan, &session->chan, list) {
+ ret = lttng_syscalls_destroy(chan);
+ WARN_ON(ret);
+ }
+ list_for_each_entry_safe(event_enabler, tmp_event_enabler,
&session->enablers_head, node)
- lttng_enabler_destroy(enabler);
+ lttng_event_enabler_destroy(event_enabler);
list_for_each_entry_safe(event, tmpevent, &session->events, list)
_lttng_event_destroy(event);
list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
BUG_ON(chan->channel_type == METADATA_CHANNEL);
_lttng_channel_destroy(chan);
}
+ mutex_lock(&session->metadata_cache->lock);
list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
_lttng_metadata_channel_hangup(metadata_stream);
+ mutex_unlock(&session->metadata_cache->lock);
lttng_id_tracker_destroy(&session->pid_tracker, false);
lttng_id_tracker_destroy(&session->vpid_tracker, false);
lttng_id_tracker_destroy(&session->uid_tracker, false);
session->tstate = 1;
/* We need to sync enablers with session before activation. */
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
/*
* Snapshot the number of events per channel to know the type of header
/* Set transient enabler state to "disabled" */
session->tstate = 0;
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
/* Set each stream's quiescent state. */
list_for_each_entry(chan, &session->chan, list) {
}
/* Set transient enabler state to "enabled" */
channel->tstate = 1;
- lttng_session_sync_enablers(channel->session);
+ lttng_session_sync_event_enablers(channel->session);
/* Set atomically the state to "enabled" */
WRITE_ONCE(channel->enabled, 1);
end:
WRITE_ONCE(channel->enabled, 0);
/* Set transient enabler state to "enabled" */
channel->tstate = 0;
- lttng_session_sync_enablers(channel->session);
+ lttng_session_sync_event_enablers(channel->session);
end:
mutex_unlock(&sessions_mutex);
return ret;
goto active; /* Refuse to add channel to active session */
transport = lttng_transport_find(transport_name);
if (!transport) {
- printk(KERN_WARNING "LTTng transport %s not found\n",
+ printk(KERN_WARNING "LTTng: transport %s not found\n",
transport_name);
goto notransport;
}
if (!try_module_get(transport->owner)) {
- printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+ printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
goto notransport;
}
chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
event->enabled = 0;
event->registered = 0;
event->desc = event_desc;
+ switch (event_param->u.syscall.entryexit) {
+ case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
+ ret = -EINVAL;
+ goto register_error;
+ case LTTNG_KERNEL_SYSCALL_ENTRY:
+ event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
+ break;
+ case LTTNG_KERNEL_SYSCALL_EXIT:
+ event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
+ break;
+ }
+ switch (event_param->u.syscall.abi) {
+ case LTTNG_KERNEL_SYSCALL_ABI_ALL:
+ ret = -EINVAL;
+ goto register_error;
+ case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
+ event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
+ break;
+ case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
+ event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
+ break;
+ }
if (!event->desc) {
ret = -EINVAL;
goto register_error;
event);
break;
case LTTNG_KERNEL_SYSCALL:
- ret = lttng_syscall_filter_enable(event->chan,
- desc->name);
+ ret = lttng_syscall_filter_enable(event->chan, event);
break;
case LTTNG_KERNEL_KPROBE:
case LTTNG_KERNEL_UPROBE:
ret = 0;
break;
case LTTNG_KERNEL_SYSCALL:
- ret = lttng_syscall_filter_disable(event->chan,
- desc->name);
+ ret = lttng_syscall_filter_disable(event->chan, event);
break;
case LTTNG_KERNEL_NOOP:
ret = 0;
struct lttng_enabler *enabler)
{
const char *desc_name, *enabler_name;
+ bool compat = false, entry = false;
enabler_name = enabler->event_param.name;
switch (enabler->event_param.instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
desc_name = desc->name;
+ switch (enabler->format_type) {
+ case LTTNG_ENABLER_FORMAT_STAR_GLOB:
+ return lttng_match_enabler_star_glob(desc_name, enabler_name);
+ case LTTNG_ENABLER_FORMAT_NAME:
+ return lttng_match_enabler_name(desc_name, enabler_name);
+ default:
+ return -EINVAL;
+ }
break;
case LTTNG_KERNEL_SYSCALL:
desc_name = desc->name;
- if (!strncmp(desc_name, "compat_", strlen("compat_")))
+ if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
desc_name += strlen("compat_");
+ compat = true;
+ }
if (!strncmp(desc_name, "syscall_exit_",
strlen("syscall_exit_"))) {
desc_name += strlen("syscall_exit_");
} else if (!strncmp(desc_name, "syscall_entry_",
strlen("syscall_entry_"))) {
desc_name += strlen("syscall_entry_");
+ entry = true;
} else {
WARN_ON_ONCE(1);
return -EINVAL;
}
+ switch (enabler->event_param.u.syscall.entryexit) {
+ case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
+ break;
+ case LTTNG_KERNEL_SYSCALL_ENTRY:
+ if (!entry)
+ return 0;
+ break;
+ case LTTNG_KERNEL_SYSCALL_EXIT:
+ if (entry)
+ return 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (enabler->event_param.u.syscall.abi) {
+ case LTTNG_KERNEL_SYSCALL_ABI_ALL:
+ break;
+ case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
+ if (compat)
+ return 0;
+ break;
+ case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
+ if (!compat)
+ return 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (enabler->event_param.u.syscall.match) {
+ case LTTNG_SYSCALL_MATCH_NAME:
+ switch (enabler->format_type) {
+ case LTTNG_ENABLER_FORMAT_STAR_GLOB:
+ return lttng_match_enabler_star_glob(desc_name, enabler_name);
+ case LTTNG_ENABLER_FORMAT_NAME:
+ return lttng_match_enabler_name(desc_name, enabler_name);
+ default:
+ return -EINVAL;
+ }
+ break;
+ case LTTNG_SYSCALL_MATCH_NR:
+ return -EINVAL; /* Not implemented. */
+ default:
+ return -EINVAL;
+ }
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
- switch (enabler->type) {
- case LTTNG_ENABLER_STAR_GLOB:
- return lttng_match_enabler_star_glob(desc_name, enabler_name);
- case LTTNG_ENABLER_NAME:
- return lttng_match_enabler_name(desc_name, enabler_name);
- default:
- return -EINVAL;
- }
}
+/*
+ * Return 1 if @event_enabler applies to @event: the instrumentation
+ * types must be identical, the event descriptor must match the base
+ * enabler (name/glob and, for syscalls, entry-exit/ABI matching), and
+ * the event must belong to the enabler's channel. Return 0 otherwise.
+ * NOTE(review): a negative error returned by lttng_desc_match_enabler()
+ * is truthy here and would be treated as a match — confirm upstream.
+ */
static
-int lttng_event_match_enabler(struct lttng_event *event,
-		struct lttng_enabler *enabler)
+int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
+		struct lttng_event *event)
{
-	if (enabler->event_param.instrumentation != event->instrumentation)
+	struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
+		event_enabler);
+
+	if (base_enabler->event_param.instrumentation != event->instrumentation)
		return 0;
-	if (lttng_desc_match_enabler(event->desc, enabler)
-		&& event->chan == enabler->chan)
+	if (lttng_desc_match_enabler(event->desc, base_enabler)
+		&& event->chan == event_enabler->chan)
		return 1;
	else
		return 0;
}
static
-struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
+struct lttng_enabler_ref *lttng_enabler_ref(
+ struct list_head *enablers_ref_list,
struct lttng_enabler *enabler)
{
struct lttng_enabler_ref *enabler_ref;
- list_for_each_entry(enabler_ref,
- &event->enablers_ref_head, node) {
+ list_for_each_entry(enabler_ref, enablers_ref_list, node) {
if (enabler_ref->ref == enabler)
return enabler_ref;
}
}
static
-void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
+void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
{
- struct lttng_session *session = enabler->chan->session;
+ struct lttng_session *session = event_enabler->chan->session;
struct lttng_probe_desc *probe_desc;
const struct lttng_event_desc *desc;
int i;
struct lttng_event *event;
desc = probe_desc->event_desc[i];
- if (!lttng_desc_match_enabler(desc, enabler))
+ if (!lttng_desc_match_enabler(desc,
+ lttng_event_enabler_as_enabler(event_enabler)))
continue;
event_name = desc->name;
name_len = strlen(event_name);
head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
lttng_hlist_for_each_entry(event, head, hlist) {
if (event->desc == desc
- && event->chan == enabler->chan)
+ && event->chan == event_enabler->chan)
found = 1;
}
if (found)
* We need to create an event for this
* event probe.
*/
- event = _lttng_event_create(enabler->chan,
+ event = _lttng_event_create(event_enabler->chan,
NULL, NULL, desc,
LTTNG_KERNEL_TRACEPOINT);
if (!event) {
- printk(KERN_INFO "Unable to create event %s\n",
+ printk(KERN_INFO "LTTng: Unable to create event %s\n",
probe_desc->event_desc[i]->name);
}
}
}
+/*
+ * Ensure syscall events exist for this event enabler by registering
+ * syscall instrumentation on the enabler's channel (NULL filter
+ * argument). A registration failure is only reported via WARN_ON_ONCE;
+ * it is not propagated to the caller.
+ */
static
-void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
+void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
{
	int ret;
-	ret = lttng_syscalls_register(enabler->chan, NULL);
+	ret = lttng_syscalls_register(event_enabler->chan, NULL);
	WARN_ON_ONCE(ret);
}
* Should be called with sessions mutex held.
*/
static
-void lttng_create_event_if_missing(struct lttng_enabler *enabler)
+void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
{
- switch (enabler->event_param.instrumentation) {
+ switch (event_enabler->base.event_param.instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
- lttng_create_tracepoint_if_missing(enabler);
+ lttng_create_tracepoint_event_if_missing(event_enabler);
break;
case LTTNG_KERNEL_SYSCALL:
- lttng_create_syscall_if_missing(enabler);
+ lttng_create_syscall_event_if_missing(event_enabler);
break;
default:
WARN_ON_ONCE(1);
}
/*
- * Create events associated with an enabler (if not already present),
+ * Create events associated with an event_enabler (if not already present),
* and add backward reference from the event to the enabler.
* Should be called with sessions mutex held.
*/
static
-int lttng_enabler_ref_events(struct lttng_enabler *enabler)
+int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
{
- struct lttng_session *session = enabler->chan->session;
+ struct lttng_channel *chan = event_enabler->chan;
+ struct lttng_session *session = event_enabler->chan->session;
+ struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
struct lttng_event *event;
+ if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
+ base_enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
+ base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
+ base_enabler->event_param.u.syscall.match == LTTNG_SYSCALL_MATCH_NAME &&
+ !strcmp(base_enabler->event_param.name, "*")) {
+ if (base_enabler->enabled)
+ WRITE_ONCE(chan->syscall_all, 1);
+ else
+ WRITE_ONCE(chan->syscall_all, 0);
+ }
+
/* First ensure that probe events are created for this enabler. */
- lttng_create_event_if_missing(enabler);
+ lttng_create_event_if_missing(event_enabler);
- /* For each event matching enabler in session event list. */
+ /* For each event matching event_enabler in session event list. */
list_for_each_entry(event, &session->events, list) {
struct lttng_enabler_ref *enabler_ref;
- if (!lttng_event_match_enabler(event, enabler))
+ if (!lttng_event_enabler_match_event(event_enabler, event))
continue;
- enabler_ref = lttng_event_enabler_ref(event, enabler);
+ enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
+ lttng_event_enabler_as_enabler(event_enabler));
if (!enabler_ref) {
/*
* If no backward ref, create it.
- * Add backward ref from event to enabler.
+ * Add backward ref from event to event_enabler.
*/
enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
if (!enabler_ref)
return -ENOMEM;
- enabler_ref->ref = enabler;
+ enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
list_add(&enabler_ref->node,
&event->enablers_ref_head);
}
/*
* Link filter bytecodes if not linked yet.
*/
- lttng_enabler_event_link_bytecode(event, enabler);
+ lttng_enabler_link_bytecode(event->desc,
+ lttng_static_ctx,
+ &event->bytecode_runtime_head,
+ lttng_event_enabler_as_enabler(event_enabler));
/* TODO: merge event context. */
}
struct lttng_session *session;
list_for_each_entry(session, &sessions, list)
- lttng_session_lazy_sync_enablers(session);
+ lttng_session_lazy_sync_event_enablers(session);
return 0;
}
+/*
+ * Allocate and initialize an event enabler (zeroed), recording the
+ * match format (star-glob vs. exact name) and the requested event
+ * parameters in the embedded base enabler, and linking it to @chan.
+ * The enabler starts disabled. It is added to the owning session's
+ * enabler list under sessions_mutex, followed by a lazy enabler sync.
+ * Returns the new enabler, or NULL on allocation failure.
+ */
-struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
+struct lttng_event_enabler *lttng_event_enabler_create(
+		enum lttng_enabler_format_type format_type,
		struct lttng_kernel_event *event_param,
		struct lttng_channel *chan)
{
-	struct lttng_enabler *enabler;
+	struct lttng_event_enabler *event_enabler;
-	enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
-	if (!enabler)
+	event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
+	if (!event_enabler)
		return NULL;
-	enabler->type = type;
-	INIT_LIST_HEAD(&enabler->filter_bytecode_head);
-	memcpy(&enabler->event_param, event_param,
-		sizeof(enabler->event_param));
-	enabler->chan = chan;
+	event_enabler->base.format_type = format_type;
+	INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
+	memcpy(&event_enabler->base.event_param, event_param,
+		sizeof(event_enabler->base.event_param));
+	event_enabler->chan = chan;
	/* ctx left NULL */
-	enabler->enabled = 0;
-	enabler->evtype = LTTNG_TYPE_ENABLER;
+	event_enabler->base.enabled = 0;
+	event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
	mutex_lock(&sessions_mutex);
-	list_add(&enabler->node, &enabler->chan->session->enablers_head);
-	lttng_session_lazy_sync_enablers(enabler->chan->session);
+	list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
+	lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
	mutex_unlock(&sessions_mutex);
-	return enabler;
+	return event_enabler;
}
+/*
+ * Mark the event enabler as enabled, then lazily sync the owning
+ * session's enablers (a no-op while the session is inactive).
+ * Takes sessions_mutex. Always returns 0.
+ */
-int lttng_enabler_enable(struct lttng_enabler *enabler)
+int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
{
	mutex_lock(&sessions_mutex);
-	enabler->enabled = 1;
-	lttng_session_lazy_sync_enablers(enabler->chan->session);
+	lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
+	lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
	mutex_unlock(&sessions_mutex);
	return 0;
}
+/*
+ * Mark the event enabler as disabled, then lazily sync the owning
+ * session's enablers (a no-op while the session is inactive).
+ * Takes sessions_mutex. Always returns 0.
+ */
-int lttng_enabler_disable(struct lttng_enabler *enabler)
+int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
{
	mutex_lock(&sessions_mutex);
-	enabler->enabled = 0;
-	lttng_session_lazy_sync_enablers(enabler->chan->session);
+	lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
+	lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
	mutex_unlock(&sessions_mutex);
	return 0;
}
+static
int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
struct lttng_kernel_filter_bytecode __user *bytecode)
{
sizeof(*bytecode) + bytecode_len);
if (ret)
goto error_free;
+
bytecode_node->enabler = enabler;
/* Enforce length based on allocated size */
bytecode_node->bc.len = bytecode_len;
list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+
return 0;
error_free:
return ret;
}
+/*
+ * Attach user-space filter bytecode to an event enabler: delegate to
+ * lttng_enabler_attach_bytecode() on the embedded base enabler, then
+ * lazily sync the owning session's enablers so the bytecode can be
+ * linked to matching events. Returns 0 on success, a negative error
+ * from the attach step otherwise (no sync is performed on failure).
+ */
+int lttng_event_enabler_attach_bytecode(struct lttng_event_enabler *event_enabler,
+		struct lttng_kernel_filter_bytecode __user *bytecode)
+{
+	int ret;
+	ret = lttng_enabler_attach_bytecode(
+		lttng_event_enabler_as_enabler(event_enabler), bytecode);
+	if (ret)
+		goto error;
+
+	lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
+	return 0;
+
+error:
+	return ret;
+}
+
int lttng_event_add_callsite(struct lttng_event *event,
struct lttng_kernel_event_callsite __user *callsite)
{
}
}
-int lttng_enabler_attach_context(struct lttng_enabler *enabler,
+int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
struct lttng_kernel_context *context_param)
{
return -ENOSYS;
&enabler->filter_bytecode_head, node) {
kfree(filter_node);
}
+}
+
+/*
+ * Tear down an event enabler: release base enabler state (frees the
+ * attached filter bytecode list), destroy its contexts, unlink it from
+ * the session's enabler list, and free it.
+ * NOTE(review): invoked from the session teardown path, which holds
+ * sessions_mutex — confirm all callers do.
+ */
+static
+void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
+{
+	lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
	/* Destroy contexts */
-	lttng_destroy_context(enabler->ctx);
+	lttng_destroy_context(event_enabler->ctx);
-	list_del(&enabler->node);
-	kfree(enabler);
+	list_del(&event_enabler->node);
+	kfree(event_enabler);
}
/*
- * lttng_session_sync_enablers should be called just before starting a
+ * lttng_session_sync_event_enablers should be called just before starting a
* session.
* Should be called with sessions mutex held.
*/
static
-void lttng_session_sync_enablers(struct lttng_session *session)
+void lttng_session_sync_event_enablers(struct lttng_session *session)
{
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
struct lttng_event *event;
- list_for_each_entry(enabler, &session->enablers_head, node)
- lttng_enabler_ref_events(enabler);
+ list_for_each_entry(event_enabler, &session->enablers_head, node)
+ lttng_event_enabler_ref_events(event_enabler);
/*
* For each event, if at least one of its enablers is enabled,
* and its channel and session transient states are enabled, we
* Should be called with sessions mutex held.
*/
+/*
+ * Conditional (lazy) variant of the enabler sync: skip entirely while
+ * the session is not active, otherwise perform the full
+ * enabler-to-event sync.
+ */
static
-void lttng_session_lazy_sync_enablers(struct lttng_session *session)
+void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
{
	/* We can skip if session is not active */
	if (!session->active)
		return;
-	lttng_session_sync_enablers(session);
+	lttng_session_sync_event_enablers(session);
}
/*
* was written and a negative value on error.
*/
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
- struct channel *chan)
+ struct channel *chan, bool *coherent)
{
struct lib_ring_buffer_ctx ctx;
int ret = 0;
ret = stream->transport->ops.event_reserve(&ctx, 0);
if (ret != 0) {
printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
+ stream->coherent = false;
goto end;
}
stream->transport->ops.event_write(&ctx,
reserve_len);
stream->transport->ops.event_commit(&ctx);
stream->metadata_in += reserve_len;
+ if (reserve_len < len)
+ stream->coherent = false;
+ else
+ stream->coherent = true;
ret = reserve_len;
end:
+ if (coherent)
+ *coherent = stream->coherent;
mutex_unlock(&stream->metadata_cache->lock);
return ret;
}
+/*
+ * Begin an atomic metadata transaction. Transactions nest via the
+ * cache's "producing" refcount: only the outermost begin (count
+ * transition 0 -> 1) takes the metadata cache lock, which remains held
+ * until the matching outermost lttng_metadata_end().
+ */
+static
+void lttng_metadata_begin(struct lttng_session *session)
+{
+	if (atomic_inc_return(&session->metadata_cache->producing) == 1)
+		mutex_lock(&session->metadata_cache->lock);
+}
+
+/*
+ * End an atomic metadata transaction. Warns if called without a
+ * matching lttng_metadata_begin(). Only the outermost end (producing
+ * count transition 1 -> 0) wakes up the metadata stream readers and
+ * releases the metadata cache lock taken at transaction begin.
+ */
+static
+void lttng_metadata_end(struct lttng_session *session)
+{
+	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
+	if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
+		struct lttng_metadata_stream *stream;
+
+		list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
+			wake_up_interruptible(&stream->read_wait);
+		mutex_unlock(&session->metadata_cache->lock);
+	}
+}
+
/*
* Write the metadata to the metadata cache.
* Must be called with sessions_mutex held.
* The metadata cache lock protects us from concurrent read access from
* thread outputting metadata content to ring buffer.
+ * The content of the printf is printed as a single atomic metadata
+ * transaction.
*/
int lttng_metadata_printf(struct lttng_session *session,
const char *fmt, ...)
char *str;
size_t len;
va_list ap;
- struct lttng_metadata_stream *stream;
- WARN_ON_ONCE(!READ_ONCE(session->active));
+ WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
va_start(ap, fmt);
str = kvasprintf(GFP_KERNEL, fmt, ap);
return -ENOMEM;
len = strlen(str);
- mutex_lock(&session->metadata_cache->lock);
+ WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
if (session->metadata_cache->metadata_written + len >
session->metadata_cache->cache_alloc) {
char *tmp_cache_realloc;
session->metadata_cache->metadata_written,
str, len);
session->metadata_cache->metadata_written += len;
- mutex_unlock(&session->metadata_cache->lock);
kfree(str);
- list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
- wake_up_interruptible(&stream->read_wait);
-
return 0;
err:
- mutex_unlock(&session->metadata_cache->lock);
kfree(str);
return -ENOMEM;
}
/*
* Must be called with sessions_mutex held.
+ * The entire event metadata is printed as a single atomic metadata
+ * transaction.
*/
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
{
int ret = 0;
- if (event->metadata_dumped || !READ_ONCE(session->active))
+ if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
return 0;
if (chan->channel_type == METADATA_CHANNEL)
return 0;
+ lttng_metadata_begin(session);
+
ret = lttng_metadata_printf(session,
"event {\n"
" name = \"%s\";\n"
event->metadata_dumped = 1;
end:
+ lttng_metadata_end(session);
return ret;
}
/*
* Must be called with sessions_mutex held.
+ * The entire channel metadata is printed as a single atomic metadata
+ * transaction.
*/
static
int _lttng_channel_metadata_statedump(struct lttng_session *session,
{
int ret = 0;
- if (chan->metadata_dumped || !READ_ONCE(session->active))
+ if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
return 0;
if (chan->channel_type == METADATA_CHANNEL)
return 0;
+ lttng_metadata_begin(session);
+
WARN_ON_ONCE(!chan->header_type);
ret = lttng_metadata_printf(session,
"stream {\n"
chan->metadata_dumped = 1;
end:
+ lttng_metadata_end(session);
return ret;
}
struct lttng_event *event;
int ret = 0;
- if (!READ_ONCE(session->active))
+ if (!LTTNG_READ_ONCE(session->active))
return 0;
+
+ lttng_metadata_begin(session);
+
if (session->metadata_dumped)
goto skip_session;
}
session->metadata_dumped = 1;
end:
+ lttng_metadata_end(session);
return ret;
}