This patch makes BT_ASSERT() always evaluate its condition.
The goal is to make a lot of _slow path_ assertions evaluated
regardless of debug or non-debug mode. This will help detect project
programming errors and hopefully end users will report them.
What used to be BT_ASSERT() is now BT_ASSERT_DBG(), that is, only
evaluated in debug mode. This is similar to how BT_ASSERT_PRE() is
always evaluated while BT_ASSERT_PRE_DEV() is only evaluated in
developer mode.
I went over each single BT_ASSERT() statement in `src` and decided
whether to use BT_ASSERT() or BT_ASSERT_DBG(). My strategy is similar
to what we do for BT_ASSERT_PRE(), that is:
* Use BT_ASSERT_DBG() in anything potentially executed once or more per
_event_ message.
Other messages occur so infrequently compared to event messages that
we don't care.
* Use BT_ASSERT_DBG() in property getters and object borrowing
functions.
We don't know how frequently the user can call those, so we don't take
any chance.
Everything else uses BT_ASSERT(), for example:
* In the library and plugins, everything related to metadata objects.
* Everything related to graph topology.
* Everything in the CLI.
* Network communication functions in `src.ctf.lttng-live`.
I believe some BT_ASSERT_DBG() statements could still be converted to
BT_ASSERT(), but this patch is a good starting point.
I left the whole CTF writer code with BT_ASSERT_DBG() for the moment
as this library is not a priority.
All the tests use BT_ASSERT() because they are not an end user use case;
we don't care if they are less efficient in production mode.
This change does not seem to affect the production build's performance;
I compared, before and after, and I do not get a run time difference
that's greater than the observed measurement error.
Signed-off-by: Philippe Proulx <eeppeliteloop@gmail.com>
Change-Id: Ia74951a39b1fcc79579661562f6a98ed208fd9bb
Reviewed-on: https://review.lttng.org/c/babeltrace/+/2217
Tested-by: jenkins <jenkins@lttng.org>
PyObject *py_message_iter = bt_self_message_iterator_get_data(message_iterator);
PyObject *py_method_result = NULL;
- BT_ASSERT(py_message_iter);
-
+ BT_ASSERT_DBG(py_message_iter);
py_method_result = PyObject_CallMethod(py_message_iter,
"_bt_next_from_native", NULL);
if (!py_method_result) {
*count = 1;
/* Overflow errors should never happen. */
- BT_ASSERT(!PyErr_Occurred());
+ BT_ASSERT_DBG(!PyErr_Occurred());
status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK;
PyObject *py_method_result = NULL;
bt_component_class_sink_consume_method_status status;
- BT_ASSERT(py_comp);
+ BT_ASSERT_DBG(py_comp);
py_method_result = PyObject_CallMethod(py_comp,
"_user_consume", NULL);
bt_self_component *self_component = bt_self_message_iterator_borrow_component(self_message_iterator);
PyObject *py_comp;
- BT_ASSERT(self_component);
+ BT_ASSERT_DBG(self_component);
py_comp = bt_self_component_get_data(self_component);
- BT_ASSERT(py_comp);
+ BT_ASSERT_DBG(py_comp);
/* Return new reference */
Py_INCREF(py_comp);
#define BABELTRACE_ASSERT_INTERNAL_H
/*
- * Copyright (c) 2018 EfficiOS Inc. and Linux Foundation
- * Copyright (c) 2018 Philippe Proulx <pproulx@efficios.com>
+ * Copyright (c) 2018-2019 EfficiOS Inc. and Linux Foundation
+ * Copyright (c) 2018-2019 Philippe Proulx <pproulx@efficios.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
#include "common/macros.h"
-#ifdef BT_DEBUG_MODE
-
extern void bt_common_assert_failed(const char *file, int line,
- const char *func, const char *assertion) __attribute__((noreturn));
+ const char *func, const char *assertion)
+ __attribute__((noreturn));
/*
* Internal assertion (to detect logic errors on which the library user
- * has no influence). Use BT_ASSERT_PRE() or BT_ASSERT_POST() to check
+ * has no influence). Use BT_ASSERT_PRE*() or BT_ASSERT_POST*() to check
* preconditions or postconditions which must be directly or indirectly
* satisfied by the library user.
+ *
+ * BT_ASSERT() is enabled in both debug and non-debug modes.
*/
#define BT_ASSERT(_cond) \
do { \
/*
* Marks a function as being only used within a BT_ASSERT() context.
*/
-# define BT_ASSERT_FUNC
-#else
+#define BT_ASSERT_FUNC
+
+#ifdef BT_DEBUG_MODE
+
+/*
+ * Debug mode internal assertion.
+ */
+#define BT_ASSERT_DBG(_cond) BT_ASSERT(_cond)
+
/*
- * When BT_DEBUG_MODE is not defined, define BT_ASSERT() macro to the following
- * to trick the compiler into thinking that the variable passed as condition to
- * the assertion is used. This is to prevent set-but-not-used warnings from the
- * compiler when assertions are disabled. The `sizeof` operator also makes sure
- * that the `_cond` expression is not evaluated, thus preventing unwanted side
- * effects.
+ * Marks a function as being only used within a BT_ASSERT_DBG() context.
+ */
+#define BT_ASSERT_DBG_FUNC
+
+#else /* BT_DEBUG_MODE */
+
+/*
+ * When `BT_DEBUG_MODE` is _not_ defined, define BT_ASSERT_DBG() macro
+ * to the following to trick the compiler into thinking that the
+ * variable passed as condition to the assertion is used. This is to
+ * prevent set-but-not-used warnings from the compiler when assertions
+ * are disabled. The sizeof() operator also makes sure that the `_cond`
+ * expression is not evaluated, thus preventing unwanted side effects.
*
- * In-depth explanation: https://stackoverflow.com/questions/37411809/how-to-elegantly-fix-this-unused-variable-warning/37412551#37412551
+ * In-depth explanation:
+ * https://stackoverflow.com/questions/37411809/how-to-elegantly-fix-this-unused-variable-warning/37412551#37412551
*/
-# define BT_ASSERT(_cond) ((void) sizeof((void) (_cond), 0))
-# define BT_ASSERT_FUNC __attribute__((unused))
+# define BT_ASSERT_DBG(_cond) ((void) sizeof((void) (_cond), 0))
+# define BT_ASSERT_DBG_FUNC __attribute__((unused))
+
#endif /* BT_DEBUG_MODE */
#endif /* BABELTRACE_ASSERT_INTERNAL_H */
{
const char *ch;
bool printable = true;
- BT_ASSERT(input);
+ BT_ASSERT_DBG(input);
for (ch = input; *ch != '\0'; ch++) {
if (!isprint(*ch) && *ch != '\n' && *ch != '\r' &&
strncpy(_tmp_fmt, *out_fmt_ch, _tmp_fmt_size); \
_tmp_fmt[_tmp_fmt_size] = '\0'; \
_count = snprintf(*buf_ch, _size, _tmp_fmt, __VA_ARGS__); \
- BT_ASSERT(_count >= 0); \
+ BT_ASSERT_DBG(_count >= 0); \
*buf_ch += MIN(_count, _size); \
} while (0)
const char *fmt_ch = fmt;
char *buf_ch = buf;
- BT_ASSERT(buf);
- BT_ASSERT(fmt);
+ BT_ASSERT_DBG(buf);
+ BT_ASSERT_DBG(fmt);
while (*fmt_ch != '\0') {
switch (*fmt_ch) {
case '%':
- BT_ASSERT(fmt_ch[1] != '\0');
+ BT_ASSERT_DBG(fmt_ch[1] != '\0');
if (fmt_ch[1] == intro) {
handle_specifier(priv_data, &buf_ch,
uint64_t sep_count;
uint64_t new_len;
- BT_ASSERT(digits_per_group > 0);
- BT_ASSERT(sep != '\0');
+ BT_ASSERT_DBG(digits_per_group > 0);
+ BT_ASSERT_DBG(sep != '\0');
/* Compute new length of `str` */
orig_len = strlen(str);
- BT_ASSERT(orig_len > 0);
+ BT_ASSERT_DBG(orig_len > 0);
sep_count = (orig_len - 1) / digits_per_group;
new_len = strlen(str) + sep_count;
gchar * const *line;
unsigned int i;
- BT_ASSERT(str);
- BT_ASSERT(indent < total_length);
- BT_ASSERT(tmp_line);
- BT_ASSERT(folded);
+ BT_ASSERT_DBG(str);
+ BT_ASSERT_DBG(indent < total_length);
+ BT_ASSERT_DBG(tmp_line);
+ BT_ASSERT_DBG(folded);
if (strlen(str) == 0) {
/* Empty input string: empty output string */
/* Split lines */
lines = g_strsplit(str, "\n", 0);
- BT_ASSERT(lines);
+ BT_ASSERT_DBG(lines);
/* For each source line */
for (line = lines; *line; line++) {
/* Split words */
line_words = g_strsplit(*line, " ", 0);
- BT_ASSERT(line_words);
+ BT_ASSERT_DBG(line_words);
/*
* Indent for first line (we know there's at least one
g_strfreev(lines);
}
- BT_ASSERT(!line_words);
+ BT_ASSERT_DBG(!line_words);
if (tmp_line) {
g_string_free(tmp_line, TRUE);
size_t i = 0;
ssize_t ret;
- BT_ASSERT(buf);
+ BT_ASSERT_DBG(buf);
/* Never return an overflow value. */
- BT_ASSERT(count <= SSIZE_MAX);
+ BT_ASSERT_DBG(count <= SSIZE_MAX);
do {
ret = read(fd, buf + i, count - i);
}
}
i += ret;
- BT_ASSERT(i <= count);
+ BT_ASSERT_DBG(i <= count);
} while (count - i > 0 && ret > 0);
end:
GString *str = g_string_new(NULL);
uint64_t i;
- BT_ASSERT(path);
+ BT_ASSERT_DBG(path);
if (!str) {
goto end;
uint64_t value_period_cycles;
int64_t ns_to_add;
- BT_ASSERT(raw_value);
+ BT_ASSERT_DBG(raw_value);
/* Compute offset part of requested value, in nanoseconds */
if (!bt_safe_to_mul_int64(cc_offset_seconds, NS_PER_S_I)) {
BT_HIDDEN
void bt_uuid_to_str(const bt_uuid_t uuid_in, char *str_out)
{
- BT_ASSERT(uuid_in);
- BT_ASSERT(str_out);
+ BT_ASSERT_DBG(uuid_in);
+ BT_ASSERT_DBG(str_out);
sprintf(str_out, BT_UUID_FMT, BT_UUID_FMT_VALUES(uuid_in));
}
int ret = 0;
bt_uuid_t uuid_scan;
- BT_ASSERT(uuid_out);
- BT_ASSERT(str_in);
+ BT_ASSERT_DBG(uuid_out);
+ BT_ASSERT_DBG(str_in);
if (strnlen(str_in, BT_UUID_STR_LEN + 1) != BT_UUID_STR_LEN) {
ret = -1;
/* ferror() is set, errno set by fgetc(). */
return -1;
}
- BT_ASSERT(feof(stream));
+ BT_ASSERT_DBG(feof(stream));
found_eof = 1;
break;
}
* arguments using BT_LOGF(), and abort.
*
* To assert that a postcondition is satisfied or that some internal
- * object/context/value is in the expected state, use BT_ASSERT().
+ * object/context/value is in the expected state, use BT_ASSERT_DBG().
*/
# define BT_CTF_ASSERT_PRE(_cond, _fmt, ...) \
do { \
BT_LOGD("Freezing attributes object: value-addr=%p", attr_obj);
count = bt_ctf_value_array_get_length(bt_ctf_private_value_as_value(attr_obj));
- BT_ASSERT(count >= 0);
+ BT_ASSERT_DBG(count >= 0);
/*
* We do not freeze the array value object itself here, since
struct bt_ctf_clock_class *clock_class_b)
{
int ret = 1;
- BT_ASSERT(clock_class_a);
- BT_ASSERT(clock_class_b);
+ BT_ASSERT_DBG(clock_class_a);
+ BT_ASSERT_DBG(clock_class_b);
/* Name */
if (strcmp(clock_class_a->name->str, clock_class_b->name->str) != 0) {
bt_uuid_generate(cc_uuid);
ret = bt_ctf_clock_class_set_uuid(clock->clock_class, cc_uuid);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
return clock;
error:
BT_HIDDEN
void bt_ctf_event_class_common_freeze(struct bt_ctf_event_class_common *event_class)
{
- BT_ASSERT(event_class);
+ BT_ASSERT_DBG(event_class);
if (event_class->frozen) {
return;
{
int ret = 0;
- BT_ASSERT(event_class);
- BT_ASSERT(expected_clock_class);
+ BT_ASSERT_DBG(event_class);
+ BT_ASSERT_DBG(expected_clock_class);
ret = bt_ctf_field_type_common_validate_single_clock_class(
event_class->context_field_type,
expected_clock_class);
goto end;
}
- BT_ASSERT(bt_ctf_field_type_common_get_type_id(
+ BT_ASSERT_DBG(bt_ctf_field_type_common_get_type_id(
event_class->common.payload_field_type) ==
BT_CTF_FIELD_TYPE_ID_STRUCT);
ret = bt_ctf_field_type_structure_add_field(
goto end;
}
- BT_ASSERT(bt_ctf_field_type_common_get_type_id(
+ BT_ASSERT_DBG(bt_ctf_field_type_common_get_type_id(
event_class->common.payload_field_type) ==
BT_CTF_FIELD_TYPE_ID_STRUCT);
ret = bt_ctf_field_type_common_structure_get_field_count(
goto end;
}
- BT_ASSERT(bt_ctf_field_type_common_get_type_id(
+ BT_ASSERT_DBG(bt_ctf_field_type_common_get_type_id(
event_class->common.payload_field_type) ==
BT_CTF_FIELD_TYPE_ID_STRUCT);
ret = bt_ctf_field_type_structure_get_field_by_index(
goto end;
}
- BT_ASSERT(bt_ctf_field_type_common_get_type_id(
+ BT_ASSERT_DBG(bt_ctf_field_type_common_get_type_id(
event_class->common.payload_field_type) ==
BT_CTF_FIELD_TYPE_ID_STRUCT);
name_quark = g_quark_try_string(name);
int ret = 0;
struct bt_ctf_value *attr_value = NULL;
- BT_ASSERT(event_class);
- BT_ASSERT(context);
+ BT_ASSERT_DBG(event_class);
+ BT_ASSERT_DBG(context);
BT_LOGD("Serializing event class's metadata: "
"event-class-addr=%p, event-class-name=\"%s\", "
"event-class-id=%" PRId64 ", metadata-context-addr=%p",
/* Serialize attributes */
g_string_append_printf(context->string, "\tname = \"%s\";\n",
event_class->common.name->str);
- BT_ASSERT(event_class->common.id >= 0);
+ BT_ASSERT_DBG(event_class->common.id >= 0);
g_string_append_printf(context->string, "\tid = %" PRId64 ";\n",
event_class->common.id);
g_string_append_printf(context->string, "\tstream_id = %" PRId64 ";\n",
goto end;
}
- BT_ASSERT(event_class->common.payload_field_type->id ==
+ BT_ASSERT_DBG(event_class->common.payload_field_type->id ==
BT_CTF_FIELD_TYPE_ID_STRUCT);
name_quark = g_quark_try_string(name);
if (!name_quark) {
struct bt_ctf_stream_class_common *bt_ctf_event_class_common_borrow_stream_class(
struct bt_ctf_event_class_common *event_class)
{
- BT_ASSERT(event_class);
+ BT_ASSERT_DBG(event_class);
return (void *) bt_ctf_object_borrow_parent(&event_class->base);
}
struct bt_ctf_event_class_common *event_class)
{
BT_CTF_ASSERT_PRE_NON_NULL(event_class, "Event class");
- BT_ASSERT(event_class->name);
+ BT_ASSERT_DBG(event_class->name);
return event_class->name->str;
}
struct bt_ctf_private_value *environment = NULL;
stream_class = bt_ctf_event_class_common_borrow_stream_class(event_class);
- BT_ASSERT(stream_class);
+ BT_ASSERT_DBG(stream_class);
trace = bt_ctf_stream_class_common_borrow_trace(stream_class);
if (trace) {
BT_LOGD_STR("Event class is part of a trace.");
packet_header_type =
bt_ctf_trace_common_borrow_packet_header_field_type(trace);
trace_valid = trace->valid;
- BT_ASSERT(trace_valid);
+ BT_ASSERT_DBG(trace_valid);
environment = trace->environment;
}
int ret = 0;
struct bt_ctf_stream_class_common *stream_class;
- BT_ASSERT(event);
+ BT_ASSERT_DBG(event);
if (event->header_field) {
ret = bt_ctf_field_common_validate_recursive(
event->header_field->field);
* We should not have been able to create the event without associating
* the event class to a stream class.
*/
- BT_ASSERT(stream_class);
+ BT_ASSERT_DBG(stream_class);
if (stream_class->event_context_field_type) {
ret = bt_ctf_field_common_validate_recursive(
void _bt_ctf_event_common_set_is_frozen(struct bt_ctf_event_common *event,
bool is_frozen)
{
- BT_ASSERT(event);
+ BT_ASSERT_DBG(event);
BT_LOGD("Freezing event: addr=%p, "
"event-class-name=\"%s\", event-class-id=%" PRId64,
event, bt_ctf_event_class_common_get_name(event->class),
event_class);
/* The event class was frozen when added to its stream class */
- BT_ASSERT(event_class->frozen);
+ BT_ASSERT_DBG(event_class->frozen);
trace = bt_ctf_stream_class_common_borrow_trace(stream_class);
if (must_be_in_trace) {
void destroy_event_header_field(struct bt_ctf_field_wrapper *field_wrapper,
struct bt_ctf_stream_class *stream_class)
{
- BT_ASSERT(field_wrapper);
+ BT_ASSERT_DBG(field_wrapper);
bt_ctf_object_put_ref(field_wrapper->field);
bt_ctf_field_wrapper_destroy(field_wrapper);
}
void release_event_header_field(struct bt_ctf_field_wrapper *field_wrapper,
struct bt_ctf_event_common *event_common)
{
- BT_ASSERT(field_wrapper);
+ BT_ASSERT_DBG(field_wrapper);
BT_CTF_OBJECT_PUT_REF_AND_RESET(field_wrapper->field);
bt_ctf_field_wrapper_destroy(field_wrapper);
}
BT_HIDDEN
struct bt_ctf_stream *bt_ctf_event_borrow_stream(struct bt_ctf_event *event)
{
- BT_ASSERT(event);
+ BT_ASSERT_DBG(event);
return (struct bt_ctf_stream *)
bt_ctf_object_borrow_parent(&BT_CTF_TO_COMMON(event)->base);
}
{
int ret = 0;
- BT_ASSERT(event);
- BT_ASSERT(ctfser);
+ BT_ASSERT_DBG(event);
+ BT_ASSERT_DBG(ctfser);
BT_LOGT_STR("Serializing event's context field.");
if (event->common.context_field) {
struct bt_ctf_event_class_common *bt_ctf_event_common_borrow_class(
struct bt_ctf_event_common *event)
{
- BT_ASSERT(event);
+ BT_ASSERT_DBG(event);
return event->class;
}
{
struct bt_ctf_field_path *new_path;
- BT_ASSERT(path);
+ BT_ASSERT_DBG(path);
BT_LOGD("Copying field path: addr=%p, index-count=%u",
path, path->indexes->len);
new_path = bt_ctf_field_path_create();
bool init_bo, bt_ctf_object_release_func release_func,
struct bt_ctf_field_type_common_methods *methods)
{
- BT_ASSERT(ft && (ft->id > BT_CTF_FIELD_TYPE_ID_UNKNOWN) &&
+ BT_ASSERT_DBG(ft && (ft->id > BT_CTF_FIELD_TYPE_ID_UNKNOWN) &&
(ft->id < BT_CTF_FIELD_TYPE_ID_NR));
bt_ctf_object_init_shared(&ft->base, release_func);
BT_LOGD("Setting initial field type's byte order: bo=%s",
bt_ctf_byte_order_string(bo));
ret = bt_ctf_field_type_common_set_byte_order(ft, bo);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
}
ft->alignment = 1;
{
struct bt_ctf_field_type_common_integer *int_ft = BT_CTF_FROM_COMMON(ft);
- BT_ASSERT(size > 0);
+ BT_ASSERT_DBG(size > 0);
BT_LOGD("Initializing common integer field type object: size=%u",
size);
ft->id = BT_CTF_FIELD_TYPE_ID_INTEGER;
{
struct bt_ctf_field_type_common_enumeration *enum_ft = BT_CTF_FROM_COMMON(ft);
- BT_ASSERT(container_ft);
+ BT_ASSERT_DBG(container_ft);
BT_LOGD("Initializing common enumeration field type object: int-ft-addr=%p",
container_ft);
ft->id = BT_CTF_FIELD_TYPE_ID_ENUM;
{
struct bt_ctf_field_type_common_array *array_ft = BT_CTF_FROM_COMMON(ft);
- BT_ASSERT(element_ft);
+ BT_ASSERT_DBG(element_ft);
BT_LOGD("Initializing common array field type object: element-ft-addr=%p, "
"length=%u", element_ft, length);
ft->id = BT_CTF_FIELD_TYPE_ID_ARRAY;
{
struct bt_ctf_field_type_common_sequence *seq_ft = BT_CTF_FROM_COMMON(ft);
- BT_ASSERT(element_ft);
- BT_ASSERT(length_field_name);
- BT_ASSERT(bt_ctf_identifier_is_valid(length_field_name));
+ BT_ASSERT_DBG(element_ft);
+ BT_ASSERT_DBG(length_field_name);
+ BT_ASSERT_DBG(bt_ctf_identifier_is_valid(length_field_name));
BT_LOGD("Initializing common sequence field type object: element-ft-addr=%p, "
"length-field-name=\"%s\"", element_ft, length_field_name);
ft->id = BT_CTF_FIELD_TYPE_ID_SEQUENCE;
{
struct bt_ctf_field_type_common_variant *var_ft = BT_CTF_FROM_COMMON(ft);
- BT_ASSERT(!tag_name || bt_ctf_identifier_is_valid(tag_name));
+ BT_ASSERT_DBG(!tag_name || bt_ctf_identifier_is_valid(tag_name));
BT_LOGD("Initializing common variant field type object: "
"tag-ft-addr=%p, tag-field-name=\"%s\"",
tag_ft, tag_name);
member_ft = &choice->type;
member_name = &choice->name;
- BT_ASSERT(!choice->ranges);
+ BT_ASSERT_DBG(!choice->ranges);
choice->ranges = g_array_new(FALSE, TRUE,
sizeof(struct bt_ctf_field_type_common_variant_choice_range));
- BT_ASSERT(choice->ranges);
+ BT_ASSERT_DBG(choice->ranges);
} else {
struct bt_ctf_field_type_common_structure_field *field =
&g_array_index(members,
bt_ctf_field_type_common_structure_get_field_count(ft);
int64_t i;
- BT_ASSERT(field_count >= 0);
+ BT_ASSERT_DBG(field_count >= 0);
for (i = 0; i < field_count; ++i) {
const char *field_name;
ret = bt_ctf_field_type_common_structure_borrow_field_by_index(ft,
&field_name, &child_ft, i);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
ret = bt_ctf_field_type_common_validate(child_ft);
if (ret) {
BT_LOGW("Invalid structure field type: "
ret = bt_ctf_field_type_common_variant_borrow_field_by_index(ft,
&field_name, &child_ft, i);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
ret = bt_ctf_field_type_common_validate(child_ft);
if (ret) {
BT_LOGW("Invalid variant field type: "
{
int ret = 0;
- BT_ASSERT(ft);
+ BT_ASSERT_DBG(ft);
if (ft->valid) {
/* Already marked as valid */
if (mapping_name) {
*mapping_name = g_quark_to_string(mapping->string);
- BT_ASSERT(*mapping_name);
+ BT_ASSERT_DBG(*mapping_name);
}
if (range_begin) {
if (mapping_name) {
*mapping_name = g_quark_to_string(mapping->string);
- BT_ASSERT(*mapping_name);
+ BT_ASSERT_DBG(*mapping_name);
}
if (range_begin) {
GQuark name_quark;
uint64_t i;
- BT_ASSERT(ft);
- BT_ASSERT(field_name);
- BT_ASSERT(field_type);
- BT_ASSERT(ft->id == BT_CTF_FIELD_TYPE_ID_STRUCT);
+ BT_ASSERT_DBG(ft);
+ BT_ASSERT_DBG(field_name);
+ BT_ASSERT_DBG(field_type);
+ BT_ASSERT_DBG(ft->id == BT_CTF_FIELD_TYPE_ID_STRUCT);
name_quark = g_quark_from_string(field_name);
for (i = 0; i < struct_ft->fields->len; i++) {
if (field_name) {
*field_name = g_quark_to_string(field->name);
- BT_ASSERT(*field_name);
+ BT_ASSERT_DBG(*field_name);
}
return 0;
if (field_name) {
*field_name = g_quark_to_string(choice->name);
- BT_ASSERT(*field_name);
+ BT_ASSERT_DBG(*field_name);
}
return 0;
uint64_t i;
struct bt_ctf_field_type_common_variant *var_ft = BT_CTF_FROM_COMMON(ft);
- BT_ASSERT(ft);
- BT_ASSERT(ft->id == BT_CTF_FIELD_TYPE_ID_VARIANT);
+ BT_ASSERT_DBG(ft);
+ BT_ASSERT_DBG(ft->id == BT_CTF_FIELD_TYPE_ID_VARIANT);
if (bt_ctf_field_type_common_variant_update_choices(ft)) {
ret = INT64_C(-1);
BT_CTF_ASSERT_PRE_NON_NULL(ft, "Field type");
BT_CTF_ASSERT_PRE_CTF_FT_COMMON_HAS_ID(ft, BT_CTF_FIELD_TYPE_ID_ARRAY,
"Field type");
- BT_ASSERT(array_ft && array_ft->element_ft);
+ BT_ASSERT_DBG(array_ft && array_ft->element_ft);
return array_ft->element_ft;
}
struct bt_ctf_field_type_common *element_ft =
bt_ctf_field_type_common_sequence_borrow_element_field_type(ft);
- BT_ASSERT(element_ft);
+ BT_ASSERT_DBG(element_ft);
ret = bt_ctf_field_type_common_get_alignment(element_ft);
break;
}
struct bt_ctf_field_type_common *element_ft =
bt_ctf_field_type_common_array_borrow_element_field_type(ft);
- BT_ASSERT(element_ft);
+ BT_ASSERT_DBG(element_ft);
ret = bt_ctf_field_type_common_get_alignment(element_ft);
break;
}
element_count = bt_ctf_field_type_common_structure_get_field_count(
ft);
- BT_ASSERT(element_count >= 0);
+ BT_ASSERT_DBG(element_count >= 0);
for (i = 0; i < element_count; i++) {
struct bt_ctf_field_type_common *field = NULL;
ret = bt_ctf_field_type_common_structure_borrow_field_by_index(
ft, NULL, &field, i);
- BT_ASSERT(ret == 0);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(ret == 0);
+ BT_ASSERT_DBG(field);
field_alignment = bt_ctf_field_type_common_get_alignment(
field);
if (field_alignment < 0) {
goto end;
}
- BT_ASSERT(ret == BT_CTF_BYTE_ORDER_NATIVE ||
+ BT_ASSERT_DBG(ret == BT_CTF_BYTE_ORDER_NATIVE ||
ret == BT_CTF_BYTE_ORDER_LITTLE_ENDIAN ||
ret == BT_CTF_BYTE_ORDER_BIG_ENDIAN ||
ret == BT_CTF_BYTE_ORDER_NETWORK);
return;
}
- BT_ASSERT(ft->methods->freeze);
+ BT_ASSERT_DBG(ft->methods->freeze);
ft->methods->freeze(ft);
}
struct bt_ctf_field_type_common *ft_copy = NULL;
BT_CTF_ASSERT_PRE_NON_NULL(ft, "Field type");
- BT_ASSERT(ft->methods->copy);
+ BT_ASSERT_DBG(ft->methods->copy);
ft_copy = ft->methods->copy(ft);
if (!ft_copy) {
BT_LOGE_STR("Cannot copy field type.");
goto end;
}
- BT_ASSERT(var_ft->tag_ft);
+ BT_ASSERT_DBG(var_ft->tag_ft);
is_signed = !!var_ft->tag_ft->container_ft->is_signed;
for (i = 0; i < var_ft->choices->len; i++) {
goto end;
}
- BT_ASSERT(choice->ranges);
+ BT_ASSERT_DBG(choice->ranges);
g_array_set_size(choice->ranges, 0);
while (bt_ctf_field_type_enumeration_mapping_iterator_next(iter) == 0) {
&range.lower.u, &range.upper.u);
}
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
g_array_append_val(choice->ranges, range);
}
goto end;
}
- BT_ASSERT(ft_a->methods->compare);
+ BT_ASSERT_DBG(ft_a->methods->compare);
ret = ft_a->methods->compare(ft_a, ft_b);
if (ret == 1) {
BT_LOGT("Field types differ: ft-a-addr=%p, ft-b-addr=%p",
goto end;
}
- BT_ASSERT(expected_clock_class);
+ BT_ASSERT_DBG(expected_clock_class);
switch (ft->id) {
case BT_CTF_FIELD_TYPE_ID_INTEGER:
abort();
}
- BT_ASSERT(sub_ft);
+ BT_ASSERT_DBG(sub_ft);
ret = bt_ctf_field_type_common_validate_single_clock_class(sub_ft,
expected_clock_class);
break;
ret = bt_ctf_field_type_common_structure_borrow_field_by_index(
ft, &name, &member_type, i);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
ret = bt_ctf_field_type_common_validate_single_clock_class(
member_type, expected_clock_class);
if (ret) {
ret = bt_ctf_field_type_common_variant_borrow_field_by_index(
ft, &name, &member_type, i);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
ret = bt_ctf_field_type_common_validate_single_clock_class(
member_type, expected_clock_class);
if (ret) {
struct bt_ctf_field_type_common *type_common = (void *) type;
bt_ctf_field_type_serialize_func serialize_func;
- BT_ASSERT(type);
- BT_ASSERT(context);
+ BT_ASSERT_DBG(type);
+ BT_ASSERT_DBG(context);
/* Make sure field type is valid before serializing it */
ret = bt_ctf_field_type_common_validate((void *) type);
const char *clock_name = bt_ctf_clock_class_get_name(
integer->mapped_clock_class);
- BT_ASSERT(clock_name);
+ BT_ASSERT_DBG(clock_name);
g_string_append_printf(context->string,
"; map = clock.%s.value", clock_name);
}
"ft-addr=%p, metadata-context-addr=%p", type, context);
container_type =
bt_ctf_field_type_common_enumeration_borrow_container_field_type(type);
- BT_ASSERT(container_type);
+ BT_ASSERT_DBG(container_type);
container_signed = bt_ctf_field_type_common_integer_is_signed(
container_type);
- BT_ASSERT(container_signed >= 0);
+ BT_ASSERT_DBG(container_signed >= 0);
g_string_append(context->string, "enum : ");
BT_LOGD_STR("Serializing CTF writer enumeration field type's container field type's metadata.");
ret = bt_ctf_field_type_serialize_recursive(
"Tag field");
container = bt_ctf_field_enumeration_borrow_container(tag_field);
- BT_ASSERT(container);
+ BT_ASSERT_DBG(container);
if (var_ft->tag_ft->container_ft->is_signed) {
int64_t val;
ret = bt_ctf_field_integer_signed_get_value(container,
&val);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
choice_index = bt_ctf_field_type_common_variant_find_choice_index(
(void *) ft, (uint64_t) val, true);
} else {
ret = bt_ctf_field_integer_unsigned_get_value(container,
&val);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
choice_index = bt_ctf_field_type_common_variant_find_choice_index(
(void *) ft, val, false);
}
ret = bt_ctf_field_type_variant_get_field_by_index(ft, NULL,
&ret_ft, choice_index);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
end:
return ret_ft;
/* Copy ranges */
copy_entry->ranges = g_array_new(FALSE, TRUE,
sizeof(struct bt_ctf_field_type_common_variant_choice_range));
- BT_ASSERT(copy_entry->ranges);
+ BT_ASSERT_DBG(copy_entry->ranges);
g_array_set_size(copy_entry->ranges, entry->ranges->len);
for (range_i = 0; range_i < entry->ranges->len; range_i++) {
void bt_ctf_field_wrapper_destroy(struct bt_ctf_field_wrapper *field_wrapper)
{
BT_LOGD("Destroying field wrapper: addr=%p", field_wrapper);
- BT_ASSERT(!field_wrapper->field);
+ BT_ASSERT_DBG(!field_wrapper->field);
BT_LOGD_STR("Putting stream class.");
g_free(field_wrapper);
}
{
struct bt_ctf_field_wrapper *field_wrapper = NULL;
- BT_ASSERT(pool);
- BT_ASSERT(ft);
+ BT_ASSERT_DBG(pool);
+ BT_ASSERT_DBG(ft);
field_wrapper = bt_ctf_object_pool_create_object(pool);
if (!field_wrapper) {
BT_LOGE("Cannot allocate one field wrapper");
goto end;
}
- BT_ASSERT(field_wrapper->field);
+ BT_ASSERT_DBG(field_wrapper->field);
end:
return field_wrapper;
struct bt_ctf_field_common *copy = NULL;
BT_CTF_ASSERT_PRE_NON_NULL(field, "Field");
- BT_ASSERT(field_type_common_has_known_id(field->type));
- BT_ASSERT(field->methods->copy);
+ BT_ASSERT_DBG(field_type_common_has_known_id(field->type));
+ BT_ASSERT_DBG(field->methods->copy);
copy = field->methods->copy(field);
if (!copy) {
BT_LOGW("Cannot create field: ft-addr=%p", field->type);
uint64_t i;
BT_LOGD("Initializing common array field object: ft-addr=%p", type);
- BT_ASSERT(type);
+ BT_ASSERT_DBG(type);
bt_ctf_field_common_initialize(field, type, is_shared,
release_func, methods);
array_length = array_type->length;
int ret = 0;
BT_LOGD("Initializing common sequence field object: ft-addr=%p", type);
- BT_ASSERT(type);
+ BT_ASSERT_DBG(type);
bt_ctf_field_common_initialize(field, type, is_shared,
release_func, methods);
sequence->elements = g_ptr_array_new();
int ret = 0;
struct bt_ctf_field_common_structure *structure = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
for (i = 0; i < structure->fields->len; i++) {
ret = bt_ctf_field_common_validate_recursive(
this_ret = bt_ctf_field_type_common_structure_borrow_field_by_index(
field->type, &name, NULL, i);
- BT_ASSERT(this_ret == 0);
+ BT_ASSERT_DBG(this_ret == 0);
BT_CTF_ASSERT_PRE_MSG("Invalid structure field's field: "
"struct-field-addr=%p, field-name=\"%s\", "
"index=%" PRId64 ", field-addr=%p",
int ret = 0;
struct bt_ctf_field_common_variant *variant = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
if (!variant->current_field) {
ret = -1;
int ret = 0;
struct bt_ctf_field_common_array *array = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
for (i = 0; i < array->elements->len; i++) {
ret = bt_ctf_field_common_validate_recursive((void *) array->elements->pdata[i]);
int ret = 0;
struct bt_ctf_field_common_sequence *sequence = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
for (i = 0; i < sequence->elements->len; i++) {
ret = bt_ctf_field_common_validate_recursive(
BT_HIDDEN
void bt_ctf_field_common_generic_reset(struct bt_ctf_field_common *field)
{
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
field->payload_set = false;
}
int64_t i;
struct bt_ctf_field_common_structure *structure = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
for (i = 0; i < structure->fields->len; i++) {
struct bt_ctf_field_common *member = structure->fields->pdata[i];
{
struct bt_ctf_field_common_variant *variant = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
variant->current_field = NULL;
}
size_t i;
struct bt_ctf_field_common_array *array = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
for (i = 0; i < array->elements->len; i++) {
struct bt_ctf_field_common *member = array->elements->pdata[i];
struct bt_ctf_field_common_sequence *sequence = BT_CTF_FROM_COMMON(field);
uint64_t i;
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
for (i = 0; i < sequence->elements->len; i++) {
if (sequence->elements->pdata[i]) {
BT_LOGD("Setting field object's frozen state: addr=%p, is-frozen=%d",
field, is_frozen);
- BT_ASSERT(field_type_common_has_known_id(field->type));
- BT_ASSERT(field->methods->set_is_frozen);
+ BT_ASSERT_DBG(field_type_common_has_known_id(field->type));
+ BT_ASSERT_DBG(field->methods->set_is_frozen);
field->methods->set_is_frozen(field, is_frozen);
end:
size_t i;
struct bt_ctf_field_common_structure *structure = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
for (i = 0; i < structure->fields->len; i++) {
is_set = bt_ctf_field_common_is_set_recursive(
struct bt_ctf_field_common_variant *variant = BT_CTF_FROM_COMMON(field);
bt_ctf_bool is_set = BT_CTF_FALSE;
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
if (variant->current_field) {
is_set = bt_ctf_field_common_is_set_recursive(
bt_ctf_bool is_set = BT_CTF_FALSE;
struct bt_ctf_field_common_array *array = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
for (i = 0; i < array->elements->len; i++) {
is_set = bt_ctf_field_common_is_set_recursive(array->elements->pdata[i]);
bt_ctf_bool is_set = BT_CTF_FALSE;
struct bt_ctf_field_common_sequence *sequence = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
if (!sequence->elements) {
goto end;
struct bt_ctf_field_common *field_common = (void *) field;
bt_ctf_field_serialize_recursive_func serialize_func;
- BT_ASSERT(ctfser);
+ BT_ASSERT_DBG(ctfser);
BT_CTF_ASSERT_PRE_NON_NULL(field, "Field");
- BT_ASSERT(field_common->spec.writer.serialize_func);
+ BT_ASSERT_DBG(field_common->spec.writer.serialize_func);
serialize_func = field_common->spec.writer.serialize_func;
return serialize_func(field_common, ctfser,
native_byte_order);
if (G_UNLIKELY(!member)) {
ret = bt_ctf_field_type_common_structure_borrow_field_by_index(
field->type, &field_name, NULL, i);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
BT_LOGW("Cannot serialize structure field's field: field is not set: "
"struct-field-addr=%p, "
"field-name=\"%s\", index=%" PRId64,
if (G_UNLIKELY(ret)) {
ret = bt_ctf_field_type_common_structure_borrow_field_by_index(
field->type, &field_name, NULL, i);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
BT_LOGW("Cannot serialize structure field's field: "
"struct-field-addr=%p, field-addr=%p, "
"field-name=\"%s\", index=%" PRId64,
enum bt_ctf_field_type_id type_id;
BT_CTF_ASSERT_PRE_NON_NULL(type, "Field type");
- BT_ASSERT(field_type_common_has_known_id((void *) type));
+ BT_ASSERT_DBG(field_type_common_has_known_id((void *) type));
BT_CTF_ASSERT_PRE(bt_ctf_field_type_common_validate((void *) type) == 0,
"Field type is invalid: ft-addr=%p", type);
type_id = bt_ctf_field_type_get_type_id(type);
}
ret = bt_ctf_field_integer_unsigned_get_value(length_field, &length);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
return bt_ctf_field_common_sequence_set_length((void *) field,
length, (bt_ctf_field_common_create_func) bt_ctf_field_create);
}
(void *) enum_field->container, &tag_uval);
}
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
ret = bt_ctf_field_common_variant_set_tag((void *) field, tag_uval,
is_signed);
if (ret) {
bt_ctf_object_put_ref(variant_field->tag);
variant_field->tag = bt_ctf_object_get_ref(tag_field);
current_field = bt_ctf_field_variant_get_current_field(field);
- BT_ASSERT(current_field);
+ BT_ASSERT_DBG(current_field);
end:
return current_field;
BT_CTF_ASSERT_PRE_NON_NULL(field, "Enumeration field");
BT_CTF_ASSERT_PRE_CTF_FIELD_COMMON_HAS_TYPE_ID((struct bt_ctf_field_common *) field,
BT_CTF_FIELD_TYPE_ID_ENUM, "Field");
- BT_ASSERT(enumeration->container);
+ BT_ASSERT_DBG(enumeration->container);
return (void *) enumeration->container;
}
int ret;
BT_LOGD("Creating CTF writer array field object: ft-addr=%p", type);
- BT_ASSERT(type);
+ BT_ASSERT_DBG(type);
if (!array) {
BT_LOGE_STR("Failed to allocate one array field.");
# define bt_ctf_field_common_set(_field, _val)
#endif
-BT_ASSERT_FUNC
+BT_ASSERT_DBG_FUNC
static inline bool field_type_common_has_known_id(
struct bt_ctf_field_type_common *ft)
{
goto end;
}
- BT_ASSERT(field_type_common_has_known_id(field->type));
+ BT_ASSERT_DBG(field_type_common_has_known_id(field->type));
if (field->methods->validate) {
ret = field->methods->validate(field);
static inline
void _bt_ctf_field_common_reset_recursive(struct bt_ctf_field_common *field)
{
- BT_ASSERT(field);
- BT_ASSERT(field->methods->reset);
+ BT_ASSERT_DBG(field);
+ BT_ASSERT_DBG(field->methods->reset);
field->methods->reset(field);
}
static inline
void _bt_ctf_field_common_set(struct bt_ctf_field_common *field, bool value)
{
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
field->payload_set = value;
}
goto end;
}
- BT_ASSERT(field_type_common_has_known_id(field->type));
- BT_ASSERT(field->methods->is_set);
+ BT_ASSERT_DBG(field_type_common_has_known_id(field->type));
+ BT_ASSERT_DBG(field->methods->is_set);
is_set = field->methods->is_set(field);
end:
bt_ctf_object_release_func release_func,
struct bt_ctf_field_common_methods *methods)
{
- BT_ASSERT(field);
- BT_ASSERT(ft);
+ BT_ASSERT_DBG(field);
+ BT_ASSERT_DBG(ft);
bt_ctf_object_init(&field->base, is_shared, release_func);
field->methods = methods;
field->type = (void *) bt_ctf_object_get_ref(ft);
goto end;
}
- BT_ASSERT(!sequence->elements->pdata[i]);
+ BT_ASSERT_DBG(!sequence->elements->pdata[i]);
sequence->elements->pdata[i] = elem_field;
}
}
}
ret = structure->fields->pdata[index];
- BT_ASSERT(ret);
+ BT_ASSERT_DBG(ret);
error:
return ret;
}
/* Select corresponding field */
- BT_ASSERT(choice_index < variant->fields->len);
+ BT_ASSERT_DBG(choice_index < variant->fields->len);
variant->current_field = variant->fields->pdata[choice_index];
variant->tag_value.u = tag_uval;
static inline
void bt_ctf_field_common_finalize(struct bt_ctf_field_common *field)
{
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
BT_LOGD_STR("Putting field's type.");
bt_ctf_object_put_ref(field->type);
}
static inline
void bt_ctf_field_common_integer_finalize(struct bt_ctf_field_common *field)
{
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
BT_LOGD("Finalizing common integer field object: addr=%p", field);
bt_ctf_field_common_finalize(field);
}
static inline
void bt_ctf_field_common_floating_point_finalize(struct bt_ctf_field_common *field)
{
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
BT_LOGD("Finalizing common floating point number field object: addr=%p", field);
bt_ctf_field_common_finalize(field);
}
{
struct bt_ctf_field_common_structure *structure = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
BT_LOGD("Finalizing common structure field object: addr=%p", field);
bt_ctf_field_common_finalize(field);
{
struct bt_ctf_field_common_variant *variant = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
BT_LOGD("Finalizing common variant field object: addr=%p", field);
bt_ctf_field_common_finalize(field);
{
struct bt_ctf_field_common_array *array = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
BT_LOGD("Finalizing common array field object: addr=%p", field);
bt_ctf_field_common_finalize(field);
{
struct bt_ctf_field_common_sequence *sequence = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
BT_LOGD("Finalizing common sequence field object: addr=%p", field);
bt_ctf_field_common_finalize(field);
{
struct bt_ctf_field_common_string *string = BT_CTF_FROM_COMMON(field);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
BT_LOGD("Finalizing common string field object: addr=%p", field);
bt_ctf_field_common_finalize(field);
{
int ret = 0;
- BT_ASSERT(new_object_func);
- BT_ASSERT(destroy_object_func);
+ BT_ASSERT_DBG(new_object_func);
+ BT_ASSERT_DBG(destroy_object_func);
BT_LOGD("Initializing object pool: addr=%p, data-addr=%p",
pool, data);
pool->objects = g_ptr_array_new();
{
uint64_t i;
- BT_ASSERT(pool);
+ BT_ASSERT_DBG(pool);
BT_LOGD("Finalizing object pool.");
if (pool->objects) {
{
struct bt_ctf_object *obj;
- BT_ASSERT(pool);
+ BT_ASSERT_DBG(pool);
#ifdef BT_LOGT
BT_LOGT("Creating object from pool: pool-addr=%p, pool-size=%zu, pool-cap=%u",
{
struct bt_ctf_object *bt_obj = obj;
- BT_ASSERT(pool);
- BT_ASSERT(obj);
+ BT_ASSERT_DBG(pool);
+ BT_ASSERT_DBG(obj);
#ifdef BT_LOGT
BT_LOGT("Recycling object: pool-addr=%p, pool-size=%zu, pool-cap=%u, obj-addr=%p",
static inline
unsigned long long bt_ctf_object_get_ref_count(struct bt_ctf_object *obj)
{
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
return obj->ref_count;
}
static inline
struct bt_ctf_object *bt_ctf_object_borrow_parent(struct bt_ctf_object *obj)
{
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
return obj->parent;
}
static inline
void bt_ctf_object_set_parent(struct bt_ctf_object *child, struct bt_ctf_object *parent)
{
- BT_ASSERT(child);
- BT_ASSERT(child->is_shared);
+ BT_ASSERT_DBG(child);
+ BT_ASSERT_DBG(child->is_shared);
#ifdef BT_LOGT
BT_LOGT("Setting object's parent: addr=%p, parent-addr=%p",
* object's reference count falls to zero.
*/
if (parent) {
- BT_ASSERT(!child->parent);
+ BT_ASSERT_DBG(!child->parent);
child->parent = parent;
bt_ctf_object_get_no_null_check(parent);
} else {
static inline
void bt_ctf_object_try_spec_release(struct bt_ctf_object *obj)
{
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
- BT_ASSERT(obj->spec_release_func);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
+ BT_ASSERT_DBG(obj->spec_release_func);
if (bt_ctf_object_get_ref_count(obj) == 0) {
obj->spec_release_func(obj);
void bt_ctf_object_init(struct bt_ctf_object *obj, bool is_shared,
bt_ctf_object_release_func release_func)
{
- BT_ASSERT(obj);
- BT_ASSERT(!is_shared || release_func);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(!is_shared || release_func);
obj->is_shared = is_shared;
obj->release_func = release_func;
obj->parent_is_owner_listener_func = NULL;
void bt_ctf_object_init_shared_with_parent(struct bt_ctf_object *obj,
bt_ctf_object_release_func spec_release_func)
{
- BT_ASSERT(obj);
- BT_ASSERT(spec_release_func);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(spec_release_func);
bt_ctf_object_init_shared(obj, bt_ctf_object_with_parent_release_func);
obj->spec_release_func = spec_release_func;
}
void bt_ctf_object_set_parent_is_owner_listener_func(struct bt_ctf_object *obj,
bt_ctf_object_parent_is_owner_listener_func func)
{
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
- BT_ASSERT(obj->spec_release_func);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
+ BT_ASSERT_DBG(obj->spec_release_func);
((struct bt_ctf_object *) obj)->parent_is_owner_listener_func = func;
}
static inline
void bt_ctf_object_inc_ref_count(struct bt_ctf_object *obj)
{
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
obj->ref_count++;
- BT_ASSERT(obj->ref_count != 0);
+ BT_ASSERT_DBG(obj->ref_count != 0);
}
static inline
void *bt_ctf_object_get_no_null_check_no_parent_check(struct bt_ctf_object *obj)
{
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
#ifdef BT_LOGT
BT_LOGT("Incrementing object's reference count: %llu -> %llu: "
static inline
void *bt_ctf_object_get_no_null_check(struct bt_ctf_object *obj)
{
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
if (G_UNLIKELY(obj->parent && bt_ctf_object_get_ref_count(obj) == 0)) {
#ifdef BT_LOGT
static inline
void bt_ctf_object_put_no_null_check(struct bt_ctf_object *obj)
{
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
- BT_ASSERT(obj->ref_count > 0);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
+ BT_ASSERT_DBG(obj->ref_count > 0);
#ifdef BT_LOGT
BT_LOGT("Decrementing object's reference count: %llu -> %llu: "
obj->ref_count--;
if (obj->ref_count == 0) {
- BT_ASSERT(obj->release_func);
+ BT_ASSERT_DBG(obj->release_func);
obj->release_func(obj);
}
}
struct bt_ctf_field_type_common *get_type_from_ctx(struct resolve_context *ctx,
enum bt_ctf_scope scope)
{
- BT_ASSERT(scope >= BT_CTF_SCOPE_TRACE_PACKET_HEADER &&
+ BT_ASSERT_DBG(scope >= BT_CTF_SCOPE_TRACE_PACKET_HEADER &&
scope <= BT_CTF_SCOPE_EVENT_FIELDS);
return ctx->scopes[scope - BT_CTF_SCOPE_TRACE_PACKET_HEADER];
/*
* Start from both roots and find the first mismatch.
*/
- BT_ASSERT(field_path1->root == field_path2->root);
+ BT_ASSERT_DBG(field_path1->root == field_path2->root);
field_path1_len = field_path1->indexes->len;
field_path2_len = field_path2->indexes->len;
{
int ret;
- BT_ASSERT(type_stack_size(ctx->type_stack) == 0);
+ BT_ASSERT_DBG(type_stack_size(ctx->type_stack) == 0);
ctx->root_scope = root_scope;
ret = resolve_type(get_type_from_ctx(ctx, root_scope), ctx);
ctx->root_scope = BT_CTF_SCOPE_UNKNOWN;
BT_CTF_VALIDATION_FLAG_EVENT;
struct bt_ctf_clock_class *expected_clock_class = NULL;
- BT_ASSERT(copy_field_type_func);
+ BT_ASSERT_DBG(copy_field_type_func);
if (!stream_class || !event_class) {
BT_LOGW("Invalid parameter: stream class or event class is NULL: "
* The trace and stream class should be valid at this
* point.
*/
- BT_ASSERT(trace->valid);
- BT_ASSERT(stream_class->valid);
+ BT_ASSERT_DBG(trace->valid);
+ BT_ASSERT_DBG(stream_class->valid);
packet_header_type =
bt_ctf_trace_common_borrow_packet_header_field_type(trace);
packet_context_type =
* now if the stream class is frozen.
*/
if (stream_class->frozen && expected_clock_class) {
- BT_ASSERT(!stream_class->clock_class ||
+ BT_ASSERT_DBG(!stream_class->clock_class ||
stream_class->clock_class == expected_clock_class);
BT_CTF_OBJECT_MOVE_REF(stream_class->clock_class, expected_clock_class);
}
int ret;
uint64_t i;
- BT_ASSERT(stream_class);
- BT_ASSERT(expected_clock_class);
+ BT_ASSERT_DBG(stream_class);
+ BT_ASSERT_DBG(expected_clock_class);
ret = bt_ctf_field_type_common_validate_single_clock_class(
stream_class->packet_context_field_type,
expected_clock_class);
struct bt_ctf_event_class_common *event_class =
g_ptr_array_index(stream_class->event_classes, i);
- BT_ASSERT(event_class);
+ BT_ASSERT_DBG(event_class);
ret = bt_ctf_event_class_common_validate_single_clock_class(
event_class, expected_clock_class);
if (ret) {
bt_ctf_field_type_structure_get_field_type_by_name(parent_ft,
field_name);
- BT_ASSERT(stream_class->clock);
+ BT_ASSERT_DBG(stream_class->clock);
if (!ft) {
/* Field does not exist: not an error */
goto end;
}
- BT_ASSERT(((struct bt_ctf_field_type_common *) ft)->id ==
+ BT_ASSERT_DBG(((struct bt_ctf_field_type_common *) ft)->id ==
BT_CTF_FIELD_TYPE_ID_INTEGER);
mapped_clock_class =
bt_ctf_field_type_integer_get_mapped_clock_class(ft);
ret = bt_ctf_field_type_common_integer_set_mapped_clock_class_no_check_frozen(
(void *) ft_copy, stream_class->clock->clock_class);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
ret = bt_ctf_field_type_common_structure_replace_field(
(void *) parent_ft, field_name, (void *) ft_copy);
{
int ret = 0;
- BT_ASSERT(stream_class);
+ BT_ASSERT_DBG(stream_class);
if (!stream_class->clock) {
/* No clock class to map to */
*/
trace = BT_CTF_FROM_COMMON(bt_ctf_stream_class_common_borrow_trace(
BT_CTF_TO_COMMON(stream_class)));
- BT_ASSERT(trace);
+ BT_ASSERT_DBG(trace);
packet_header_type = bt_ctf_trace_get_packet_header_field_type(trace);
trace = NULL;
if (packet_header_type) {
struct bt_ctf_trace_common *bt_ctf_stream_class_common_borrow_trace(
struct bt_ctf_stream_class_common *stream_class)
{
- BT_ASSERT(stream_class);
+ BT_ASSERT_DBG(stream_class);
return (void *) bt_ctf_object_borrow_parent(&stream_class->base);
}
void _bt_ctf_stream_class_common_set_id(
struct bt_ctf_stream_class_common *stream_class, int64_t id)
{
- BT_ASSERT(stream_class);
+ BT_ASSERT_DBG(stream_class);
stream_class->id = id;
stream_class->id_set = 1;
BT_LOGT("Set stream class's ID (internal): "
}
field_type = bt_ctf_field_get_type(field);
- BT_ASSERT(field_type);
+ BT_ASSERT_DBG(field_type);
if (bt_ctf_field_type_get_type_id(field_type) !=
BT_CTF_FIELD_TYPE_ID_INTEGER) {
stream->packet_header, "magic");
const uint32_t magic_value = 0xc1fc1fc1;
- BT_ASSERT(stream);
+ BT_ASSERT_DBG(stream);
if (!magic_field) {
/* No magic field found. Not an error, skip. */
struct bt_ctf_field *uuid_field = bt_ctf_field_structure_get_field_by_name(
stream->packet_header, "uuid");
- BT_ASSERT(stream);
+ BT_ASSERT_DBG(stream);
if (!uuid_field) {
/* No uuid field found. Not an error, skip. */
struct bt_ctf_field *field = bt_ctf_field_structure_get_field_by_name(
stream->packet_context, "content_size");
- BT_ASSERT(stream);
+ BT_ASSERT_DBG(stream);
if (!field) {
/* No content size field found. Not an error, skip. */
struct bt_ctf_field *field = bt_ctf_field_structure_get_field_by_name(
stream->packet_context, "events_discarded");
- BT_ASSERT(stream);
+ BT_ASSERT_DBG(stream);
if (!field) {
/* No discarded events count field found. Not an error, skip. */
bt_ctf_object_put_ref(cc);
val_size = bt_ctf_field_type_integer_get_size(
(void *) field_common->type);
- BT_ASSERT(val_size >= 1);
+ BT_ASSERT_DBG(val_size >= 1);
if (bt_ctf_field_type_integer_is_signed(
(void *) field_common->type)) {
struct bt_ctf_field *int_field =
bt_ctf_field_enumeration_get_container(field);
- BT_ASSERT(int_field);
+ BT_ASSERT_DBG(int_field);
ret = visit_field_update_clock_value(int_field, val);
bt_ctf_object_put_ref(int_field);
break;
int64_t len = bt_ctf_field_type_array_get_length(
(void *) field_common->type);
- BT_ASSERT(len >= 0);
+ BT_ASSERT_DBG(len >= 0);
for (i = 0; i < len; i++) {
struct bt_ctf_field *elem_field =
bt_ctf_field_array_get_field(field, i);
- BT_ASSERT(elem_field);
+ BT_ASSERT_DBG(elem_field);
ret = visit_field_update_clock_value(elem_field, val);
bt_ctf_object_put_ref(elem_field);
if (ret) {
struct bt_ctf_field *elem_field =
bt_ctf_field_sequence_get_field(field, i);
- BT_ASSERT(elem_field);
+ BT_ASSERT_DBG(elem_field);
ret = visit_field_update_clock_value(elem_field, val);
bt_ctf_object_put_ref(elem_field);
if (ret) {
int64_t len = bt_ctf_field_type_structure_get_field_count(
(void *) field_common->type);
- BT_ASSERT(len >= 0);
+ BT_ASSERT_DBG(len >= 0);
for (i = 0; i < len; i++) {
struct bt_ctf_field *member_field =
bt_ctf_field_structure_get_field_by_index(field, i);
- BT_ASSERT(member_field);
+ BT_ASSERT_DBG(member_field);
ret = visit_field_update_clock_value(member_field, val);
bt_ctf_object_put_ref(member_field);
if (ret) {
if (ts_begin_field && bt_ctf_field_is_set_recursive(ts_begin_field)) {
/* Use provided `timestamp_begin` value as starting value */
ret = bt_ctf_field_integer_unsigned_get_value(ts_begin_field, &val);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
init_clock_value = val;
} else if (stream->last_ts_end != -1ULL) {
/* Use last packet's ending timestamp as starting value */
*/
len = bt_ctf_field_type_structure_get_field_count(
(void *) packet_context->type);
- BT_ASSERT(len >= 0);
+ BT_ASSERT_DBG(len >= 0);
for (i = 0; i < len; i++) {
const char *member_name;
ret = bt_ctf_field_type_structure_get_field_by_index(
(void *) packet_context->type, &member_name, NULL, i);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
if (strcmp(member_name, "timestamp_begin") == 0 ||
strcmp(member_name, "timestamp_end") == 0) {
member_field = bt_ctf_field_structure_get_field_by_index(
stream->packet_context, i);
- BT_ASSERT(member_field);
+ BT_ASSERT_DBG(member_field);
if (strcmp(member_name, "packet_size") == 0 &&
!bt_ctf_field_is_set_recursive(member_field)) {
for (i = 0; i < stream->events->len; i++) {
struct bt_ctf_event *event = g_ptr_array_index(stream->events, i);
- BT_ASSERT(event);
+ BT_ASSERT_DBG(event);
ret = visit_event_update_clock_value(event, &cur_clock_value);
if (ret) {
BT_LOGW("Cannot automatically update clock value "
*/
if (ts_end_field && bt_ctf_field_is_set_recursive(ts_end_field)) {
ret = bt_ctf_field_integer_unsigned_get_value(ts_end_field, &val);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
if (val < cur_clock_value) {
BT_LOGW("Packet's final timestamp is less than "
if (ts_end_field && !bt_ctf_field_is_set_recursive(ts_end_field)) {
ret = set_integer_field_value(ts_end_field, cur_clock_value);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
stream->last_ts_end = cur_clock_value;
}
/* Set `timestamp_begin` field to initial clock value */
if (ts_begin_field && !bt_ctf_field_is_set_recursive(ts_begin_field)) {
ret = set_integer_field_value(ts_begin_field, init_clock_value);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
}
end:
/* Use stream name's base name as prefix */
gchar *basename = g_path_get_basename(stream->common.name->str);
- BT_ASSERT(basename);
+ BT_ASSERT_DBG(basename);
if (strcmp(basename, G_DIR_SEPARATOR_S) == 0) {
g_string_assign(filename, "stream");
g_path_get_basename(
stream->common.stream_class->name->str);
- BT_ASSERT(basename);
+ BT_ASSERT_DBG(basename);
if (strcmp(basename, G_DIR_SEPARATOR_S) == 0) {
g_string_assign(filename, "stream");
append_ids:
stream_class_id = bt_ctf_stream_class_common_get_id(stream->common.stream_class);
- BT_ASSERT(stream_class_id >= 0);
- BT_ASSERT(stream->common.id >= 0);
+ BT_ASSERT_DBG(stream_class_id >= 0);
+ BT_ASSERT_DBG(stream->common.id >= 0);
g_string_append_printf(filename, "-%" PRId64 "-%" PRId64,
stream_class_id, stream->common.id);
stream->last_ts_end = -1ULL;
BT_LOGD("CTF writer stream object belongs writer's trace: "
"writer-addr=%p", writer);
- BT_ASSERT(writer);
+ BT_ASSERT_DBG(writer);
if (stream_class->common.packet_context_field_type) {
BT_LOGD("Creating stream's packet context field: "
BT_CTF_TO_COMMON(stream)));
int64_t event_class_id;
- BT_ASSERT(event);
+ BT_ASSERT_DBG(event);
if (!event->common.header_field) {
goto end;
id_field = bt_ctf_field_structure_get_field_by_name(
(void *) event->common.header_field->field, "id");
event_class_id = bt_ctf_event_class_common_get_id(event->common.class);
- BT_ASSERT(event_class_id >= 0);
+ BT_ASSERT_DBG(event_class_id >= 0);
if (id_field && bt_ctf_field_get_type_id(id_field) == BT_CTF_FIELD_TYPE_ID_INTEGER) {
ret = set_integer_field_value(id_field, event_class_id);
if (mapped_clock_class) {
uint64_t timestamp;
- BT_ASSERT(mapped_clock_class ==
+ BT_ASSERT_DBG(mapped_clock_class ==
stream_class->clock->clock_class);
ret = bt_ctf_clock_get_value(
stream_class->clock,
×tamp);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
ret = set_integer_field_value(timestamp_field,
timestamp);
if (ret) {
}
field_type = bt_ctf_field_get_type(field);
- BT_ASSERT(field_type);
+ BT_ASSERT_DBG(field_type);
if (bt_ctf_field_type_common_compare((void *) field_type,
trace->common.packet_header_field_type)) {
bt_ctf_stream_get_name(stream), stream->flushed_packet_count);
trace = BT_CTF_FROM_COMMON(bt_ctf_stream_class_common_borrow_trace(
stream->common.stream_class));
- BT_ASSERT(trace);
+ BT_ASSERT_DBG(trace);
native_byte_order = bt_ctf_trace_get_native_byte_order(trace);
ret = auto_populate_packet_header(stream);
struct bt_ctf_field_type *field_type = NULL;
struct bt_ctf_field *integer;
- BT_ASSERT(structure);
- BT_ASSERT(name);
+ BT_ASSERT_DBG(structure);
+ BT_ASSERT_DBG(name);
integer = bt_ctf_field_structure_get_field_by_name(structure, name);
if (!integer) {
}
field_type = bt_ctf_field_get_type(integer);
- BT_ASSERT(field_type);
+ BT_ASSERT_DBG(field_type);
if (bt_ctf_field_type_get_type_id(field_type) != BT_CTF_FIELD_TYPE_ID_INTEGER) {
/*
* The user most likely meant for us to populate this field
struct bt_ctf_stream_class_common *bt_ctf_stream_common_borrow_class(
struct bt_ctf_stream_common *stream)
{
- BT_ASSERT(stream);
+ BT_ASSERT_DBG(stream);
return stream->stream_class;
}
ret = bt_ctf_field_type_common_structure_borrow_field_by_index(
packet_header_type, &field_name, NULL, 0);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
if (strcmp(field_name, "magic") != 0) {
BT_LOGW("Invalid packet header field type: `magic` field must be the first field: "
}
elem_ft = bt_ctf_field_type_common_array_borrow_element_field_type(field_type);
- BT_ASSERT(elem_ft);
+ BT_ASSERT_DBG(elem_ft);
if (elem_ft->id != BT_CTF_FIELD_TYPE_ID_INTEGER) {
BT_LOGW("Invalid packet header field type: `uuid` field's element field type must be an integer field type: "
goto invalid;
}
- BT_ASSERT(int_ft);
+ BT_ASSERT_DBG(int_ft);
if (bt_ctf_field_type_common_integer_is_signed(int_ft)) {
BT_LOGW("Invalid event header field type: `id` field must be an unsigned integer or enumeration field type: "
"id-ft-addr=%p", int_ft);
struct bt_ctf_clock_class *expected_clock_class =
bt_ctf_object_get_ref(init_expected_clock_class);
- BT_ASSERT(copy_field_type_func);
+ BT_ASSERT_DBG(copy_field_type_func);
if (!trace) {
BT_LOGW_STR("Invalid parameter: trace is NULL.");
event_class_count =
bt_ctf_stream_class_common_get_event_class_count(stream_class);
- BT_ASSERT(event_class_count >= 0);
+ BT_ASSERT_DBG(event_class_count >= 0);
if (!stream_class->frozen) {
/*
{
struct bt_ctf_search_query query = { .value = clock_class, .found = 0 };
- BT_ASSERT(trace);
- BT_ASSERT(clock_class);
+ BT_ASSERT_DBG(trace);
+ BT_ASSERT_DBG(clock_class);
g_ptr_array_foreach(trace->clock_classes, value_exists, &query);
return query.found;
g_string_append(context->string, "trace {\n");
g_string_append(context->string, "\tmajor = 1;\n");
g_string_append(context->string, "\tminor = 8;\n");
- BT_ASSERT(trace->common.native_byte_order == BT_CTF_BYTE_ORDER_LITTLE_ENDIAN ||
+ BT_ASSERT_DBG(trace->common.native_byte_order == BT_CTF_BYTE_ORDER_LITTLE_ENDIAN ||
trace->common.native_byte_order == BT_CTF_BYTE_ORDER_BIG_ENDIAN ||
trace->common.native_byte_order == BT_CTF_BYTE_ORDER_NETWORK);
env_field_value_obj = bt_ctf_attributes_borrow_field_value(
trace->common.environment, i);
- BT_ASSERT(entry_name);
- BT_ASSERT(env_field_value_obj);
+ BT_ASSERT_DBG(entry_name);
+ BT_ASSERT_DBG(env_field_value_obj);
switch (bt_ctf_value_get_type(
bt_ctf_private_value_as_value(env_field_value_obj))) {
BT_CTF_ASSERT_PRE_NON_NULL(trace, "Trace");
ret = bt_ctf_attributes_get_count(trace->environment);
- BT_ASSERT(ret >= 0);
+ BT_ASSERT_DBG(ret >= 0);
return ret;
}
}
reserved_keywords_set = g_hash_table_new(g_direct_hash, g_direct_equal);
- BT_ASSERT(reserved_keywords_set);
+ BT_ASSERT_DBG(reserved_keywords_set);
for (i = 0; i < reserved_keywords_count; i++) {
gpointer quark = GINT_TO_POINTER(g_quark_from_string(
GString *str = g_string_new(NULL);
size_t i;
- BT_ASSERT(path);
+ BT_ASSERT_DBG(path);
if (!str) {
goto end;
bt_ctf_value_array_borrow_element_by_index(
array_obj, i);
- BT_ASSERT(element_obj);
+ BT_ASSERT_DBG(element_obj);
BT_LOGD("Copying array value's element: element-addr=%p, "
"index=%d", element_obj, i);
ret = bt_ctf_value_copy(&element_obj_copy, element_obj);
goto end;
}
- BT_ASSERT(element_obj_copy);
+ BT_ASSERT_DBG(element_obj_copy);
ret = bt_ctf_private_value_array_append_element(copy_obj,
(void *) element_obj_copy);
BT_CTF_OBJECT_PUT_REF_AND_RESET(element_obj_copy);
while (g_hash_table_iter_next(&iter, &key, &element_obj)) {
const char *key_str = g_quark_to_string(GPOINTER_TO_UINT(key));
- BT_ASSERT(key_str);
+ BT_ASSERT_DBG(key_str);
BT_LOGD("Copying map value's element: element-addr=%p, "
"key=\"%s\"", element_obj, key_str);
ret = bt_ctf_value_copy(&element_obj_copy, element_obj);
goto end;
}
- BT_ASSERT(element_obj_copy);
+ BT_ASSERT_DBG(element_obj_copy);
ret = bt_ctf_private_value_map_insert_entry(copy_obj, key_str,
(void *) element_obj_copy);
BT_CTF_OBJECT_PUT_REF_AND_RESET(element_obj_copy);
{
enum bt_ctf_value_status ret = BT_CTF_VALUE_STATUS_OK;
- BT_ASSERT(object);
+ BT_ASSERT_DBG(object);
if (object->frozen) {
goto end;
goto error;
}
- BT_ASSERT(extension_obj_elem_copy);
+ BT_ASSERT_DBG(extension_obj_elem_copy);
/* Replace in extended object */
extend_data->status = bt_ctf_private_value_map_insert_entry(
goto end;
error:
- BT_ASSERT(extend_data->status != BT_CTF_VALUE_STATUS_OK);
+ BT_ASSERT_DBG(extend_data->status != BT_CTF_VALUE_STATUS_OK);
ret = BT_CTF_FALSE;
end:
goto error;
}
- BT_ASSERT(extended_map_obj);
+ BT_ASSERT_DBG(extended_map_obj);
/*
* For each key in the extension map object, replace this key
/* Default to little-endian */
ret = bt_ctf_writer_set_byte_order(writer, BT_CTF_BYTE_ORDER_NATIVE);
- BT_ASSERT(ret == 0);
+ BT_ASSERT_DBG(ret == 0);
/* Create trace directory if necessary and open a metadata file */
if (g_mkdir_with_parents(path, S_IRWXU | S_IRWXG)) {
uint8_t *_bt_ctfser_get_addr(struct bt_ctfser *ctfser)
{
/* Only makes sense to get the address after aligning on byte */
- BT_ASSERT(ctfser->offset_in_cur_packet_bits % 8 == 0);
+ BT_ASSERT_DBG(ctfser->offset_in_cur_packet_bits % 8 == 0);
return ((uint8_t *) mmap_align_addr(ctfser->base_mma)) +
ctfser->mmap_base_offset + _bt_ctfser_offset_bytes(ctfser);
}
static inline
void _bt_ctfser_incr_offset(struct bt_ctfser *ctfser, uint64_t size_bits)
{
- BT_ASSERT(_bt_ctfser_has_space_left(ctfser, size_bits));
+ BT_ASSERT_DBG(_bt_ctfser_has_space_left(ctfser, size_bits));
ctfser->offset_in_cur_packet_bits += size_bits;
}
int ret = 0;
uint64_t align_size_bits;
- BT_ASSERT(alignment_bits > 0);
+ BT_ASSERT_DBG(alignment_bits > 0);
align_size_bits = ALIGN(ctfser->offset_in_cur_packet_bits,
alignment_bits) - ctfser->offset_in_cur_packet_bits;
/* Reverse byte order? */
bool rbo = byte_order != BYTE_ORDER;
- BT_ASSERT(size_bits % 8 == 0);
- BT_ASSERT(_bt_ctfser_has_space_left(ctfser, size_bits));
+ BT_ASSERT_DBG(size_bits % 8 == 0);
+ BT_ASSERT_DBG(_bt_ctfser_has_space_left(ctfser, size_bits));
switch (size_bits) {
case 8:
/* Reverse byte order? */
bool rbo = byte_order != BYTE_ORDER;
- BT_ASSERT(size_bits % 8 == 0);
- BT_ASSERT(_bt_ctfser_has_space_left(ctfser, size_bits));
+ BT_ASSERT_DBG(size_bits % 8 == 0);
+ BT_ASSERT_DBG(_bt_ctfser_has_space_left(ctfser, size_bits));
switch (size_bits) {
case 8:
{
int ret;
- BT_ASSERT(alignment_bits % 8 == 0);
+ BT_ASSERT_DBG(alignment_bits % 8 == 0);
ret = bt_ctfser_align_offset_in_current_packet(ctfser, alignment_bits);
if (G_UNLIKELY(ret)) {
goto end;
{
int ret;
- BT_ASSERT(alignment_bits % 8 == 0);
+ BT_ASSERT_DBG(alignment_bits % 8 == 0);
ret = bt_ctfser_align_offset_in_current_packet(ctfser, alignment_bits);
if (G_UNLIKELY(ret)) {
goto end;
void bt_ctfser_set_offset_in_current_packet_bits(struct bt_ctfser *ctfser,
uint64_t offset_bits)
{
- BT_ASSERT(offset_bits <= _bt_ctfser_cur_packet_size_bits(ctfser));
+ BT_ASSERT_DBG(offset_bits <= _bt_ctfser_cur_packet_size_bits(ctfser));
ctfser->offset_in_cur_packet_bits = offset_bits;
}
* the user), use BT_ASSERT_PRE().
*
* To assert that an internal postcondition is satisfied, use
- * BT_ASSERT().
+ * BT_ASSERT() or BT_ASSERT_DBG().
*/
#define BT_ASSERT_POST(_cond, _fmt, ...) \
do { \
* code), use BT_ASSERT_POST().
*
* To assert that an internal postcondition is satisfied, use
- * BT_ASSERT().
+ * BT_ASSERT() or BT_ASSERT_DBG().
*/
#define BT_ASSERT_PRE(_cond, _fmt, ...) \
do { \
struct simple_sink_data *data = bt_self_component_get_data(
bt_self_component_sink_as_self_component(self_comp));
- BT_ASSERT(data);
- BT_ASSERT(data->init_method_data.consume_func);
- BT_ASSERT(data->msg_iter);
+ BT_ASSERT_DBG(data);
+ BT_ASSERT_DBG(data->init_method_data.consume_func);
+ BT_ASSERT_DBG(data->msg_iter);
/* Call user's "consume" function */
status = data->init_method_data.consume_func(data->msg_iter,
cls = component->parent.class;
- BT_ASSERT(cls);
- BT_ASSERT(cls->type == BT_COMPONENT_CLASS_TYPE_FILTER);
+ BT_ASSERT_DBG(cls);
+ BT_ASSERT_DBG(cls->type == BT_COMPONENT_CLASS_TYPE_FILTER);
return (bt_component_class_filter *) cls;
}
cls = component->parent.class;
- BT_ASSERT(cls);
- BT_ASSERT(cls->type == BT_COMPONENT_CLASS_TYPE_SINK);
+ BT_ASSERT_DBG(cls);
+ BT_ASSERT_DBG(cls->type == BT_COMPONENT_CLASS_TYPE_SINK);
return (bt_component_class_sink *) cls;
}
cls = component->parent.class;
- BT_ASSERT(cls);
- BT_ASSERT(cls->type == BT_COMPONENT_CLASS_TYPE_SOURCE);
+ BT_ASSERT_DBG(cls);
+ BT_ASSERT_DBG(cls->type == BT_COMPONENT_CLASS_TYPE_SOURCE);
return (bt_component_class_source *) cls;
}
static inline
struct bt_graph *bt_component_borrow_graph(struct bt_component *comp)
{
- BT_ASSERT(comp);
+ BT_ASSERT_DBG(comp);
return (void *) bt_object_borrow_parent(&comp->base);
}
static inline
struct bt_graph *bt_connection_borrow_graph(struct bt_connection *conn)
{
- BT_ASSERT(conn);
+ BT_ASSERT_DBG(conn);
return (void *) conn->base.parent;
}
enum bt_component_class_sink_consume_method_status consume_status;
struct bt_component_class_sink *sink_class = NULL;
- BT_ASSERT(comp);
+ BT_ASSERT_DBG(comp);
sink_class = (void *) comp->parent.class;
- BT_ASSERT(sink_class->methods.consume);
+ BT_ASSERT_DBG(sink_class->methods.consume);
BT_LIB_LOGD("Calling user's consume method: %!+c", comp);
consume_status = sink_class->methods.consume((void *) comp);
BT_LOGD("User method returned: status=%s",
int index;
BT_LIB_LOGD("Making specific sink consume: %![comp-]+c", sink);
- BT_ASSERT(bt_component_borrow_graph((void *) sink) == graph);
+ BT_ASSERT_DBG(bt_component_borrow_graph((void *) sink) == graph);
if (g_queue_is_empty(graph->sinks_to_consume)) {
BT_LOGD_STR("Graph's sink queue is empty: end of graph.");
}
sink_node = g_queue_pop_nth_link(graph->sinks_to_consume, index);
- BT_ASSERT(sink_node);
+ BT_ASSERT_DBG(sink_node);
status = consume_sink_node(graph, sink_node);
end:
BT_HIDDEN
bool bt_graph_is_interrupted(const struct bt_graph *graph)
{
- BT_ASSERT(graph);
+ BT_ASSERT_DBG(graph);
return bt_interrupter_array_any_is_set(graph->interrupters);
}
static inline
void bt_graph_set_can_consume(struct bt_graph *graph, bool can_consume)
{
- BT_ASSERT(graph);
+ BT_ASSERT_DBG(graph);
graph->can_consume = can_consume;
}
int status = BT_FUNC_STATUS_OK;
uint64_t i;
- BT_ASSERT(graph->config_state != BT_GRAPH_CONFIGURATION_STATE_FAULTY);
+ BT_ASSERT_DBG(graph->config_state !=
+ BT_GRAPH_CONFIGURATION_STATE_FAULTY);
if (G_LIKELY(graph->config_state ==
BT_GRAPH_CONFIGURATION_STATE_CONFIGURED)) {
bool is_set = false;
uint64_t i;
- BT_ASSERT(interrupters);
+ BT_ASSERT_DBG(interrupters);
for (i = 0; i < interrupters->len; i++) {
const struct bt_interrupter *intr = interrupters->pdata[i];
struct bt_self_component_port_input_message_iterator *iterator,
enum bt_self_component_port_input_message_iterator_state state)
{
- BT_ASSERT(iterator);
+ BT_ASSERT_DBG(iterator);
BT_LIB_LOGD("Updating message iterator's state: new-state=%s",
bt_self_component_port_input_message_iterator_state_string(state));
iterator->state = state;
{
enum bt_component_class_message_iterator_next_method_status status;
- BT_ASSERT(iterator->methods.next);
+ BT_ASSERT_DBG(iterator->methods.next);
BT_LOGD_STR("Calling user's \"next\" method.");
status = iterator->methods.next(iterator, msgs, capacity, user_count);
BT_LOGD("User method returned: status=%s, msg-count=%" PRIu64,
BT_SELF_COMPONENT_PORT_INPUT_MESSAGE_ITERATOR_STATE_ACTIVE,
"Message iterator's \"next\" called, but "
"message iterator is in the wrong state: %!+i", iterator);
- BT_ASSERT(iterator->upstream_component);
- BT_ASSERT(iterator->upstream_component->class);
+ BT_ASSERT_DBG(iterator->upstream_component);
+ BT_ASSERT_DBG(iterator->upstream_component->class);
BT_ASSERT_PRE_DEV(
bt_component_borrow_graph(iterator->upstream_component)->config_state !=
BT_GRAPH_CONFIGURATION_STATE_CONFIGURING,
* For the same reason, there is no way that this iterator could
* have seeked (cannot seek a self message iterator).
*/
- BT_ASSERT(iterator->state ==
+ BT_ASSERT_DBG(iterator->state ==
BT_SELF_COMPONENT_PORT_INPUT_MESSAGE_ITERATOR_STATE_ACTIVE);
switch (status) {
const struct bt_clock_snapshot *clk_snapshot = NULL;
int ret;
- BT_ASSERT(msg);
- BT_ASSERT(got_first);
+ BT_ASSERT_DBG(msg);
+ BT_ASSERT_DBG(got_first);
switch (msg->type) {
case BT_MESSAGE_TYPE_EVENT:
(const void *) msg;
clk_snapshot = inactivity_msg->default_cs;
- BT_ASSERT(clk_snapshot);
+ BT_ASSERT_DBG(clk_snapshot);
break;
}
case BT_MESSAGE_TYPE_PACKET_BEGINNING:
abort();
}
- BT_ASSERT(clk_snapshot);
+ BT_ASSERT_DBG(clk_snapshot);
ret = bt_clock_snapshot_get_ns_from_origin(clk_snapshot,
&msg_ns_from_origin);
if (ret) {
stream_state->seen_clock_snapshot = true;
}
- BT_ASSERT(!bt_g_hash_table_contains(stream_states, stream_msg->stream));
+ BT_ASSERT_DBG(!bt_g_hash_table_contains(stream_states, stream_msg->stream));
g_hash_table_insert(stream_states, stream_msg->stream, stream_state);
break;
}
/* Update stream's state: packet began. */
stream_state = g_hash_table_lookup(stream_states, packet_msg->packet->stream);
- BT_ASSERT(stream_state);
-
- BT_ASSERT(stream_state->state == AUTO_SEEK_STREAM_STATE_STREAM_BEGAN);
+ BT_ASSERT_DBG(stream_state);
+ BT_ASSERT_DBG(stream_state->state == AUTO_SEEK_STREAM_STATE_STREAM_BEGAN);
stream_state->state = AUTO_SEEK_STREAM_STATE_PACKET_BEGAN;
- BT_ASSERT(!stream_state->packet);
+ BT_ASSERT_DBG(!stream_state->packet);
stream_state->packet = packet_msg->packet;
if (packet_msg->packet->stream->class->packets_have_beginning_default_clock_snapshot) {
stream_state = g_hash_table_lookup(stream_states,
event_msg->event->packet->stream);
- BT_ASSERT(stream_state);
+ BT_ASSERT_DBG(stream_state);
// HELPME: are we sure that event messages have clock snapshots at this point?
stream_state->seen_clock_snapshot = true;
/* Update stream's state: packet ended. */
stream_state = g_hash_table_lookup(stream_states, packet_msg->packet->stream);
- BT_ASSERT(stream_state);
-
- BT_ASSERT(stream_state->state == AUTO_SEEK_STREAM_STATE_PACKET_BEGAN);
+ BT_ASSERT_DBG(stream_state);
+ BT_ASSERT_DBG(stream_state->state == AUTO_SEEK_STREAM_STATE_PACKET_BEGAN);
stream_state->state = AUTO_SEEK_STREAM_STATE_STREAM_BEGAN;
- BT_ASSERT(stream_state->packet);
+ BT_ASSERT_DBG(stream_state->packet);
stream_state->packet = NULL;
if (packet_msg->packet->stream->class->packets_have_end_default_clock_snapshot) {
struct auto_seek_stream_state *stream_state;
stream_state = g_hash_table_lookup(stream_states, stream_msg->stream);
- BT_ASSERT(stream_state);
- BT_ASSERT(stream_state->state == AUTO_SEEK_STREAM_STATE_STREAM_BEGAN);
- BT_ASSERT(!stream_state->packet);
+ BT_ASSERT_DBG(stream_state);
+ BT_ASSERT_DBG(stream_state->state == AUTO_SEEK_STREAM_STATE_STREAM_BEGAN);
+ BT_ASSERT_DBG(!stream_state->packet);
/* Update stream's state: this stream doesn't exist anymore. */
g_hash_table_remove(stream_states, stream_msg->stream);
struct auto_seek_stream_state *stream_state;
stream_state = g_hash_table_lookup(stream_states, discarded_msg->stream);
- BT_ASSERT(stream_state);
+ BT_ASSERT_DBG(stream_state);
if ((msg->type == BT_MESSAGE_TYPE_DISCARDED_EVENTS && discarded_msg->stream->class->discarded_events_have_default_clock_snapshots) ||
(msg->type == BT_MESSAGE_TYPE_DISCARDED_PACKETS && discarded_msg->stream->class->discarded_packets_have_default_clock_snapshots)) {
msg = NULL;
end:
- BT_ASSERT(!msg || status != BT_FUNC_STATUS_OK);
+ BT_ASSERT_DBG(!msg || status != BT_FUNC_STATUS_OK);
return status;
}
uint64_t i;
bool got_first = false;
- BT_ASSERT(iterator);
+ BT_ASSERT_DBG(iterator);
memset(&messages[0], 0, sizeof(messages[0]) * MSG_BATCH_SIZE);
/*
set_self_comp_port_input_msg_iterator_state(iterator,
BT_SELF_COMPONENT_PORT_INPUT_MESSAGE_ITERATOR_STATE_ACTIVE);
- BT_ASSERT(iterator->methods.next);
+ BT_ASSERT_DBG(iterator->methods.next);
while (!got_first) {
/*
* The user's "next" method must not do any action which
* would change the iterator's state.
*/
- BT_ASSERT(iterator->state ==
+ BT_ASSERT_DBG(iterator->state ==
BT_SELF_COMPONENT_PORT_INPUT_MESSAGE_ITERATOR_STATE_ACTIVE);
switch (status) {
&can_seek_beginning);
BT_ASSERT(can_seek_status == BT_FUNC_STATUS_OK);
BT_ASSERT(can_seek_beginning);
-
BT_ASSERT(iterator->methods.seek_beginning);
BT_LIB_LOGD("Calling user's \"seek beginning\" method: %!+i",
iterator);
{
struct bt_message_discarded_items *disc_items_msg = (void *) message;
- BT_ASSERT(message);
+ BT_ASSERT_DBG(message);
return disc_items_msg->stream;
}
struct bt_message_discarded_items *disc_items_msg = (void *) message;
BT_ASSERT_PRE_DEV_NON_NULL(count, "Count (output)");
- BT_ASSERT(message);
+ BT_ASSERT_DBG(message);
*count = disc_items_msg->count.value;
return disc_items_msg->count.base.avail;
}
{
struct bt_message_discarded_items *disc_items_msg = (void *) message;
- BT_ASSERT(message);
+ BT_ASSERT_DBG(message);
BT_ASSERT_PRE_DEV(disc_items_msg->stream->class->default_clock_class,
"Message's stream's class has no default clock class: "
"%![msg-]+n, %![sc-]+S",
{
struct bt_message_discarded_items *disc_items_msg = (void *) message;
- BT_ASSERT(message);
+ BT_ASSERT_DBG(message);
BT_ASSERT_PRE_DEV(disc_items_msg->stream->class->default_clock_class,
"Message's stream's class has no default clock class: "
"%![msg-]+n, %![sc-]+S",
{
struct bt_message_discarded_items *disc_items_msg = (void *) msg;
- BT_ASSERT(msg);
+ BT_ASSERT_DBG(msg);
return disc_items_msg->stream->class->default_clock_class;
}
struct bt_stream_class *stream_class;
stream_class = bt_event_class_borrow_stream_class_inline(event_class);
- BT_ASSERT(stream_class);
+ BT_ASSERT_DBG(stream_class);
return bt_stream_class_borrow_trace_class(stream_class);
}
struct bt_stream *stream = (void *) c_stream;
struct bt_event *event;
- BT_ASSERT(stream);
+ BT_ASSERT_DBG(stream);
BT_ASSERT_PRE_NON_NULL(msg_iter, "Message iterator");
BT_ASSERT_PRE_NON_NULL(event_class, "Event class");
BT_ASSERT_PRE(event_class_has_trace(event_class),
"Event class is not part of a trace: %!+E", event_class);
stream_class = bt_event_class_borrow_stream_class_inline(event_class);
- BT_ASSERT(stream_class);
+ BT_ASSERT_DBG(stream_class);
BT_ASSERT_PRE((with_cs && stream_class->default_clock_class) ||
(!with_cs && !stream_class->default_clock_class),
"Creating an event message with a default clock snapshot, but without "
}
if (with_cs) {
- BT_ASSERT(stream_class->default_clock_class);
+ BT_ASSERT_DBG(stream_class->default_clock_class);
message->default_cs = bt_clock_snapshot_create(
stream_class->default_clock_class);
if (!message->default_cs) {
bt_clock_snapshot_set_raw_value(message->default_cs, raw_value);
}
- BT_ASSERT(!message->event);
+ BT_ASSERT_DBG(!message->event);
message->event = event;
if (packet) {
struct bt_message_event *event_msg = (void *) msg;
struct bt_graph *graph;
- BT_ASSERT(event_msg);
+ BT_ASSERT_DBG(event_msg);
if (G_UNLIKELY(!msg->graph)) {
bt_message_event_destroy(msg);
BT_LIB_LOGD("Recycling event message: %![msg-]+n, %![event-]+e",
msg, event_msg->event);
bt_message_reset(msg);
- BT_ASSERT(event_msg->event);
+ BT_ASSERT_DBG(event_msg->event);
bt_event_recycle(event_msg->event);
event_msg->event = NULL;
BT_ASSERT_PRE_DEV_MSG_IS_TYPE(msg, BT_MESSAGE_TYPE_EVENT);
stream_class = bt_event_class_borrow_stream_class_inline(
event_msg->event->class);
- BT_ASSERT(stream_class);
+ BT_ASSERT_DBG(stream_class);
BT_ASSERT_PRE_DEV(stream_class->default_clock_class,
"Message's stream's class has no default clock class: "
"%![msg-]+n, %![sc-]+S", msg, stream_class);
BT_ASSERT_PRE_DEV_MSG_IS_TYPE(msg, BT_MESSAGE_TYPE_EVENT);
stream_class = bt_event_class_borrow_stream_class_inline(
event_msg->event->class);
- BT_ASSERT(stream_class);
+ BT_ASSERT_DBG(stream_class);
return stream_class->default_clock_class;
}
static inline
void bt_message_reset(struct bt_message *message)
{
- BT_ASSERT(message);
+ BT_ASSERT_DBG(message);
#ifdef BT_DEV_MODE
message->frozen = BT_FALSE;
{
struct bt_message_packet *packet_msg = (void *) message;
- BT_ASSERT(message);
+ BT_ASSERT_DBG(message);
BT_ASSERT_PRE_DEV(
packet_msg->packet->stream->class->default_clock_class,
"Message's stream's class has no default clock class: "
{
struct bt_message_packet *packet_msg = (void *) msg;
- BT_ASSERT(msg);
+ BT_ASSERT_DBG(msg);
return packet_msg->packet->stream->class->default_clock_class;
}
{
struct bt_message_stream *stream_msg;
- BT_ASSERT(message);
+ BT_ASSERT_DBG(message);
stream_msg = (void *) message;
return stream_msg->stream;
}
struct bt_message_stream *stream_msg = (void *) msg;
struct bt_stream_class *sc;
- BT_ASSERT(msg);
+ BT_ASSERT_DBG(msg);
sc = stream_msg->stream->class;
- BT_ASSERT(sc);
+ BT_ASSERT_DBG(sc);
BT_ASSERT_PRE_DEV(sc->default_clock_class,
"Message's stream's class has no default clock class: "
"%![msg-]+n, %![sc-]+S", msg, sc);
- BT_ASSERT(stream_msg->default_cs);
+ BT_ASSERT_DBG(stream_msg->default_cs);
*snapshot = stream_msg->default_cs;
{
struct bt_message_stream *stream_msg = (void *) msg;
- BT_ASSERT(msg);
+ BT_ASSERT_DBG(msg);
return stream_msg->stream->class->default_clock_class;
}
static inline
struct bt_component *bt_port_borrow_component_inline(const struct bt_port *port)
{
- BT_ASSERT(port);
+ BT_ASSERT_DBG(port);
return (void *) bt_object_borrow_parent(&port->base);
}
uint64_t a_i, b_i;
bool is_equal = true;
- BT_ASSERT(range_set_a);
- BT_ASSERT(range_set_b);
+ BT_ASSERT_DBG(range_set_a);
+ BT_ASSERT_DBG(range_set_b);
if (range_set_a == range_set_b) {
goto end;
{
struct bt_object *obj;
- BT_ASSERT(pool);
+ BT_ASSERT_DBG(pool);
BT_LOGT("Creating object from pool: pool-addr=%p, pool-size=%zu, pool-cap=%u",
pool, pool->size, pool->objects->len);
{
struct bt_object *bt_obj = obj;
- BT_ASSERT(pool);
- BT_ASSERT(obj);
+ BT_ASSERT_DBG(pool);
+ BT_ASSERT_DBG(obj);
BT_LOGT("Recycling object: pool-addr=%p, pool-size=%zu, pool-cap=%u, obj-addr=%p",
pool, pool->size, pool->objects->len, obj);
{
struct bt_object *obj = (void *) c_obj;
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
return obj->ref_count;
}
{
struct bt_object *obj = (void *) c_obj;
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
return obj->parent;
}
static inline
void bt_object_set_parent(struct bt_object *child, struct bt_object *parent)
{
- BT_ASSERT(child);
- BT_ASSERT(child->is_shared);
+ BT_ASSERT_DBG(child);
+ BT_ASSERT_DBG(child->is_shared);
#ifdef _BT_OBJECT_LOGGING_ENABLED
BT_LOGT("Setting object's parent: addr=%p, parent-addr=%p",
* object's reference count falls to zero.
*/
if (parent) {
- BT_ASSERT(!child->parent);
+ BT_ASSERT_DBG(!child->parent);
child->parent = parent;
bt_object_get_ref_no_null_check(parent);
} else {
static inline
void bt_object_try_spec_release(struct bt_object *obj)
{
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
- BT_ASSERT(obj->spec_release_func);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
+ BT_ASSERT_DBG(obj->spec_release_func);
if (bt_object_get_ref_count(obj) == 0) {
obj->spec_release_func(obj);
void bt_object_init(struct bt_object *obj, bool is_shared,
bt_object_release_func release_func)
{
- BT_ASSERT(obj);
- BT_ASSERT(!is_shared || release_func);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(!is_shared || release_func);
obj->is_shared = is_shared;
obj->release_func = release_func;
obj->parent_is_owner_listener_func = NULL;
void bt_object_init_shared_with_parent(struct bt_object *obj,
bt_object_release_func spec_release_func)
{
- BT_ASSERT(obj);
- BT_ASSERT(spec_release_func);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(spec_release_func);
bt_object_init_shared(obj, bt_object_with_parent_release_func);
obj->spec_release_func = spec_release_func;
}
void bt_object_set_parent_is_owner_listener_func(struct bt_object *obj,
bt_object_parent_is_owner_listener_func func)
{
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
- BT_ASSERT(obj->spec_release_func);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
+ BT_ASSERT_DBG(obj->spec_release_func);
((struct bt_object *) obj)->parent_is_owner_listener_func = func;
}
{
struct bt_object *obj = (void *) c_obj;
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
obj->ref_count++;
- BT_ASSERT(obj->ref_count != 0);
+ BT_ASSERT_DBG(obj->ref_count != 0);
}
static inline
{
struct bt_object *obj = (void *) c_obj;
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
#ifdef _BT_OBJECT_LOGGING_ENABLED
BT_LOGT("Incrementing object's reference count: %llu -> %llu: "
{
struct bt_object *obj = (void *) c_obj;
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
if (G_UNLIKELY(obj->parent && bt_object_get_ref_count(obj) == 0)) {
#ifdef _BT_OBJECT_LOGGING_ENABLED
{
struct bt_object *obj = (void *) c_obj;
- BT_ASSERT(obj);
- BT_ASSERT(obj->is_shared);
- BT_ASSERT(obj->ref_count > 0);
+ BT_ASSERT_DBG(obj);
+ BT_ASSERT_DBG(obj->is_shared);
+ BT_ASSERT_DBG(obj->ref_count > 0);
#ifdef _BT_OBJECT_LOGGING_ENABLED
BT_LOGT("Decrementing object's reference count: %llu -> %llu: "
obj->ref_count--;
if (obj->ref_count == 0) {
- BT_ASSERT(obj->release_func);
+ BT_ASSERT_DBG(obj->release_func);
obj->release_func(obj);
}
}
const char *comp_class_cand_name =
bt_component_class_get_name(comp_class_candidate);
- BT_ASSERT(comp_class_cand_name);
+ BT_ASSERT_DBG(comp_class_cand_name);
if (strcmp(name, comp_class_cand_name) == 0) {
comp_class = comp_class_candidate;
return;
for (i = 1; i < heap->len; i++)
- BT_ASSERT(!heap->gt(heap->ptrs[i], heap->ptrs[0]));
+ BT_ASSERT_DBG(!heap->gt(heap->ptrs[i], heap->ptrs[0]));
}
#endif
const struct bt_value *attr_field_obj = NULL;
const struct bt_value *attr_field_name_obj = NULL;
- BT_ASSERT(attr_obj);
- BT_ASSERT(index < bt_value_array_get_length(attr_obj));
+ BT_ASSERT_DBG(attr_obj);
+ BT_ASSERT_DBG(index < bt_value_array_get_length(attr_obj));
attr_field_obj = bt_value_array_borrow_element_by_index_const(
attr_obj, index);
if (!attr_field_obj) {
struct bt_value *value_obj = NULL;
struct bt_value *attr_field_obj = NULL;
- BT_ASSERT(attr_obj);
- BT_ASSERT(index < bt_value_array_get_length(attr_obj));
+ BT_ASSERT_DBG(attr_obj);
+ BT_ASSERT_DBG(index < bt_value_array_get_length(attr_obj));
attr_field_obj =
bt_value_array_borrow_element_by_index(attr_obj, index);
if (!attr_field_obj) {
struct bt_value *value_obj = NULL;
struct bt_value *attr_field_obj = NULL;
- BT_ASSERT(attr_obj);
- BT_ASSERT(name);
+ BT_ASSERT_DBG(attr_obj);
+ BT_ASSERT_DBG(name);
attr_field_obj = bt_attributes_borrow_field_by_name(attr_obj, name);
if (!attr_field_obj) {
BT_LOGD("Cannot find attributes object's field by name: "
struct bt_clock_class *cc, int64_t ns_from_origin,
uint64_t *raw_value)
{
- BT_ASSERT(cc);
-
+ BT_ASSERT_DBG(cc);
return bt_common_clock_value_from_ns_from_origin(cc->offset_seconds,
cc->offset_cycles, cc->frequency, ns_from_origin,
raw_value) ? BT_FUNC_STATUS_OVERFLOW_ERROR : BT_FUNC_STATUS_OK;
{
uint64_t i;
- BT_ASSERT(cs_set);
- BT_ASSERT(cs_set->clock_snapshots);
+ BT_ASSERT_DBG(cs_set);
+ BT_ASSERT_DBG(cs_set->clock_snapshots);
for (i = 0; i < cs_set->clock_snapshots->len; i++) {
struct bt_clock_snapshot *cs = cs_set->clock_snapshots->pdata[i];
- BT_ASSERT(cs);
+ BT_ASSERT_DBG(cs);
bt_clock_snapshot_reset(cs);
}
struct bt_clock_snapshot *clock_snapshot = NULL;
uint64_t i;
- BT_ASSERT(cs_set);
- BT_ASSERT(cc);
+ BT_ASSERT_DBG(cs_set);
+ BT_ASSERT_DBG(cc);
/*
* Check if we already have a value for this clock class.
for (i = 0; i < cs_set->clock_snapshots->len; i++) {
struct bt_clock_snapshot *cs = cs_set->clock_snapshots->pdata[i];
- BT_ASSERT(cs);
+ BT_ASSERT_DBG(cs);
if (cs->clock_class == cc) {
clock_snapshot = cs;
void bt_clock_snapshot_set_set_default_clock_snapshot(
struct bt_clock_snapshot_set *cs_set, uint64_t raw_value)
{
- BT_ASSERT(cs_set);
- BT_ASSERT(cs_set->default_cs);
+ BT_ASSERT_DBG(cs_set);
+ BT_ASSERT_DBG(cs_set->default_cs);
bt_clock_snapshot_set_raw_value(cs_set->default_cs, raw_value);
}
{
struct bt_clock_snapshot *clock_snapshot = NULL;
- BT_ASSERT(clock_class);
+ BT_ASSERT_DBG(clock_class);
clock_snapshot = bt_object_pool_create_object(&clock_class->cs_pool);
if (!clock_snapshot) {
BT_LIB_LOGE_APPEND_CAUSE(
{
struct bt_clock_class *clock_class;
- BT_ASSERT(clock_snapshot);
+ BT_ASSERT_DBG(clock_snapshot);
BT_LIB_LOGD("Recycling clock snapshot: %!+k", clock_snapshot);
/*
*/
bt_clock_snapshot_reset(clock_snapshot);
clock_class = clock_snapshot->clock_class;
- BT_ASSERT(clock_class);
+ BT_ASSERT_DBG(clock_class);
clock_snapshot->clock_class = NULL;
bt_object_pool_recycle_object(&clock_class->cs_pool, clock_snapshot);
bt_object_put_ref(clock_class);
static inline
void bt_clock_snapshot_set(struct bt_clock_snapshot *clock_snapshot)
{
- BT_ASSERT(clock_snapshot);
+ BT_ASSERT_DBG(clock_snapshot);
clock_snapshot->is_set = true;
}
static inline
void bt_clock_snapshot_reset(struct bt_clock_snapshot *clock_snapshot)
{
- BT_ASSERT(clock_snapshot);
+ BT_ASSERT_DBG(clock_snapshot);
clock_snapshot->is_set = false;
}
void bt_clock_snapshot_set_raw_value(struct bt_clock_snapshot *clock_snapshot,
uint64_t cycles)
{
- BT_ASSERT(clock_snapshot);
+ BT_ASSERT_DBG(clock_snapshot);
clock_snapshot->value_cycles = cycles;
set_ns_from_origin(clock_snapshot);
bt_clock_snapshot_set(clock_snapshot);
struct bt_stream_class *bt_event_class_borrow_stream_class_inline(
const struct bt_event_class *event_class)
{
- BT_ASSERT(event_class);
+ BT_ASSERT_DBG(event_class);
return (void *) bt_object_borrow_parent(&event_class->base);
}
BT_HIDDEN
void _bt_event_set_is_frozen(const struct bt_event *event, bool is_frozen)
{
- BT_ASSERT(event);
+ BT_ASSERT_DBG(event);
BT_LIB_LOGD("Setting event's frozen state: %!+e, is-frozen=%d",
event, is_frozen);
static inline
void _bt_event_reset_dev_mode(struct bt_event *event)
{
- BT_ASSERT(event);
+ BT_ASSERT_DBG(event);
if (event->common_context_field) {
bt_field_set_is_frozen(
static inline
void bt_event_reset(struct bt_event *event)
{
- BT_ASSERT(event);
+ BT_ASSERT_DBG(event);
BT_LIB_LOGD("Resetting event: %!+e", event);
bt_event_set_is_frozen(event, false);
bt_object_put_ref_no_null_check(&event->stream->base);
{
struct bt_event_class *event_class;
- BT_ASSERT(event);
+ BT_ASSERT_DBG(event);
BT_LIB_LOGD("Recycling event: %!+e", event);
/*
*/
bt_event_reset(event);
event_class = event->class;
- BT_ASSERT(event_class);
+ BT_ASSERT_DBG(event_class);
event->class = NULL;
bt_object_pool_recycle_object(&event_class->event_pool, event);
bt_object_put_ref_no_null_check(&event_class->base);
event->class) == packet->stream->class,
"Packet's stream class and event's stream class differ: "
"%![event-]+e, %![packet-]+a", event, packet);
- BT_ASSERT(event->stream->class->supports_packets);
- BT_ASSERT(!event->packet);
+ BT_ASSERT_DBG(event->stream->class->supports_packets);
+ BT_ASSERT_DBG(!event->packet);
event->packet = packet;
bt_object_get_ref_no_null_check_no_parent_check(&event->packet->base);
BT_LIB_LOGD("Set event's packet: %![event-]+e, %![packet-]+a",
event->class) == stream->class,
"Stream's class and event's stream class differ: "
"%![event-]+e, %![stream-]+s", event, stream);
- BT_ASSERT(!event->stream);
+ BT_ASSERT_DBG(!event->stream);
event->stream = stream;
bt_object_get_ref_no_null_check_no_parent_check(&event->stream->base);
BT_LIB_LOGD("Set event's stream: %![event-]+e, %![stream-]+s",
{
struct bt_event *event = NULL;
- BT_ASSERT(event_class);
- BT_ASSERT(stream);
+ BT_ASSERT_DBG(event_class);
+ BT_ASSERT_DBG(stream);
event = bt_object_pool_create_object(&event_class->event_pool);
if (G_UNLIKELY(!event)) {
BT_LIB_LOGE_APPEND_CAUSE(
bt_event_set_stream(event, stream);
if (packet) {
- BT_ASSERT(packet);
+ BT_ASSERT_DBG(packet);
bt_event_set_packet(event, packet);
}
struct bt_field_class_enumeration_mapping *mapping = NULL;
uint64_t i;
- BT_ASSERT(fc);
+ BT_ASSERT_DBG(fc);
BT_ASSERT_PRE_DEV_NON_NULL(label, "Label");
for (i = 0; i < fc->mappings->len; i++) {
struct bt_field_class_named_field_class_container *fc,
uint64_t index)
{
- BT_ASSERT(fc);
+ BT_ASSERT_DBG(fc);
BT_ASSERT_PRE_DEV_VALID_INDEX(index, fc->named_fcs->len);
return fc->named_fcs->pdata[index];
}
gpointer orig_key;
gpointer value;
- BT_ASSERT(fc);
+ BT_ASSERT_DBG(fc);
BT_ASSERT_PRE_DEV_NON_NULL(name, "Name");
if (!g_hash_table_lookup_extended(fc->name_to_index, name, &orig_key,
&value)) {
struct bt_field_path_item *bt_field_path_borrow_item_by_index_inline(
const struct bt_field_path *field_path, uint64_t index)
{
- BT_ASSERT(field_path);
- BT_ASSERT(index < field_path->items->len);
+ BT_ASSERT_DBG(field_path);
+ BT_ASSERT_DBG(index < field_path->items->len);
return &g_array_index(field_path->items, struct bt_field_path_item,
index);
}
{
struct bt_field_wrapper *field_wrapper = NULL;
- BT_ASSERT(pool);
- BT_ASSERT(fc);
+ BT_ASSERT_DBG(pool);
+ BT_ASSERT_DBG(fc);
field_wrapper = bt_object_pool_create_object(pool);
if (!field_wrapper) {
BT_LIB_LOGE_APPEND_CAUSE(
{
struct bt_field_string *string_field = (void *) field;
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
string_field->length = 0;
bt_field_set_single(field, true);
}
goto end;
}
- BT_ASSERT(!array_field->fields->pdata[i]);
+ BT_ASSERT_DBG(!array_field->fields->pdata[i]);
array_field->fields->pdata[i] = elem_field;
}
}
}
ret_field = struct_field->fields->pdata[GPOINTER_TO_UINT(index)];
- BT_ASSERT(ret_field);
+ BT_ASSERT_DBG(ret_field);
end:
return ret_field;
const struct bt_field_class_named_field_class_container *container_fc;
const struct bt_field_variant *var_field = (const void *) field;
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
BT_ASSERT_PRE_DEV(var_field->selected_field,
"Variant field has no selected field: %!+f", field);
container_fc = (const void *) field->class;
static
void reset_single_field(struct bt_field *field)
{
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
field->is_set = false;
}
uint64_t i;
struct bt_field_structure *struct_field = (void *) field;
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
for (i = 0; i < struct_field->fields->len; i++) {
bt_field_reset(struct_field->fields->pdata[i]);
{
struct bt_field_option *opt_field = (void *) field;
- BT_ASSERT(opt_field);
+ BT_ASSERT_DBG(opt_field);
bt_field_reset(opt_field->content_field);
opt_field->selected_field = NULL;
}
uint64_t i;
struct bt_field_variant *var_field = (void *) field;
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
for (i = 0; i < var_field->fields->len; i++) {
bt_field_reset(var_field->fields->pdata[i]);
uint64_t i;
struct bt_field_array *array_field = (void *) field;
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
for (i = 0; i < array_field->fields->len; i++) {
bt_field_reset(array_field->fields->pdata[i]);
void _bt_field_set_is_frozen(const struct bt_field *field,
bool is_frozen)
{
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
BT_LIB_LOGD("Setting field object's frozen state: %!+f, is-frozen=%d",
field, is_frozen);
- BT_ASSERT(field->methods->set_is_frozen);
+ BT_ASSERT_DBG(field->methods->set_is_frozen);
field->methods->set_is_frozen((void *) field, is_frozen);
}
static
bool single_field_is_set(const struct bt_field *field)
{
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
return field->is_set;
}
uint64_t i;
const struct bt_field_structure *struct_field = (const void *) field;
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
for (i = 0; i < struct_field->fields->len; i++) {
is_set = bt_field_is_set(struct_field->fields->pdata[i]);
const struct bt_field_option *opt_field = (const void *) field;
bool is_set = false;
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
if (opt_field->selected_field) {
is_set = bt_field_is_set(opt_field->selected_field);
const struct bt_field_variant *var_field = (const void *) field;
bool is_set = false;
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
if (var_field->selected_field) {
is_set = bt_field_is_set(var_field->selected_field);
uint64_t i;
const struct bt_field_array *array_field = (const void *) field;
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
for (i = 0; i < array_field->length; i++) {
is_set = bt_field_is_set(array_field->fields->pdata[i]);
static inline
void _bt_field_reset(const struct bt_field *field)
{
- BT_ASSERT(field);
- BT_ASSERT(field->methods->reset);
+ BT_ASSERT_DBG(field);
+ BT_ASSERT_DBG(field->methods->reset);
field->methods->reset((void *) field);
}
static inline
void _bt_field_set_single(struct bt_field *field, bool value)
{
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
field->is_set = value;
}
goto end;
}
- BT_ASSERT(field->methods->is_set);
+ BT_ASSERT_DBG(field->methods->is_set);
is_set = field->methods->is_set(field);
end:
struct bt_trace_class *bt_stream_class_borrow_trace_class_inline(
const struct bt_stream_class *stream_class)
{
- BT_ASSERT(stream_class);
+ BT_ASSERT_DBG(stream_class);
return (void *) bt_object_borrow_parent(&stream_class->base);
}
static inline
struct bt_trace *bt_stream_borrow_trace_inline(const struct bt_stream *stream)
{
- BT_ASSERT(stream);
+ BT_ASSERT_DBG(stream);
return (void *) bt_object_borrow_parent(&stream->base);
}
BT_LIB_LOGD("Set trace's UUID: %!+t", trace);
}
-BT_ASSERT_FUNC
static
bool trace_has_environment_entry(const struct bt_trace *trace, const char *name)
{
bool overflows = false;
uint64_t offset_cycles_ns;
- BT_ASSERT(base_offset_ns);
+ BT_ASSERT_DBG(base_offset_ns);
/* Initialize nanosecond timestamp to clock's offset in seconds */
if (offset_seconds <= (INT64_MIN / INT64_C(1000000000) - 1) ||
*base_offset_ns = offset_seconds * INT64_C(1000000000);
/* Add offset in cycles */
- BT_ASSERT(offset_cycles < frequency);
+ BT_ASSERT_DBG(offset_cycles < frequency);
offset_cycles_ns = bt_util_ns_from_value(frequency,
offset_cycles);
- BT_ASSERT(offset_cycles_ns < 1000000000);
+ BT_ASSERT_DBG(offset_cycles_ns < 1000000000);
*base_offset_ns += (int64_t) offset_cycles_ns;
end:
}
value_ns_signed = (int64_t) value_ns_unsigned;
- BT_ASSERT(value_ns_signed >= 0);
+ BT_ASSERT_DBG(value_ns_signed >= 0);
if (*ns_from_origin <= 0) {
goto add_value;
struct stack_entry *entry;
struct bt_bfcr *bfcr;
- BT_ASSERT(stack);
- BT_ASSERT(base_class);
+ BT_ASSERT_DBG(stack);
+ BT_ASSERT_DBG(base_class);
bfcr = stack->bfcr;
BT_COMP_LOGT("Pushing field class on stack: stack-addr=%p, "
"fc-addr=%p, fc-type=%d, base-length=%zu, "
static inline
unsigned int stack_size(struct stack *stack)
{
- BT_ASSERT(stack);
+ BT_ASSERT_DBG(stack);
return stack->size;
}
{
struct bt_bfcr *bfcr;
- BT_ASSERT(stack);
- BT_ASSERT(stack_size(stack));
+ BT_ASSERT_DBG(stack);
+ BT_ASSERT_DBG(stack_size(stack));
bfcr = stack->bfcr;
BT_COMP_LOGT("Popping from stack: "
"stack-addr=%p, stack-size-before=%u, stack-size-after=%u",
static
void stack_clear(struct stack *stack)
{
- BT_ASSERT(stack);
+ BT_ASSERT_DBG(stack);
stack->size = 0;
}
static inline
struct stack_entry *stack_top(struct stack *stack)
{
- BT_ASSERT(stack);
- BT_ASSERT(stack_size(stack));
+ BT_ASSERT_DBG(stack);
+ BT_ASSERT_DBG(stack_size(stack));
return &g_array_index(stack->entries, struct stack_entry,
stack->size - 1);
}
enum bt_bfcr_status status = BT_BFCR_STATUS_OK;
struct ctf_field_class_float *fc = (void *) bfcr->cur_basic_field_class;
- BT_ASSERT(fc);
+ BT_ASSERT_DBG(fc);
field_size = fc->base.size;
bo = fc->base.byte_order;
bfcr->cur_bo = bo;
if (fc->size <= available) {
/* We have all the bits; decode and set now */
- BT_ASSERT(bfcr->buf.addr);
+ BT_ASSERT_DBG(bfcr->buf.addr);
status = read_basic_and_call_cb(bfcr, bfcr->buf.addr,
buf_at_from_addr(bfcr));
if (status != BT_BFCR_STATUS_OK) {
goto end;
}
- BT_ASSERT(buf_at_from_addr(bfcr) % 8 == 0);
+ BT_ASSERT_DBG(buf_at_from_addr(bfcr) % 8 == 0);
available_bytes = BITS_TO_BYTES_FLOOR(available_bits(bfcr));
buf_at_bytes = BITS_TO_BYTES_FLOOR(buf_at_from_addr(bfcr));
- BT_ASSERT(bfcr->buf.addr);
+ BT_ASSERT_DBG(bfcr->buf.addr);
first_chr = &bfcr->buf.addr[buf_at_bytes];
result = memchr(first_chr, '\0', available_bytes);
{
enum bt_bfcr_status status;
- BT_ASSERT(bfcr->cur_basic_field_class);
+ BT_ASSERT_DBG(bfcr->cur_basic_field_class);
switch (bfcr->cur_basic_field_class->type) {
case CTF_FIELD_CLASS_TYPE_INT:
{
enum bt_bfcr_status status;
- BT_ASSERT(bfcr->cur_basic_field_class);
+ BT_ASSERT_DBG(bfcr->cur_basic_field_class);
switch (bfcr->cur_basic_field_class->type) {
case CTF_FIELD_CLASS_TYPE_INT:
* 0 means "undefined" for variants; what we really want is 1
* (always aligned)
*/
- BT_ASSERT(field_alignment >= 1);
+ BT_ASSERT_DBG(field_alignment >= 1);
/* Compute how many bits we need to skip */
skip_bits = bits_to_skip_to_align_to(bfcr, (size_t) field_alignment);
size_t offset, size_t packet_offset, size_t sz,
enum bt_bfcr_status *status)
{
- BT_ASSERT(bfcr);
- BT_ASSERT(BYTES_TO_BITS(sz) >= offset);
+ BT_ASSERT_DBG(bfcr);
+ BT_ASSERT_DBG(BYTES_TO_BITS(sz) >= offset);
reset(bfcr);
bfcr->buf.addr = buf;
bfcr->buf.offset = offset;
size_t bt_bfcr_continue(struct bt_bfcr *bfcr, const uint8_t *buf, size_t sz,
enum bt_bfcr_status *status)
{
- BT_ASSERT(bfcr);
- BT_ASSERT(buf);
- BT_ASSERT(sz > 0);
+ BT_ASSERT_DBG(bfcr);
+ BT_ASSERT_DBG(buf);
+ BT_ASSERT_DBG(sz > 0);
bfcr->buf.addr = buf;
bfcr->buf.offset = 0;
bfcr->buf.at = 0;
void bt_bfcr_set_unsigned_int_cb(struct bt_bfcr *bfcr,
bt_bfcr_unsigned_int_cb_func cb)
{
- BT_ASSERT(bfcr);
- BT_ASSERT(cb);
+ BT_ASSERT_DBG(bfcr);
+ BT_ASSERT_DBG(cb);
bfcr->user.cbs.classes.unsigned_int = cb;
}
struct ctf_range *ctf_field_class_enum_mapping_borrow_range_by_index(
struct ctf_field_class_enum_mapping *mapping, uint64_t index)
{
- BT_ASSERT(mapping);
- BT_ASSERT(index < mapping->ranges->len);
+ BT_ASSERT_DBG(mapping);
+ BT_ASSERT_DBG(index < mapping->ranges->len);
return &g_array_index(mapping->ranges, struct ctf_range, index);
}
struct ctf_field_class_enum_mapping *ctf_field_class_enum_borrow_mapping_by_index(
struct ctf_field_class_enum *fc, uint64_t index)
{
- BT_ASSERT(fc);
- BT_ASSERT(index < fc->mappings->len);
+ BT_ASSERT_DBG(fc);
+ BT_ASSERT_DBG(index < fc->mappings->len);
return &g_array_index(fc->mappings, struct ctf_field_class_enum_mapping,
index);
}
struct ctf_field_class_enum_mapping *ret_mapping = NULL;
uint64_t i;
- BT_ASSERT(fc);
- BT_ASSERT(label);
+ BT_ASSERT_DBG(fc);
+ BT_ASSERT_DBG(label);
for (i = 0; i < fc->mappings->len; i++) {
struct ctf_field_class_enum_mapping *mapping =
struct ctf_named_field_class *ctf_field_class_struct_borrow_member_by_index(
struct ctf_field_class_struct *fc, uint64_t index)
{
- BT_ASSERT(fc);
- BT_ASSERT(index < fc->members->len);
+ BT_ASSERT_DBG(fc);
+ BT_ASSERT_DBG(index < fc->members->len);
return &g_array_index(fc->members, struct ctf_named_field_class,
index);
}
uint64_t i;
struct ctf_named_field_class *ret_named_fc = NULL;
- BT_ASSERT(fc);
- BT_ASSERT(name);
+ BT_ASSERT_DBG(fc);
+ BT_ASSERT_DBG(name);
for (i = 0; i < fc->members->len; i++) {
struct ctf_named_field_class *named_fc =
struct ctf_named_field_class *ctf_field_class_variant_borrow_option_by_index(
struct ctf_field_class_variant *fc, uint64_t index)
{
- BT_ASSERT(fc);
- BT_ASSERT(index < fc->options->len);
+ BT_ASSERT_DBG(fc);
+ BT_ASSERT_DBG(index < fc->options->len);
return &g_array_index(fc->options, struct ctf_named_field_class,
index);
}
uint64_t i;
struct ctf_named_field_class *ret_named_fc = NULL;
- BT_ASSERT(fc);
- BT_ASSERT(name);
+ BT_ASSERT_DBG(fc);
+ BT_ASSERT_DBG(name);
for (i = 0; i < fc->options->len; i++) {
struct ctf_named_field_class *named_fc =
ctf_field_class_variant_borrow_range_by_index(
struct ctf_field_class_variant *fc, uint64_t index)
{
- BT_ASSERT(fc);
- BT_ASSERT(index < fc->ranges->len);
+ BT_ASSERT_DBG(fc);
+ BT_ASSERT_DBG(index < fc->ranges->len);
return &g_array_index(fc->ranges, struct ctf_field_class_variant_range,
index);
}
ctf_field_class_struct_borrow_member_by_index(
(void *) comp_fc, index);
- BT_ASSERT(named_fc);
+ BT_ASSERT_DBG(named_fc);
fc = named_fc->fc;
break;
}
ctf_field_class_variant_borrow_option_by_index(
(void *) comp_fc, index);
- BT_ASSERT(named_fc);
+ BT_ASSERT_DBG(named_fc);
fc = named_fc->fc;
break;
}
int64_t ctf_field_path_borrow_index_by_index(struct ctf_field_path *fp,
uint64_t index)
{
- BT_ASSERT(fp);
- BT_ASSERT(index < fp->path->len);
+ BT_ASSERT_DBG(fp);
+ BT_ASSERT_DBG(index < fp->path->len);
return g_array_index(fp->path, int64_t, index);
}
abort();
}
- BT_ASSERT(fc);
+ BT_ASSERT_DBG(fc);
for (i = 0; i < field_path->path->len; i++) {
int64_t child_index =
struct ctf_field_class *child_fc =
ctf_field_class_compound_borrow_field_class_by_index(
fc, child_index);
- BT_ASSERT(child_fc);
+ BT_ASSERT_DBG(child_fc);
fc = child_fc;
}
- BT_ASSERT(fc);
+ BT_ASSERT_DBG(fc);
return fc;
}
struct ctf_event_class *ctf_stream_class_borrow_event_class_by_id(
struct ctf_stream_class *sc, uint64_t type)
{
- BT_ASSERT(sc);
+ BT_ASSERT_DBG(sc);
return g_hash_table_lookup(sc->event_classes_by_id,
GUINT_TO_POINTER((guint) type));
}
uint64_t i;
struct ctf_stream_class *ret_sc = NULL;
- BT_ASSERT(tc);
+ BT_ASSERT_DBG(tc);
for (i = 0; i < tc->stream_classes->len; i++) {
struct ctf_stream_class *sc = tc->stream_classes->pdata[i];
uint64_t i;
struct ctf_clock_class *ret_cc = NULL;
- BT_ASSERT(tc);
- BT_ASSERT(name);
+ BT_ASSERT_DBG(tc);
+ BT_ASSERT_DBG(name);
for (i = 0; i < tc->clock_classes->len; i++) {
struct ctf_clock_class *cc = tc->clock_classes->pdata[i];
- BT_ASSERT(cc->name);
+ BT_ASSERT_DBG(cc->name);
if (strcmp(cc->name->str, name) == 0) {
ret_cc = cc;
goto end;
struct ctf_trace_class_env_entry *ctf_trace_class_borrow_env_entry_by_index(
struct ctf_trace_class *tc, uint64_t index)
{
- BT_ASSERT(tc);
- BT_ASSERT(index < tc->env_entries->len);
+ BT_ASSERT_DBG(tc);
+ BT_ASSERT_DBG(index < tc->env_entries->len);
return &g_array_index(tc->env_entries, struct ctf_trace_class_env_entry,
index);
}
struct ctf_trace_class_env_entry *ret_entry = NULL;
uint64_t i;
- BT_ASSERT(tc);
- BT_ASSERT(name);
+ BT_ASSERT_DBG(tc);
+ BT_ASSERT_DBG(name);
for (i = 0; i < tc->env_entries->len; i++) {
struct ctf_trace_class_env_entry *env_entry =
bt_trace_class *ctf_metadata_decoder_get_ir_trace_class(
struct ctf_metadata_decoder *mdec)
{
- BT_ASSERT(mdec);
- BT_ASSERT(mdec->config.create_trace_class);
+ BT_ASSERT_DBG(mdec);
+ BT_ASSERT_DBG(mdec->config.create_trace_class);
return ctf_visitor_generate_ir_get_ir_trace_class(mdec->visitor);
}
struct ctf_trace_class *ctf_metadata_decoder_borrow_ctf_trace_class(
struct ctf_metadata_decoder *mdec)
{
- BT_ASSERT(mdec);
- BT_ASSERT(mdec->config.create_trace_class);
+ BT_ASSERT_DBG(mdec);
+ BT_ASSERT_DBG(mdec->config.create_trace_class);
return ctf_visitor_generate_ir_borrow_ctf_trace_class(mdec->visitor);
}
BT_HIDDEN
const char *ctf_metadata_decoder_get_text(struct ctf_metadata_decoder *mdec)
{
- BT_ASSERT(mdec);
- BT_ASSERT(mdec->config.keep_plain_text);
+ BT_ASSERT_DBG(mdec);
+ BT_ASSERT_DBG(mdec->config.keep_plain_text);
return mdec->text->str;
}
BT_HIDDEN
int ctf_metadata_decoder_get_byte_order(struct ctf_metadata_decoder *mdec)
{
- BT_ASSERT(mdec);
+ BT_ASSERT_DBG(mdec);
return mdec->bo;
}
{
int ret = 0;
- BT_ASSERT(mdec);
+ BT_ASSERT_DBG(mdec);
if (!mdec->is_uuid_set) {
ret = -1;
return -1;
}
}
- BT_ASSERT(nr_char > 0);
+ BT_ASSERT_DBG(nr_char > 0);
buffer[nr_char] = '\0';
*buf_len = nr_char;
return 0;
{
struct ctx *ctx = (void *) visitor;
- BT_ASSERT(ctx);
+ BT_ASSERT_DBG(ctx);
if (ctx->trace_class) {
bt_trace_class_get_ref(ctx->trace_class);
{
struct ctx *ctx = (void *) visitor;
- BT_ASSERT(ctx);
- BT_ASSERT(ctx->ctf_tc);
+ BT_ASSERT_DBG(ctx);
+ BT_ASSERT_DBG(ctx->ctf_tc);
return ctx->ctf_tc;
}
{
struct bt_msg_iter *notit;
- BT_ASSERT(stack);
+ BT_ASSERT_DBG(stack);
notit = stack->notit;
BT_COMP_LOGD("Destroying stack: addr=%p", stack);
struct stack_entry *entry;
struct bt_msg_iter *notit;
- BT_ASSERT(stack);
+ BT_ASSERT_DBG(stack);
notit = stack->notit;
- BT_ASSERT(base);
+ BT_ASSERT_DBG(base);
BT_COMP_LOGT("Pushing base field on stack: stack-addr=%p, "
"stack-size-before=%zu, stack-size-after=%zu",
stack, stack->size, stack->size + 1);
static inline
unsigned int stack_size(struct stack *stack)
{
- BT_ASSERT(stack);
+ BT_ASSERT_DBG(stack);
return stack->size;
}
{
struct bt_msg_iter *notit;
- BT_ASSERT(stack);
- BT_ASSERT(stack_size(stack));
+ BT_ASSERT_DBG(stack);
+ BT_ASSERT_DBG(stack_size(stack));
notit = stack->notit;
BT_COMP_LOGT("Popping from stack: "
"stack-addr=%p, stack-size-before=%zu, stack-size-after=%zu",
static inline
struct stack_entry *stack_top(struct stack *stack)
{
- BT_ASSERT(stack);
- BT_ASSERT(stack_size(stack));
+ BT_ASSERT_DBG(stack);
+ BT_ASSERT_DBG(stack_size(stack));
return &g_array_index(stack->entries, struct stack_entry,
stack->size - 1);
}
static
void stack_clear(struct stack *stack)
{
- BT_ASSERT(stack);
+ BT_ASSERT_DBG(stack);
stack->size = 0;
}
enum bt_msg_iter_status status = BT_MSG_ITER_STATUS_OK;
bt_message *msg = NULL;
- BT_ASSERT(notit->meta.ec);
- BT_ASSERT(notit->packet);
+ BT_ASSERT_DBG(notit->meta.ec);
+ BT_ASSERT_DBG(notit->packet);
BT_COMP_LOGD("Creating event message from event class and packet: "
"notit-addr=%p, ec-addr=%p, ec-name=\"%s\", packet-addr=%p",
notit, notit->meta.ec,
notit->meta.ec->name->str,
notit->packet);
- BT_ASSERT(notit->msg_iter);
- BT_ASSERT(notit->meta.sc);
+ BT_ASSERT_DBG(notit->msg_iter);
+ BT_ASSERT_DBG(notit->meta.sc);
if (bt_stream_class_borrow_default_clock_class(notit->meta.sc->ir_sc)) {
msg = bt_message_event_create_with_packet_and_default_clock_snapshot(
notit->event = bt_message_event_borrow_event(
notit->event_msg);
- BT_ASSERT(notit->event);
+ BT_ASSERT_DBG(notit->event);
next_state:
notit->state = STATE_DSCOPE_EVENT_COMMON_CONTEXT_BEGIN;
}
if (event_common_context_fc->in_ir && !notit->dry_run) {
- BT_ASSERT(!notit->dscopes.event_common_context);
+ BT_ASSERT_DBG(!notit->dscopes.event_common_context);
notit->dscopes.event_common_context =
bt_event_borrow_common_context_field(
notit->event);
- BT_ASSERT(notit->dscopes.event_common_context);
+ BT_ASSERT_DBG(notit->dscopes.event_common_context);
}
BT_COMP_LOGT("Decoding event common context field: "
}
if (event_spec_context_fc->in_ir && !notit->dry_run) {
- BT_ASSERT(!notit->dscopes.event_spec_context);
+ BT_ASSERT_DBG(!notit->dscopes.event_spec_context);
notit->dscopes.event_spec_context =
bt_event_borrow_specific_context_field(
notit->event);
- BT_ASSERT(notit->dscopes.event_spec_context);
+ BT_ASSERT_DBG(notit->dscopes.event_spec_context);
}
BT_COMP_LOGT("Decoding event specific context field: "
}
if (event_payload_fc->in_ir && !notit->dry_run) {
- BT_ASSERT(!notit->dscopes.event_payload);
+ BT_ASSERT_DBG(!notit->dscopes.event_payload);
notit->dscopes.event_payload =
bt_event_borrow_payload_field(
notit->event);
- BT_ASSERT(notit->dscopes.event_payload);
+ BT_ASSERT_DBG(notit->dscopes.event_payload);
}
BT_COMP_LOGT("Decoding event payload field: "
bt_field_class_type base_fc_type;
size_t index;
- BT_ASSERT(!stack_empty(notit->stack));
+ BT_ASSERT_DBG(!stack_empty(notit->stack));
index = stack_top(notit->stack)->index;
base_field = stack_top(notit->stack)->base;
- BT_ASSERT(base_field);
+ BT_ASSERT_DBG(base_field);
base_fc = bt_field_borrow_class_const(base_field);
- BT_ASSERT(base_fc);
+ BT_ASSERT_DBG(base_fc);
base_fc_type = bt_field_class_get_type(base_fc);
if (base_fc_type == BT_FIELD_CLASS_TYPE_STRUCTURE) {
- BT_ASSERT(index <
+ BT_ASSERT_DBG(index <
bt_field_class_structure_get_member_count(
bt_field_borrow_class_const(
base_field)));
base_field, index);
} else if (bt_field_class_type_is(base_fc_type,
BT_FIELD_CLASS_TYPE_ARRAY)) {
- BT_ASSERT(index < bt_field_array_get_length(base_field));
+ BT_ASSERT_DBG(index < bt_field_array_get_length(base_field));
next_field = bt_field_array_borrow_element_field_by_index(
base_field, index);
} else if (bt_field_class_type_is(base_fc_type,
BT_FIELD_CLASS_TYPE_VARIANT)) {
- BT_ASSERT(index == 0);
+ BT_ASSERT_DBG(index == 0);
next_field = bt_field_variant_borrow_selected_option_field(
base_field);
} else {
abort();
}
- BT_ASSERT(next_field);
+ BT_ASSERT_DBG(next_field);
return next_field;
}
uint64_t new_val_mask;
uint64_t cur_value_masked;
- BT_ASSERT(new_val_size > 0);
+ BT_ASSERT_DBG(new_val_size > 0);
/*
* Special case for a 64-bit new value, which is the limit
}
field = borrow_next_field(notit);
- BT_ASSERT(field);
- BT_ASSERT(bt_field_borrow_class_const(field) == fc->ir_fc);
- BT_ASSERT(bt_field_class_type_is(bt_field_get_class_type(field),
+ BT_ASSERT_DBG(field);
+ BT_ASSERT_DBG(bt_field_borrow_class_const(field) == fc->ir_fc);
+ BT_ASSERT_DBG(bt_field_class_type_is(bt_field_get_class_type(field),
BT_FIELD_CLASS_TYPE_UNSIGNED_INTEGER));
bt_field_integer_unsigned_set_value(field, value);
stack_top(notit->stack)->index++;
"notit-addr=%p, bfcr-addr=%p, fc-addr=%p, "
"fc-type=%d, fc-in-ir=%d, value=%" PRIu64,
notit, notit->bfcr, fc, fc->type, fc->in_ir, value);
- BT_ASSERT(int_fc->meaning == CTF_FIELD_CLASS_MEANING_NONE);
- BT_ASSERT(!int_fc->mapped_clock_class);
- BT_ASSERT(int_fc->storing_index < 0);
+ BT_ASSERT_DBG(int_fc->meaning == CTF_FIELD_CLASS_MEANING_NONE);
+ BT_ASSERT_DBG(!int_fc->mapped_clock_class);
+ BT_ASSERT_DBG(int_fc->storing_index < 0);
if (G_UNLIKELY(!fc->in_ir || notit->dry_run)) {
goto end;
}
string_field = stack_top(notit->stack)->base;
- BT_ASSERT(bt_field_get_class_type(string_field) ==
+ BT_ASSERT_DBG(bt_field_get_class_type(string_field) ==
BT_FIELD_CLASS_TYPE_STRING);
/* Append character */
"notit-addr=%p, bfcr-addr=%p, fc-addr=%p, "
"fc-type=%d, fc-in-ir=%d, value=%" PRId64,
notit, notit->bfcr, fc, fc->type, fc->in_ir, value);
- BT_ASSERT(int_fc->meaning == CTF_FIELD_CLASS_MEANING_NONE);
+ BT_ASSERT_DBG(int_fc->meaning == CTF_FIELD_CLASS_MEANING_NONE);
if (G_UNLIKELY(int_fc->storing_index >= 0)) {
g_array_index(notit->stored_values, uint64_t,
}
field = borrow_next_field(notit);
- BT_ASSERT(field);
- BT_ASSERT(bt_field_borrow_class_const(field) == fc->ir_fc);
- BT_ASSERT(bt_field_class_type_is(bt_field_get_class_type(field),
+ BT_ASSERT_DBG(field);
+ BT_ASSERT_DBG(bt_field_borrow_class_const(field) == fc->ir_fc);
+ BT_ASSERT_DBG(bt_field_class_type_is(bt_field_get_class_type(field),
BT_FIELD_CLASS_TYPE_SIGNED_INTEGER));
bt_field_integer_signed_set_value(field, value);
stack_top(notit->stack)->index++;
field = borrow_next_field(notit);
bt_field_class_type type = bt_field_get_class_type(field);
- BT_ASSERT(field);
- BT_ASSERT(bt_field_borrow_class_const(field) == fc->ir_fc);
- BT_ASSERT(bt_field_class_type_is(type, BT_FIELD_CLASS_TYPE_REAL));
+ BT_ASSERT_DBG(field);
+ BT_ASSERT_DBG(bt_field_borrow_class_const(field) == fc->ir_fc);
+ BT_ASSERT_DBG(bt_field_class_type_is(type, BT_FIELD_CLASS_TYPE_REAL));
if (type == BT_FIELD_CLASS_TYPE_SINGLE_PRECISION_REAL) {
bt_field_real_single_precision_set_value(field, (float) value);
}
field = borrow_next_field(notit);
- BT_ASSERT(field);
- BT_ASSERT(bt_field_borrow_class_const(field) == fc->ir_fc);
- BT_ASSERT(bt_field_get_class_type(field) ==
+ BT_ASSERT_DBG(field);
+ BT_ASSERT_DBG(bt_field_borrow_class_const(field) == fc->ir_fc);
+ BT_ASSERT_DBG(bt_field_get_class_type(field) ==
BT_FIELD_CLASS_TYPE_STRING);
bt_field_string_clear(field);
}
field = stack_top(notit->stack)->base;
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
/* Append current substring */
ret = bt_field_string_append_with_length(field, value, len);
field = notit->cur_dscope_field;
} else {
field = borrow_next_field(notit);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
}
/* Push field */
- BT_ASSERT(field);
- BT_ASSERT(bt_field_borrow_class_const(field) == fc->ir_fc);
+ BT_ASSERT_DBG(field);
+ BT_ASSERT_DBG(bt_field_borrow_class_const(field) == fc->ir_fc);
stack_push(notit->stack, field);
/*
struct ctf_field_class_array_base *array_fc = (void *) fc;
if (array_fc->is_text) {
- BT_ASSERT(bt_field_get_class_type(field) ==
+ BT_ASSERT_DBG(bt_field_get_class_type(field) ==
BT_FIELD_CLASS_TYPE_STRING);
notit->done_filling_string = false;
bt_field_string_clear(field);
goto end;
}
- BT_ASSERT(!stack_empty(notit->stack));
- BT_ASSERT(bt_field_borrow_class_const(stack_top(notit->stack)->base) ==
+ BT_ASSERT_DBG(!stack_empty(notit->stack));
+ BT_ASSERT_DBG(bt_field_borrow_class_const(stack_top(notit->stack)->base) ==
fc->ir_fc);
/*
struct ctf_field_class_array_base *array_fc = (void *) fc;
if (array_fc->is_text) {
- BT_ASSERT(bt_field_get_class_type(
+ BT_ASSERT_DBG(bt_field_get_class_type(
stack_top(notit->stack)->base) ==
BT_FIELD_CLASS_TYPE_STRING);
bt_bfcr_set_unsigned_int_cb(notit->bfcr,
}
seq_field = stack_top(notit->stack)->base;
- BT_ASSERT(seq_field);
+ BT_ASSERT_DBG(seq_field);
/*
* bfcr_get_sequence_length_cb() also gets called back for a
* is a sequence field.
*/
if (!seq_fc->base.is_text) {
- BT_ASSERT(bt_field_class_type_is(
+ BT_ASSERT_DBG(bt_field_class_type_is(
bt_field_get_class_type(seq_field),
BT_FIELD_CLASS_TYPE_DYNAMIC_ARRAY));
ret = bt_field_array_dynamic_set_length(seq_field,
{
enum bt_msg_iter_status status = BT_MSG_ITER_STATUS_OK;
- BT_ASSERT(notit);
- BT_ASSERT(message);
+ BT_ASSERT_DBG(notit);
+ BT_ASSERT_DBG(message);
notit->msg_iter = msg_iter;
notit->set_stream = true;
BT_COMP_LOGD("Getting next message: notit-addr=%p", notit);
switch (notit->state) {
case STATE_EMIT_MSG_EVENT:
- BT_ASSERT(notit->event_msg);
+ BT_ASSERT_DBG(notit->event_msg);
/*
* Check if we need to emit the delayed packet
{
enum bt_msg_iter_status status = BT_MSG_ITER_STATUS_OK;
- BT_ASSERT(notit);
+ BT_ASSERT_DBG(notit);
notit->set_stream = false;
do {
{
enum bt_msg_iter_status status = BT_MSG_ITER_STATUS_OK;
- BT_ASSERT(notit);
- BT_ASSERT(clock_snapshot);
+ BT_ASSERT_DBG(notit);
+ BT_ASSERT_DBG(clock_snapshot);
status = decode_until_state(notit, target_state_1, target_state_2);
if (status != BT_MSG_ITER_STATUS_OK) {
goto end;
{
enum bt_msg_iter_status status;
- BT_ASSERT(notit);
- BT_ASSERT(props);
+ BT_ASSERT_DBG(notit);
+ BT_ASSERT_DBG(props);
status = read_packet_header_context_fields(notit);
if (status != BT_MSG_ITER_STATUS_OK) {
goto end;
fs_sink_ctf_field_class_struct_borrow_member_by_index(
struct fs_sink_ctf_field_class_struct *fc, uint64_t index)
{
- BT_ASSERT(fc);
- BT_ASSERT(index < fc->members->len);
+ BT_ASSERT_DBG(fc);
+ BT_ASSERT_DBG(index < fc->members->len);
return &g_array_index(fc->members, struct fs_sink_ctf_named_field_class,
index);
}
uint64_t i;
struct fs_sink_ctf_named_field_class *ret_named_fc = NULL;
- BT_ASSERT(fc);
- BT_ASSERT(name);
+ BT_ASSERT_DBG(fc);
+ BT_ASSERT_DBG(name);
for (i = 0; i < fc->members->len; i++) {
struct fs_sink_ctf_named_field_class *named_fc =
fs_sink_ctf_field_class_variant_borrow_option_by_index(
struct fs_sink_ctf_field_class_variant *fc, uint64_t index)
{
- BT_ASSERT(fc);
- BT_ASSERT(index < fc->options->len);
+ BT_ASSERT_DBG(fc);
+ BT_ASSERT_DBG(index < fc->options->len);
return &g_array_index(fc->options, struct fs_sink_ctf_named_field_class,
index);
}
uint64_t i;
struct fs_sink_ctf_named_field_class *ret_named_fc = NULL;
- BT_ASSERT(fc);
- BT_ASSERT(name);
+ BT_ASSERT_DBG(fc);
+ BT_ASSERT_DBG(name);
for (i = 0; i < fc->options->len; i++) {
struct fs_sink_ctf_named_field_class *named_fc =
/* Time */
if (stream->sc->default_clock_class) {
- BT_ASSERT(cs);
+ BT_ASSERT_DBG(cs);
ret = bt_ctfser_write_byte_aligned_unsigned_int(&stream->ctfser,
bt_clock_snapshot_get_value(cs), 8, 64, BYTE_ORDER);
if (G_UNLIKELY(ret)) {
/* Common context */
if (stream->sc->event_common_context_fc) {
field = bt_event_borrow_common_context_field_const(event);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
ret = write_struct_field(stream,
(void *) stream->sc->event_common_context_fc,
field, true);
/* Specific context */
if (ec->spec_context_fc) {
field = bt_event_borrow_specific_context_field_const(event);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
ret = write_struct_field(stream, (void *) ec->spec_context_fc,
field, true);
if (G_UNLIKELY(ret)) {
/* Specific context */
if (ec->payload_fc) {
field = bt_event_borrow_payload_field_const(event);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
ret = write_struct_field(stream, (void *) ec->payload_fc,
field, true);
if (G_UNLIKELY(ret)) {
goto end;
}
- BT_ASSERT(ec);
+ BT_ASSERT_DBG(ec);
if (stream->sc->default_clock_class) {
cs = bt_message_event_borrow_default_clock_snapshot_const(
}
}
- BT_ASSERT(stream->packet_state.is_open);
+ BT_ASSERT_DBG(stream->packet_state.is_open);
ret = fs_sink_stream_write_event(stream, cs, ir_event, ec);
if (G_UNLIKELY(ret)) {
status = BT_COMPONENT_CLASS_SINK_CONSUME_METHOD_STATUS_ERROR;
fs_sink = bt_self_component_get_data(
bt_self_component_sink_as_self_component(self_comp));
- BT_ASSERT(fs_sink);
- BT_ASSERT(fs_sink->upstream_iter);
+ BT_ASSERT_DBG(fs_sink);
+ BT_ASSERT_DBG(fs_sink->upstream_iter);
/* Consume messages */
next_status = bt_self_component_port_input_message_iterator_next(
for (i = 0; i < msg_count; i++) {
const bt_message *msg = msgs[i];
- BT_ASSERT(msg);
+ BT_ASSERT_DBG(msg);
switch (bt_message_get_type(msg)) {
case BT_MESSAGE_TYPE_EVENT:
static inline
size_t remaining_mmap_bytes(struct ctf_fs_ds_file *ds_file)
{
- BT_ASSERT(ds_file->mmap_len >= ds_file->request_offset);
+ BT_ASSERT_DBG(ds_file->mmap_len >= ds_file->request_offset);
return ds_file->mmap_len - ds_file->request_offset;
}
{
bt_component_class_message_iterator_next_method_status status;
- BT_ASSERT(msg_iter_data->ds_file);
+ BT_ASSERT_DBG(msg_iter_data->ds_file);
while (true) {
bt_message *msg;
if (ret != LTTNG_LIVE_ITERATOR_STATUS_OK) {
goto end;
}
- BT_ASSERT(lttng_live_stream->state != LTTNG_LIVE_STREAM_EOF);
+ BT_ASSERT_DBG(lttng_live_stream->state != LTTNG_LIVE_STREAM_EOF);
if (lttng_live_stream->state == LTTNG_LIVE_STREAM_QUIESCENT) {
uint64_t last_inact_ts = lttng_live_stream->last_inactivity_ts,
curr_inact_ts = lttng_live_stream->current_inactivity_ts;
bt_logging_level log_level = lttng_live_msg_iter->log_level;
bt_self_component *self_comp = lttng_live_msg_iter->self_comp;
- BT_ASSERT(msg);
- BT_ASSERT(ts_ns);
+ BT_ASSERT_DBG(msg);
+ BT_ASSERT_DBG(ts_ns);
BT_COMP_LOGD("Getting message's timestamp: iter-data-addr=%p, msg-addr=%p, "
"last-msg-ts=%" PRId64, lttng_live_msg_iter, msg,
case BT_MESSAGE_TYPE_EVENT:
clock_class = bt_message_event_borrow_stream_class_default_clock_class_const(
msg);
- BT_ASSERT(clock_class);
+ BT_ASSERT_DBG(clock_class);
clock_snapshot = bt_message_event_borrow_default_clock_snapshot_const(
msg);
}
clock_class = bt_clock_snapshot_borrow_clock_class_const(clock_snapshot);
- BT_ASSERT(clock_class);
+ BT_ASSERT_DBG(clock_class);
ret = bt_clock_snapshot_get_ns_from_origin(clock_snapshot, ts_ns);
if (ret) {
int64_t youngest_candidate_msg_ts = INT64_MAX;
uint64_t stream_iter_idx;
- BT_ASSERT(live_trace);
- BT_ASSERT(live_trace->stream_iterators);
+ BT_ASSERT_DBG(live_trace);
+ BT_ASSERT_DBG(live_trace->stream_iterators);
/*
* Update the current message of every stream iterators of this trace.
* The current msg of every stream must have a timestamp equal or
goto end;
}
- BT_ASSERT(msg);
+ BT_ASSERT_DBG(msg);
/*
* Get the timestamp in nanoseconds from origin of this
}
}
- BT_ASSERT(stream_iter != youngest_candidate_stream_iter);
+ BT_ASSERT_DBG(stream_iter != youngest_candidate_stream_iter);
if (!stream_iter_is_ended) {
if (G_UNLIKELY(youngest_candidate_stream_iter == NULL) ||
* Order the messages in an arbitrary but
* deterministic way.
*/
- BT_ASSERT(stream_iter != youngest_candidate_stream_iter);
+ BT_ASSERT_DBG(stream_iter != youngest_candidate_stream_iter);
int ret = common_muxing_compare_messages(
stream_iter->current_msg,
youngest_candidate_stream_iter->current_msg);
goto end;
}
- BT_ASSERT(session->traces);
+ BT_ASSERT_DBG(session->traces);
/*
* Use while loops here rather then for loops so we can restart the
}
if (!trace_is_ended) {
- BT_ASSERT(stream_iter);
+ BT_ASSERT_DBG(stream_iter);
if (G_UNLIKELY(youngest_candidate_stream_iter == NULL) ||
stream_iter->current_msg_ts_ns < youngest_candidate_msg_ts) {
*count = 0;
- BT_ASSERT(lttng_live_msg_iter);
+ BT_ASSERT_DBG(lttng_live_msg_iter);
/*
* Clear all the invalid message reference that might be left over in
*candidate_stream_iter = NULL;
int64_t youngest_msg_ts_ns = INT64_MAX;
- BT_ASSERT(lttng_live_msg_iter->sessions);
+ BT_ASSERT_DBG(lttng_live_msg_iter->sessions);
session_idx = 0;
/*
* Use a while loop instead of a for loop so we can restart the
goto end;
}
- BT_ASSERT(youngest_stream_iter->current_msg);
+ BT_ASSERT_DBG(youngest_stream_iter->current_msg);
/* Ensure monotonicity. */
- BT_ASSERT(lttng_live_msg_iter->last_msg_ts_ns <=
+ BT_ASSERT_DBG(lttng_live_msg_iter->last_msg_ts_ns <=
youngest_stream_iter->current_msg_ts_ns);
/*
sc_count = bt_trace_class_get_stream_class_count(tc);
for (i = 0; i < sc_count; i++) {
sc = bt_trace_class_borrow_stream_class_by_index_const(tc, i);
- BT_ASSERT(sc);
+ BT_ASSERT_DBG(sc);
cc = bt_stream_class_borrow_default_clock_class_const(sc);
if (cc) {
}
}
end:
- BT_ASSERT(cc);
+ BT_ASSERT_DBG(cc);
return cc;
}
const bt_field *event_payload, *field;
event_payload = bt_event_borrow_payload_field_const(event);
- BT_ASSERT(event_payload);
+ BT_ASSERT_DBG(event_payload);
field = bt_field_structure_borrow_member_field_by_name_const(
event_payload, field_name);
bt_field_string_set_value_status set_status;
bt_field_string_append_status append_status;
- BT_ASSERT(bt_field_get_class_type(curr_field) ==
+ BT_ASSERT_DBG(bt_field_get_class_type(curr_field) ==
BT_FIELD_CLASS_TYPE_STRING);
if (dbg_info_src) {
{
bt_field_string_set_value_status status;
- BT_ASSERT(bt_field_get_class_type(curr_field) ==
+ BT_ASSERT_DBG(bt_field_get_class_type(curr_field) ==
BT_FIELD_CLASS_TYPE_STRING);
if (dbg_info_src && dbg_info_src->func) {
status = bt_field_string_set_value(curr_field,
bt_field_string_set_value_status set_status;
bt_field_string_append_status append_status;
- BT_ASSERT(bt_field_get_class_type(curr_field) ==
+ BT_ASSERT_DBG(bt_field_get_class_type(curr_field) ==
BT_FIELD_CLASS_TYPE_STRING);
if (dbg_info_src && dbg_info_src->src_path) {
bt_field_string_set_value_status status;
bt_field *bin_field, *func_field, *src_field;
- BT_ASSERT(bt_field_get_class_type(debug_info_field) ==
+ BT_ASSERT_DBG(bt_field_get_class_type(debug_info_field) ==
BT_FIELD_CLASS_TYPE_STRUCTURE);
bin_field = bt_field_structure_borrow_member_field_by_name(
src_field = bt_field_structure_borrow_member_field_by_name(
debug_info_field, "src");
- BT_ASSERT(bt_field_get_class_type(bin_field) ==
+ BT_ASSERT_DBG(bt_field_get_class_type(bin_field) ==
BT_FIELD_CLASS_TYPE_STRING);
- BT_ASSERT(bt_field_get_class_type(func_field) ==
+ BT_ASSERT_DBG(bt_field_get_class_type(func_field) ==
BT_FIELD_CLASS_TYPE_STRING);
- BT_ASSERT(bt_field_get_class_type(src_field) ==
+ BT_ASSERT_DBG(bt_field_get_class_type(src_field) ==
BT_FIELD_CLASS_TYPE_STRING);
status = bt_field_string_set_value(bin_field, "");
struct debug_info_source *dbg_info_src;
const bt_field_class *debug_info_fc;
- BT_ASSERT(bt_field_get_class_type(debug_info_field) ==
+ BT_ASSERT_DBG(bt_field_get_class_type(debug_info_field) ==
BT_FIELD_CLASS_TYPE_STRUCTURE);
debug_info_fc = bt_field_borrow_class_const(debug_info_field);
- BT_ASSERT(bt_field_class_structure_get_member_count(debug_info_fc) == 3);
+ BT_ASSERT_DBG(bt_field_class_structure_get_member_count(
+ debug_info_fc) == 3);
dbg_info_src = debug_info_query(debug_info, vpid, ip);
out_event_class = trace_ir_mapping_create_new_mapped_event_class(
debug_it->ir_maps, in_event_class);
}
- BT_ASSERT(out_event_class);
+ BT_ASSERT_DBG(out_event_class);
/* Borrow the input stream. */
in_stream = bt_event_borrow_stream_const(in_event);
- BT_ASSERT(in_stream);
+ BT_ASSERT_DBG(in_stream);
out_stream = trace_ir_mapping_borrow_mapped_stream(debug_it->ir_maps,
in_stream);
- BT_ASSERT(in_stream);
+ BT_ASSERT_DBG(in_stream);
/* Borrow the input and output packets. */
in_packet = bt_event_borrow_packet_const(in_event);
status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK;
self_comp = bt_self_message_iterator_borrow_component(self_msg_iter);
- BT_ASSERT(self_comp);
+ BT_ASSERT_DBG(self_comp);
debug_info = bt_self_component_get_data(self_comp);
- BT_ASSERT(debug_info);
+ BT_ASSERT_DBG(debug_info);
debug_info_msg_iter = bt_self_message_iterator_get_data(self_msg_iter);
- BT_ASSERT(debug_info_msg_iter);
+ BT_ASSERT_DBG(debug_info_msg_iter);
upstream_iterator = debug_info_msg_iter->msg_iter;
- BT_ASSERT(upstream_iterator);
+ BT_ASSERT_DBG(upstream_iterator);
upstream_iterator_ret_status =
bt_self_component_port_input_message_iterator_next(
* There should never be more received messages than the capacity we
* provided.
*/
- BT_ASSERT(*count <= capacity);
+ BT_ASSERT_DBG(*count <= capacity);
for (curr_msg_idx = 0; curr_msg_idx < *count; curr_msg_idx++) {
out_message = handle_message(debug_info_msg_iter,
if (in_common_ctx_field) {
out_common_ctx_field =
bt_event_borrow_common_context_field(out_event);
- BT_ASSERT(out_common_ctx_field);
+ BT_ASSERT_DBG(out_common_ctx_field);
copy_field_content(in_common_ctx_field,
out_common_ctx_field, log_level, self_comp);
}
if (in_specific_ctx_field) {
out_specific_ctx_field =
bt_event_borrow_specific_context_field(out_event);
- BT_ASSERT(out_specific_ctx_field);
+ BT_ASSERT_DBG(out_specific_ctx_field);
copy_field_content(in_specific_ctx_field,
out_specific_ctx_field, log_level, self_comp);
}
in_payload_field = bt_event_borrow_payload_field_const(in_event);
if (in_payload_field) {
out_payload_field = bt_event_borrow_payload_field(out_event);
- BT_ASSERT(out_payload_field);
+ BT_ASSERT_DBG(out_payload_field);
copy_field_content(in_payload_field,
out_payload_field, log_level, self_comp);
}
in_fc_type = bt_field_get_class_type(in_field);
out_fc_type = bt_field_get_class_type(out_field);
- BT_ASSERT(in_fc_type == out_fc_type);
+ BT_ASSERT_DBG(in_fc_type == out_fc_type);
BT_COMP_LOGT("Copying content of field: in-f-addr=%p, out-f-addr=%p",
in_field, out_field);
bt_field_option_set_has_field(out_field, BT_TRUE);
out_option_field = bt_field_option_borrow_field(
out_field);
- BT_ASSERT(out_option_field);
+ BT_ASSERT_DBG(out_option_field);
copy_field_content(in_option_field, out_option_field,
log_level, self_comp);
} else {
bt_stream_class *borrow_mapped_stream_class(struct trace_ir_metadata_maps *md_maps,
const bt_stream_class *in_stream_class)
{
- BT_ASSERT(md_maps);
- BT_ASSERT(in_stream_class);
+ BT_ASSERT_DBG(md_maps);
+ BT_ASSERT_DBG(in_stream_class);
return g_hash_table_lookup(md_maps->stream_class_map,
(gpointer) in_stream_class);
bt_stream *borrow_mapped_stream(struct trace_ir_data_maps *d_maps,
const bt_stream *in_stream)
{
- BT_ASSERT(d_maps);
- BT_ASSERT(in_stream);
+ BT_ASSERT_DBG(d_maps);
+ BT_ASSERT_DBG(in_stream);
return g_hash_table_lookup(d_maps->stream_map, (gpointer) in_stream);
}
{
struct trace_ir_data_maps *d_maps;
- BT_ASSERT(ir_maps);
- BT_ASSERT(in_stream);
+ BT_ASSERT_DBG(ir_maps);
+ BT_ASSERT_DBG(in_stream);
d_maps = borrow_data_maps_from_input_stream(ir_maps, in_stream);
/* Return the mapped stream. */
{
struct trace_ir_metadata_maps *md_maps;
- BT_ASSERT(ir_maps);
- BT_ASSERT(in_event_class);
+ BT_ASSERT_DBG(ir_maps);
+ BT_ASSERT_DBG(in_event_class);
md_maps = borrow_metadata_maps_from_input_event_class(ir_maps,
in_event_class);
bt_packet *borrow_mapped_packet(struct trace_ir_data_maps *d_maps,
const bt_packet *in_packet)
{
- BT_ASSERT(d_maps);
- BT_ASSERT(in_packet);
+ BT_ASSERT_DBG(d_maps);
+ BT_ASSERT_DBG(in_packet);
return g_hash_table_lookup(d_maps->packet_map, (gpointer) in_packet);
}
const bt_packet *in_packet)
{
struct trace_ir_data_maps *d_maps;
- BT_ASSERT(ir_maps);
- BT_ASSERT(in_packet);
+ BT_ASSERT_DBG(ir_maps);
+ BT_ASSERT_DBG(in_packet);
d_maps = borrow_data_maps_from_input_packet(ir_maps, in_packet);
struct trace_ir_metadata_maps *md_maps,
const bt_clock_class *in_clock_class)
{
- BT_ASSERT(md_maps);
- BT_ASSERT(in_clock_class);
+ BT_ASSERT_DBG(md_maps);
+ BT_ASSERT_DBG(in_clock_class);
return g_hash_table_lookup(md_maps->clock_class_map,
(gpointer) in_clock_class);
details_comp = bt_self_component_get_data(
bt_self_component_sink_as_self_component(comp));
- BT_ASSERT(details_comp);
- BT_ASSERT(details_comp->msg_iter);
+ BT_ASSERT_DBG(details_comp);
+ BT_ASSERT_DBG(details_comp->msg_iter);
/* Consume messages */
next_status = bt_self_component_port_input_message_iterator_next(
{
struct details_trace_class_meta *details_tc_meta;
- BT_ASSERT(ctx->details_comp->cfg.with_meta);
- BT_ASSERT(ctx->details_comp->meta);
+ BT_ASSERT_DBG(ctx->details_comp->cfg.with_meta);
+ BT_ASSERT_DBG(ctx->details_comp->meta);
details_tc_meta = g_hash_table_lookup(ctx->details_comp->meta, tc);
if (!details_tc_meta) {
/* Not found: create one */
goto end;
}
- BT_ASSERT(ctx->details_comp->meta);
+ BT_ASSERT_DBG(ctx->details_comp->meta);
details_tc_meta = g_hash_table_lookup(ctx->details_comp->meta, tc);
- BT_ASSERT(details_tc_meta);
+ BT_ASSERT_DBG(details_tc_meta);
need_to_write =
!g_hash_table_lookup(details_tc_meta->objects, obj);
goto end;
}
- BT_ASSERT(ctx->details_comp->meta);
+ BT_ASSERT_DBG(ctx->details_comp->meta);
details_tc_meta = g_hash_table_lookup(ctx->details_comp->meta, tc);
need_to_write = !details_tc_meta;
int ret = 0;
struct details_trace *details_trace = NULL;
- BT_ASSERT(unique_id);
- BT_ASSERT(ctx->details_comp->traces);
+ BT_ASSERT_DBG(unique_id);
+ BT_ASSERT_DBG(ctx->details_comp->traces);
if (!bt_g_hash_table_contains(ctx->details_comp->traces,
trace)) {
/* Not found: create one */
static inline
void incr_indent_by(struct details_write_ctx *ctx, unsigned int value)
{
- BT_ASSERT(ctx);
+ BT_ASSERT_DBG(ctx);
ctx->indent_level += value;
}
static inline
void decr_indent_by(struct details_write_ctx *ctx, unsigned int value)
{
- BT_ASSERT(ctx);
- BT_ASSERT(ctx->indent_level >= value);
+ BT_ASSERT_DBG(ctx);
+ BT_ASSERT_DBG(ctx->indent_level >= value);
ctx->indent_level -= value;
}
static inline
void write_nl(struct details_write_ctx *ctx)
{
- BT_ASSERT(ctx);
+ BT_ASSERT_DBG(ctx);
g_string_append_c(ctx->str, '\n');
}
static inline
void write_sp(struct details_write_ctx *ctx)
{
- BT_ASSERT(ctx);
+ BT_ASSERT_DBG(ctx);
g_string_append_c(ctx->str, ' ');
}
{
uint64_t i;
- BT_ASSERT(ctx);
+ BT_ASSERT_DBG(ctx);
for (i = 0; i < ctx->indent_level; i++) {
write_sp(ctx);
void write_str_prop_line(struct details_write_ctx *ctx, const char *prop_name,
const char *prop_value)
{
- BT_ASSERT(prop_value);
+ BT_ASSERT_DBG(prop_value);
write_indent(ctx);
write_prop_name(ctx, prop_name);
g_string_append(ctx->str, ": ");
void write_uuid_prop_line(struct details_write_ctx *ctx, const char *prop_name,
bt_uuid uuid)
{
- BT_ASSERT(uuid);
+ BT_ASSERT_DBG(uuid);
write_indent(ctx);
write_prop_name(ctx, prop_name);
g_string_append_printf(ctx->str,
{
GPtrArray *keys = data;
- BT_ASSERT(keys);
+ BT_ASSERT_DBG(keys);
g_ptr_array_add(keys, (void *) key);
return BT_TRUE;
}
GPtrArray *keys = g_ptr_array_new();
char buf[64];
- BT_ASSERT(keys);
+ BT_ASSERT_DBG(keys);
/* Write field's name */
if (name) {
bt_value_map_foreach_entry_const(value,
map_value_foreach_add_key_to_array, keys);
- BT_ASSERT(foreach_status ==
+ BT_ASSERT_DBG(foreach_status ==
BT_VALUE_MAP_FOREACH_ENTRY_CONST_STATUS_OK);
g_ptr_array_sort(keys, (GCompareFunc) compare_strings);
void write_user_attributes(struct details_write_ctx *ctx,
const bt_value *user_attrs, bool write_newline, bool *written)
{
- BT_ASSERT(user_attrs);
+ BT_ASSERT_DBG(user_attrs);
if (!bt_value_map_is_empty(user_attrs)) {
write_value(ctx, user_attrs, "User attributes");
mappings = g_ptr_array_new_with_free_func(
(GDestroyNotify) destroy_enum_field_class_mapping);
- BT_ASSERT(mappings);
+ BT_ASSERT_DBG(mappings);
/*
* Copy field class's mappings to our own arrays and structures
struct enum_field_class_mapping *mapping = g_new0(
struct enum_field_class_mapping, 1);
- BT_ASSERT(mapping);
+ BT_ASSERT_DBG(mapping);
if (is_signed) {
fc_mapping = bt_field_class_enumeration_signed_borrow_mapping_by_index_const(
fc_mapping));
mapping->ranges = range_set_to_int_ranges(fc_range_set,
is_signed);
- BT_ASSERT(mapping->ranges);
+ BT_ASSERT_DBG(mapping->ranges);
g_ptr_array_add(mappings, mapping);
}
uint64_t i;
int_ranges = range_set_to_int_ranges(orig_ranges, is_signed);
- BT_ASSERT(int_ranges);
+ BT_ASSERT_DBG(int_ranges);
for (i = 0; i < int_ranges->len; i++) {
struct int_range *range = int_range_at(int_ranges, i);
sel_field_path =
bt_field_class_variant_with_selector_field_borrow_selector_field_path_const(
fc);
- BT_ASSERT(sel_field_path);
+ BT_ASSERT_DBG(sel_field_path);
}
g_string_append(ctx->str, " (");
ranges, selector_is_signed);
uint64_t i;
- BT_ASSERT(sorted_ranges);
- BT_ASSERT(sorted_ranges->len > 0);
+ BT_ASSERT_DBG(sorted_ranges);
+ BT_ASSERT_DBG(sorted_ranges->len > 0);
write_prop_name_line(ctx, "Selector ranges");
for (i = 0; i < sorted_ranges->len; i++) {
void write_root_field_class(struct details_write_ctx *ctx, const char *name,
const bt_field_class *fc)
{
- BT_ASSERT(name);
- BT_ASSERT(fc);
+ BT_ASSERT_DBG(name);
+ BT_ASSERT_DBG(fc);
write_indent(ctx);
write_prop_name(ctx, name);
g_string_append(ctx->str, ": ");
{
int ret = 0;
- BT_ASSERT(tc);
+ BT_ASSERT_DBG(tc);
if (details_need_to_write_trace_class(ctx, tc)) {
uint64_t sc_i;
if (sc && details_need_to_write_meta_object(ctx, tc, sc)) {
uint64_t ec_i;
- BT_ASSERT(tc);
+ BT_ASSERT_DBG(tc);
if (ctx->details_comp->cfg.compact &&
ctx->details_comp->printed_something) {
}
if (ec && details_need_to_write_meta_object(ctx, tc, ec)) {
- BT_ASSERT(sc);
+ BT_ASSERT_DBG(sc);
if (ctx->details_comp->cfg.compact &&
ctx->details_comp->printed_something) {
void write_root_field(struct details_write_ctx *ctx, const char *name,
const bt_field *field)
{
- BT_ASSERT(name);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(name);
+ BT_ASSERT_DBG(field);
write_indent(ctx);
write_prop_name(ctx, name);
g_string_append(ctx->str, ":");
bt_trace_borrow_environment_entry_value_by_name_const(
trace, name);
- BT_ASSERT(value);
+ BT_ASSERT_DBG(value);
write_compound_member_name(ctx, name);
write_sp(ctx);
/* Write times */
if (beginning_cs) {
write_time(ctx, beginning_cs);
- BT_ASSERT(end_cs);
+ BT_ASSERT_DBG(end_cs);
write_time(ctx, end_cs);
}
if (has_timestamp) {
/* Set new start for the message portion of the line */
*new_start = strchr(line, ']');
- BT_ASSERT(*new_start);
+ BT_ASSERT_DBG(*new_start);
(*new_start)++;
if ((*new_start)[0] == ' ') {
}
event = bt_message_event_borrow_event(msg);
- BT_ASSERT(event);
+ BT_ASSERT_DBG(event);
goto end;
error:
int ret;
ep_field = bt_event_borrow_payload_field(event);
- BT_ASSERT(ep_field);
+ BT_ASSERT_DBG(ep_field);
str_field = bt_field_structure_borrow_member_field_by_index(
ep_field, 0);
if (!str_field) {
}
event = bt_message_event_borrow_event(msg);
- BT_ASSERT(event);
+ BT_ASSERT_DBG(event);
ret = fill_event_payload_from_line(dmesg_comp, new_start, event);
if (ret) {
BT_COMP_LOGE("Cannot fill event payload field from line: "
bt_component_class_message_iterator_next_method_status status =
BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK;
- BT_ASSERT(dmesg_msg_iter);
+ BT_ASSERT_DBG(dmesg_msg_iter);
dmesg_comp = dmesg_msg_iter->dmesg_comp;
- BT_ASSERT(dmesg_comp);
+ BT_ASSERT_DBG(dmesg_comp);
if (dmesg_msg_iter->state == STATE_DONE) {
status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_END;
goto end;
}
- BT_ASSERT(dmesg_msg_iter->linebuf);
+ BT_ASSERT_DBG(dmesg_msg_iter->linebuf);
/* Ignore empty lines, once trimmed */
for (ch = dmesg_msg_iter->linebuf; *ch != '\0'; ch++) {
}
handle_state:
- BT_ASSERT(dmesg_comp->trace);
+ BT_ASSERT_DBG(dmesg_comp->trace);
switch (dmesg_msg_iter->state) {
case STATE_EMIT_STREAM_BEGINNING:
- BT_ASSERT(dmesg_msg_iter->tmp_event_msg);
+ BT_ASSERT_DBG(dmesg_msg_iter->tmp_event_msg);
*msg = bt_message_stream_beginning_create(
dmesg_msg_iter->pc_msg_iter, dmesg_comp->stream);
dmesg_msg_iter->state = STATE_EMIT_EVENT;
break;
case STATE_EMIT_EVENT:
- BT_ASSERT(dmesg_msg_iter->tmp_event_msg);
+ BT_ASSERT_DBG(dmesg_msg_iter->tmp_event_msg);
*msg = dmesg_msg_iter->tmp_event_msg;
dmesg_msg_iter->tmp_event_msg = NULL;
break;
bt_component_class_message_iterator_next_method_status ret =
BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK;
- BT_ASSERT(pretty);
+ BT_ASSERT_DBG(pretty);
switch (bt_message_get_type(message)) {
case BT_MESSAGE_TYPE_EVENT:
goto end;
}
- BT_ASSERT(next_status == BT_MESSAGE_ITERATOR_NEXT_STATUS_OK);
+ BT_ASSERT_DBG(next_status == BT_MESSAGE_ITERATOR_NEXT_STATUS_OK);
for (i = 0; i < count; i++) {
ret = (int) handle_message(pretty, msgs[i]);
&log_level);
if (prop_avail == BT_PROPERTY_AVAILABILITY_AVAILABLE) {
log_level_str = log_level_names[log_level];
- BT_ASSERT(log_level_str);
+ BT_ASSERT_DBG(log_level_str);
if (!pretty->start_line) {
bt_common_g_string_append(pretty->string, ", ");
bt_field_class_type ft_type;
int_fc = bt_field_borrow_class_const(field);
- BT_ASSERT(int_fc);
+ BT_ASSERT_DBG(int_fc);
ft_type = bt_field_get_class_type(field);
if (bt_field_class_type_is(ft_type,
BT_FIELD_CLASS_TYPE_UNSIGNED_INTEGER)) {
if (len < 64) {
size_t rounded_len;
- BT_ASSERT(len != 0);
+ BT_ASSERT_DBG(len != 0);
/* Round length to the nearest 3-bit */
rounded_len = (((len - 1) / 3) + 1) * 3;
v.u &= ((uint64_t) 1 << rounded_len) - 1;
}
field = bt_field_array_borrow_element_field_by_index_const(array, i);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
return print_field(pretty, field, print_names);
}
}
field = bt_field_array_borrow_element_field_by_index_const(seq, i);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
return print_field(pretty, field, print_names);
}
const bt_field *field = NULL;
field = bt_field_variant_borrow_selected_option_field_const(variant);
- BT_ASSERT(field);
+ BT_ASSERT_DBG(field);
bt_common_g_string_append(pretty->string, "{ ");
pretty->depth++;
if (print_names) {
const bt_event *event =
bt_message_event_borrow_event_const(event_msg);
- BT_ASSERT(event);
+ BT_ASSERT_DBG(event);
pretty->start_line = true;
g_string_assign(pretty->string, "");
ret = print_event_header(pretty, event_msg);
counter = bt_self_component_get_data(
bt_self_component_sink_as_self_component(comp));
- BT_ASSERT(counter);
+ BT_ASSERT_DBG(counter);
if (G_UNLIKELY(!counter->msg_iter)) {
try_print_last(counter);
for (i = 0; i < msg_count; i++) {
const bt_message *msg = msgs[i];
- BT_ASSERT(msg);
+ BT_ASSERT_DBG(msg);
switch (bt_message_get_type(msg)) {
case BT_MESSAGE_TYPE_EVENT:
counter->count.event++;
dummy = bt_self_component_get_data(
bt_self_component_sink_as_self_component(component));
- BT_ASSERT(dummy);
+ BT_ASSERT_DBG(dummy);
if (G_UNLIKELY(!dummy->msg_iter)) {
status = BT_COMPONENT_CLASS_SINK_CONSUME_METHOD_STATUS_END;
* valid: it must be considered for muxing operations.
*/
BT_COMP_LOGD_STR("Validated upstream message iterator wrapper.");
- BT_ASSERT(count > 0);
+ BT_ASSERT_DBG(count > 0);
/* Move messages to our queue */
for (i = 0; i < count; i++) {
const bt_stream_class *stream_class = NULL;
bt_message_type msg_type;
- BT_ASSERT(msg);
- BT_ASSERT(ts_ns);
+ BT_ASSERT_DBG(msg);
+ BT_ASSERT_DBG(ts_ns);
BT_COMP_LOGD("Getting message's timestamp: "
"muxer-msg-iter-addr=%p, msg-addr=%p, "
"last-returned-ts=%" PRId64,
switch (msg_type) {
case BT_MESSAGE_TYPE_EVENT:
- BT_ASSERT(bt_message_event_borrow_stream_class_default_clock_class_const(
+ BT_ASSERT_DBG(bt_message_event_borrow_stream_class_default_clock_class_const(
msg));
clock_snapshot = bt_message_event_borrow_default_clock_snapshot_const(
msg);
const uint8_t *cc_uuid;
const char *cc_name;
- BT_ASSERT(clock_class);
+ BT_ASSERT_DBG(clock_class);
cc_uuid = bt_clock_class_get_uuid(clock_class);
cc_name = bt_clock_class_get_name(clock_class);
bt_component_class_message_iterator_next_method_status status =
BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK;
- BT_ASSERT(muxer_comp);
- BT_ASSERT(muxer_msg_iter);
- BT_ASSERT(muxer_upstream_msg_iter);
+ BT_ASSERT_DBG(muxer_comp);
+ BT_ASSERT_DBG(muxer_msg_iter);
+ BT_ASSERT_DBG(muxer_upstream_msg_iter);
*muxer_upstream_msg_iter = NULL;
for (i = 0; i < muxer_msg_iter->active_muxer_upstream_msg_iters->len;
continue;
}
- BT_ASSERT(cur_muxer_upstream_msg_iter->msgs->length > 0);
+ BT_ASSERT_DBG(cur_muxer_upstream_msg_iter->msgs->length > 0);
msg = g_queue_peek_head(cur_muxer_upstream_msg_iter->msgs);
- BT_ASSERT(msg);
+ BT_ASSERT_DBG(msg);
if (G_UNLIKELY(bt_message_get_type(msg) ==
BT_MESSAGE_TYPE_STREAM_BEGINNING)) {
"muxer-upstream-msg-iter-wrap-addr=%p, "
"ts=%" PRId64,
muxer_msg_iter, muxer_upstream_msg_iter, next_return_ts);
- BT_ASSERT(status == BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK);
- BT_ASSERT(muxer_upstream_msg_iter);
+ BT_ASSERT_DBG(status ==
+ BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK);
+ BT_ASSERT_DBG(muxer_upstream_msg_iter);
/*
* Consume from the queue's head: other side
* (muxer_upstream_msg_iter_next()) writes to the tail.
*/
*msg = g_queue_pop_head(muxer_upstream_msg_iter->msgs);
- BT_ASSERT(*msg);
+ BT_ASSERT_DBG(*msg);
muxer_msg_iter->last_returned_ts_ns = next_return_ts;
end:
bt_self_component *self_comp = NULL;
struct muxer_comp *muxer_comp = NULL;
- BT_ASSERT(muxer_msg_iter);
+ BT_ASSERT_DBG(muxer_msg_iter);
self_comp = bt_self_message_iterator_borrow_component(
self_msg_iter);
- BT_ASSERT(self_comp);
+ BT_ASSERT_DBG(self_comp);
muxer_comp = bt_self_component_get_data(self_comp);
- BT_ASSERT(muxer_comp);
+ BT_ASSERT_DBG(muxer_comp);
BT_COMP_LOGT("Muxer component's message iterator's \"next\" method called: "
"comp-addr=%p, muxer-comp-addr=%p, muxer-msg-iter-addr=%p, "
"msg-iter-addr=%p",
const bt_clock_snapshot *clock_snapshot = NULL;
int ret = 0;
- BT_ASSERT(msg);
- BT_ASSERT(ns_from_origin);
- BT_ASSERT(has_clock_snapshot);
+ BT_ASSERT_DBG(msg);
+ BT_ASSERT_DBG(ns_from_origin);
+ BT_ASSERT_DBG(has_clock_snapshot);
switch (bt_message_get_type(msg)) {
case BT_MESSAGE_TYPE_EVENT:
continue;
}
- BT_ASSERT(ns_from_origin != INT64_MIN &&
+ BT_ASSERT_DBG(ns_from_origin != INT64_MIN &&
ns_from_origin != INT64_MAX);
put_messages(msgs, count);
goto found;
{
struct trimmer_iterator_stream_state *sstate;
- BT_ASSERT(stream);
+ BT_ASSERT_DBG(stream);
sstate = g_hash_table_lookup(trimmer_it->stream_states, stream);
- BT_ASSERT(sstate);
+ BT_ASSERT_DBG(sstate);
return sstate;
}
* class has a clock class. And we know it has, otherwise we
* couldn't be using the trimmer component.
*/
- BT_ASSERT(ns_from_origin);
+ BT_ASSERT_DBG(ns_from_origin);
if (G_UNLIKELY(!trimmer_it->end.is_infinite &&
*ns_from_origin > trimmer_it->end.ns_from_origin)) {
(*count)++;
}
- BT_ASSERT(*count > 0);
+ BT_ASSERT_DBG(*count > 0);
}
static inline
goto end;
}
- BT_ASSERT(my_count > 0);
+ BT_ASSERT_DBG(my_count > 0);
for (i = 0; i < my_count; i++) {
status = handle_message(trimmer_it, my_msgs[i],
* There's at least one message in the output message queue:
* move the messages to the output message array.
*/
- BT_ASSERT(!g_queue_is_empty(trimmer_it->output_messages));
+ BT_ASSERT_DBG(!g_queue_is_empty(trimmer_it->output_messages));
fill_message_array_from_output_messages(trimmer_it, msgs,
capacity, count);
bt_component_class_message_iterator_next_method_status status =
BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK;
- BT_ASSERT(trimmer_it);
+ BT_ASSERT_DBG(trimmer_it);
if (G_LIKELY(trimmer_it->state == TRIMMER_ITERATOR_STATE_TRIM)) {
status = state_trim(trimmer_it, msgs, capacity, count);