EnumerationFieldTypeMapping = barectf_config.EnumerationFieldTypeMapping
EnumerationFieldTypeMappingRange = barectf_config.EnumerationFieldTypeMappingRange
EnumerationFieldTypeMappings = barectf_config.EnumerationFieldTypeMappings
-EventType = barectf_config.EventType
+EventRecordType = barectf_config.EventRecordType
LogLevel = barectf_config.LogLevel
RealFieldType = barectf_config.RealFieldType
SignedEnumerationFieldType = barectf_config.SignedEnumerationFieldType
SignedIntegerFieldType = barectf_config.SignedIntegerFieldType
StaticArrayFieldType = barectf_config.StaticArrayFieldType
-StreamType = barectf_config.StreamType
-StreamTypeEventFeatures = barectf_config.StreamTypeEventFeatures
-StreamTypeFeatures = barectf_config.StreamTypeFeatures
-StreamTypePacketFeatures = barectf_config.StreamTypePacketFeatures
+DataStreamType = barectf_config.DataStreamType
+DataStreamTypeEventRecordFeatures = barectf_config.DataStreamTypeEventRecordFeatures
+DataStreamTypeFeatures = barectf_config.DataStreamTypeFeatures
+DataStreamTypePacketFeatures = barectf_config.DataStreamTypePacketFeatures
StringFieldType = barectf_config.StringFieldType
StructureFieldType = barectf_config.StructureFieldType
StructureFieldTypeMember = barectf_config.StructureFieldTypeMember
# suboperation.
#
# This is not strictly needed (could be appended to
- # `ops`), but the properties of `_StreamOps` and
- # `_EvOps` offer a single (structure field type)
+ # `ops`), but the properties of `_DsOps` and
+ # `_ErOps` offer a single (structure field type)
# operation.
subops.append(init_align_op)
_OptCompoundOp = Optional[_CompoundOp]
-# The operations for an event.
+# The operations for an event record.
#
# The available operations are:
#
# * Specific context operation.
# * Payload operation.
-class _EvOps:
+class _ErOps:
def __init__(self, spec_ctx_op: _OptCompoundOp, payload_op: _OptCompoundOp):
self._spec_ctx_op = spec_ctx_op
self._payload_op = payload_op
return self._payload_op
-_EvOpsMap = Mapping[barectf_config.EventType, _EvOps]
+_ErOpsMap = Mapping[barectf_config.EventRecordType, _ErOps]
-# The operations for a stream.
+# The operations for a data stream.
#
# The available operations are:
#
# * Packet header operation.
# * Packet context operation.
-# * Event header operation.
-# * Event common context operation.
-# * Event operations (`_EvOps`).
-class _StreamOps:
+# * Event record header operation.
+# * Event record common context operation.
+# * Event record operations (`_ErOps`).
+class _DsOps:
def __init__(self, pkt_header_op: _OptCompoundOp, pkt_ctx_op: _CompoundOp,
- ev_header_op: _OptCompoundOp, ev_common_ctx_op: _OptCompoundOp, ev_ops: _EvOpsMap):
+ er_header_op: _OptCompoundOp, er_common_ctx_op: _OptCompoundOp, er_ops: _ErOpsMap):
self._pkt_header_op = pkt_header_op
self._pkt_ctx_op = pkt_ctx_op
- self._ev_header_op = ev_header_op
- self._ev_common_ctx_op = ev_common_ctx_op
- self._ev_ops = ev_ops
+ self._er_header_op = er_header_op
+ self._er_common_ctx_op = er_common_ctx_op
+ self._er_ops = er_ops
@property
def pkt_header_op(self) -> _OptCompoundOp:
return self._pkt_ctx_op
@property
- def ev_header_op(self) -> _OptCompoundOp:
- return self._ev_header_op
+ def er_header_op(self) -> _OptCompoundOp:
+ return self._er_header_op
@property
- def ev_common_ctx_op(self) -> _OptCompoundOp:
- return self._ev_common_ctx_op
+ def er_common_ctx_op(self) -> _OptCompoundOp:
+ return self._er_common_ctx_op
@property
- def ev_ops(self) -> _EvOpsMap:
- return self._ev_ops
+ def er_ops(self) -> _ErOpsMap:
+ return self._er_ops
# The C variable name prefixes for the six kinds of root field types.
class _RootFtPrefixes:
PH = 'ph'
PC = 'pc'
- EH = 'eh'
- ECC = 'ecc'
+ ERH = 'erh'
+ ERCC = 'ercc'
SC = 'sc'
P = 'p'
_ROOT_FT_PREFIX_NAMES = {
_RootFtPrefixes.PH: 'packet header',
_RootFtPrefixes.PC: 'packet context',
- _RootFtPrefixes.EH: 'event header',
- _RootFtPrefixes.ECC: 'event common context',
+ _RootFtPrefixes.ERH: 'event record header',
+ _RootFtPrefixes.ERCC: 'event record common context',
_RootFtPrefixes.SC: 'specific context',
_RootFtPrefixes.P: 'payload',
}
'ft_c_type': self._ft_c_type,
'open_func_params_str': self._open_func_params_str,
'trace_func_params_str': self._trace_func_params_str,
- 'serialize_ev_common_ctx_func_params_str': self._serialize_ev_common_ctx_func_params_str,
+ 'serialize_er_common_ctx_func_params_str': self._serialize_er_common_ctx_func_params_str,
'loop_var_name': _loop_var_name,
'op_src_var_name': self._op_src_var_name,
}
self._serialize_write_dynamic_array_statements_templ = self._create_template('serialize-write-dynamic-array-statements.j2')
self._serialize_write_magic_statements_templ = self._create_template('serialize-write-magic-statements.j2')
self._serialize_write_uuid_statements_templ = self._create_template('serialize-write-uuid-statements.j2')
- self._serialize_write_stream_type_id_statements_templ = self._create_template('serialize-write-stream-type-id-statements.j2')
+ self._serialize_write_dst_id_statements_templ = self._create_template('serialize-write-dst-id-statements.j2')
self._serialize_write_time_statements_templ = self._create_template('serialize-write-time-statements.j2')
self._serialize_write_packet_size_statements_templ = self._create_template('serialize-write-packet-size-statements.j2')
self._serialize_write_skip_save_statements_templ = self._create_template('serialize-write-skip-save-statements.j2')
- self._serialize_write_ev_type_id_statements_templ = self._create_template('serialize-write-ev-type-id-statements.j2')
+ self._serialize_write_ert_id_statements_templ = self._create_template('serialize-write-ert-id-statements.j2')
self._size_align_statements_templ = self._create_template('size-align-statements.j2')
self._size_write_bit_array_statements_templ = self._create_template('size-write-bit-array-statements.j2')
self._size_write_string_statements_templ = self._create_template('size-write-string-statements.j2')
const_params=const_params)
# Returns the packet opening function prototype parameters for the
- # stream type `stream_type`.
- def _open_func_params_str(self, stream_type: barectf_config.StreamType,
- const_params: bool) -> str:
+ # data stream type `dst`.
+ def _open_func_params_str(self, dst: barectf_config.DataStreamType, const_params: bool) -> str:
parts = []
parts.append(self._proto_params_str(self._trace_type._pkt_header_ft, _RootFtPrefixes.PH,
const_params, {'magic', 'stream_id', 'uuid'}))
'content_size',
'events_discarded',
}
- parts.append(self._proto_params_str(stream_type._pkt_ctx_ft, _RootFtPrefixes.PC,
- const_params, exclude_set))
+ parts.append(self._proto_params_str(dst._pkt_ctx_ft, _RootFtPrefixes.PC, const_params,
+ exclude_set))
return ''.join(parts)
- # Returns the tracing function prototype parameters for the stream
- # and event types `stream_ev_types`.
- def _trace_func_params_str(self, stream_ev_types: Tuple[barectf_config.StreamType,
- barectf_config.EventType],
+ # Returns the tracing function prototype parameters for the data
+ # stream and event record types `ds_er_types`.
+ def _trace_func_params_str(self, ds_er_types: Tuple[barectf_config.DataStreamType,
+ barectf_config.EventRecordType],
const_params: bool, only_dyn: bool = False):
- stream_type = stream_ev_types[0]
- ev_type = stream_ev_types[1]
+ dst = ds_er_types[0]
+ ert = ds_er_types[1]
parts = []
- if stream_type._ev_header_ft is not None:
- parts.append(self._proto_params_str(stream_type._ev_header_ft, _RootFtPrefixes.EH,
+ if dst._er_header_ft is not None:
+ parts.append(self._proto_params_str(dst._er_header_ft, _RootFtPrefixes.ERH,
const_params, {'id', 'timestamp'},
only_dyn=only_dyn))
- if stream_type.event_common_context_field_type is not None:
- parts.append(self._proto_params_str(stream_type.event_common_context_field_type,
- _RootFtPrefixes.ECC, const_params,
+ if dst.event_record_common_context_field_type is not None:
+ parts.append(self._proto_params_str(dst.event_record_common_context_field_type,
+ _RootFtPrefixes.ERCC, const_params,
only_dyn=only_dyn))
- if ev_type.specific_context_field_type is not None:
- parts.append(self._proto_params_str(ev_type.specific_context_field_type,
+ if ert.specific_context_field_type is not None:
+ parts.append(self._proto_params_str(ert.specific_context_field_type,
_RootFtPrefixes.SC, const_params,
only_dyn=only_dyn))
- if ev_type.payload_field_type is not None:
- parts.append(self._proto_params_str(ev_type.payload_field_type, _RootFtPrefixes.P,
+ if ert.payload_field_type is not None:
+ parts.append(self._proto_params_str(ert.payload_field_type, _RootFtPrefixes.P,
const_params, only_dyn=only_dyn))
return ''.join(parts)
- # Returns the event header serialization function prototype
- # parameters for the stream type `stream_type`.
- def _serialize_ev_common_ctx_func_params_str(self, stream_type: barectf_config.StreamType,
+ # Returns the event record common context serialization function
+ # prototype parameters for the data stream type `dst`.
+ def _serialize_er_common_ctx_func_params_str(self, dst: barectf_config.DataStreamType,
const_params: bool) -> str:
- return self._proto_params_str(stream_type.event_common_context_field_type,
- _RootFtPrefixes.ECC, const_params)
+ return self._proto_params_str(dst.event_record_common_context_field_type,
+ _RootFtPrefixes.ERCC, const_params)
# Generates the bitfield header file contents.
def gen_bitfield_header(self) -> str:
# Generates the source code file contents.
def gen_src(self, header_file_name: str, bitfield_header_file_name: str) -> str:
- # Creates and returns the operations for all the stream and for
- # all their events.
- def create_stream_ops() -> Mapping[barectf_config.StreamType, _StreamOps]:
- stream_ser_ops = {}
+ # Creates and returns the operations for all the data streams and
+ # for all their event records.
+ def create_ds_ops() -> Mapping[barectf_config.DataStreamType, _DsOps]:
+ ds_ops = {}
- for stream_type in self._trace_type.stream_types:
- pkt_header_ser_op = None
+ for dst in self._trace_type.data_stream_types:
+ pkt_header_op = None
builder = _OpBuilder(self)
pkt_header_ft = self._trace_type._pkt_header_ft
spec_serialize_write_templates = {
'magic': self._serialize_write_magic_statements_templ,
'uuid': self._serialize_write_uuid_statements_templ,
- 'stream_id': self._serialize_write_stream_type_id_statements_templ,
+ 'stream_id': self._serialize_write_dst_id_statements_templ,
}
- pkt_header_ser_op = builder.build_for_root_ft(pkt_header_ft,
+ pkt_header_op = builder.build_for_root_ft(pkt_header_ft,
_RootFtPrefixes.PH,
spec_serialize_write_templates)
- # packet context operations
+ # packet context operation
spec_serialize_write_templates = {
'timestamp_begin': self._serialize_write_time_statements_templ,
'packet_size': self._serialize_write_packet_size_statements_templ,
'events_discarded': self._serialize_write_skip_save_statements_templ,
'content_size': self._serialize_write_skip_save_statements_templ,
}
- pkt_ctx_ser_op = builder.build_for_root_ft(stream_type._pkt_ctx_ft,
- _RootFtPrefixes.PC,
- spec_serialize_write_templates)
+ pkt_ctx_op = builder.build_for_root_ft(dst._pkt_ctx_ft, _RootFtPrefixes.PC,
+ spec_serialize_write_templates)
- # event header operationss
+ # event record header operation
builder = _OpBuilder(self)
- ev_header_ser_op = None
+ er_header_op = None
- if stream_type._ev_header_ft is not None:
+ if dst._er_header_ft is not None:
spec_serialize_write_templates = {
'timestamp': self._serialize_write_time_statements_templ,
- 'id': self._serialize_write_ev_type_id_statements_templ,
+ 'id': self._serialize_write_ert_id_statements_templ,
}
- ev_header_ser_op = builder.build_for_root_ft(stream_type._ev_header_ft,
- _RootFtPrefixes.EH,
- spec_serialize_write_templates)
+ er_header_op = builder.build_for_root_ft(dst._er_header_ft, _RootFtPrefixes.ERH,
+ spec_serialize_write_templates)
- # event common context operations
- ev_common_ctx_ser_op = None
+ # event record common context operation
+ er_common_ctx_op = None
- if stream_type.event_common_context_field_type is not None:
- ev_common_ctx_ser_op = builder.build_for_root_ft(stream_type.event_common_context_field_type,
- _RootFtPrefixes.ECC)
+ if dst.event_record_common_context_field_type is not None:
+ er_common_ctx_op = builder.build_for_root_ft(dst.event_record_common_context_field_type,
+ _RootFtPrefixes.ERCC)
- # operations specific to each event type
- ev_ser_ops = {}
+ # operations specific to each event record type
+ er_ops = {}
- for ev_type in stream_type.event_types:
+ for ert in dst.event_record_types:
ev_builder = copy.copy(builder)
- # specific context operations
- spec_ctx_ser_op = None
+ # specific context operation
+ spec_ctx_op = None
- if ev_type.specific_context_field_type is not None:
- spec_ctx_ser_op = ev_builder.build_for_root_ft(ev_type.specific_context_field_type,
- _RootFtPrefixes.SC)
+ if ert.specific_context_field_type is not None:
+ spec_ctx_op = ev_builder.build_for_root_ft(ert.specific_context_field_type,
+ _RootFtPrefixes.SC)
- # payload operations
- payload_ser_op = None
+ # payload operation
+ payload_op = None
- if ev_type.payload_field_type is not None:
- payload_ser_op = ev_builder.build_for_root_ft(ev_type.payload_field_type,
- _RootFtPrefixes.P)
+ if ert.payload_field_type is not None:
+ payload_op = ev_builder.build_for_root_ft(ert.payload_field_type,
+ _RootFtPrefixes.P)
- ev_ser_ops[ev_type] = _EvOps(spec_ctx_ser_op, payload_ser_op)
+ er_ops[ert] = _ErOps(spec_ctx_op, payload_op)
- stream_ser_ops[stream_type] = _StreamOps(pkt_header_ser_op, pkt_ctx_ser_op,
- ev_header_ser_op, ev_common_ctx_ser_op,
- ev_ser_ops)
+ ds_ops[dst] = _DsOps(pkt_header_op, pkt_ctx_op, er_header_op, er_common_ctx_op,
+ er_ops)
- return stream_ser_ops
+ return ds_ops
# Returns the "write" operation for the packet context member
- # named `member_name` within the stream type `stream_type`.
- def stream_op_pkt_ctx_op(stream_type: barectf_config.StreamType, member_name: str) -> _Op:
+ # named `member_name` within the data stream type `dst`.
+ def ds_op_pkt_ctx_op(dst: barectf_config.DataStreamType, member_name: str) -> _Op:
ret_op = None
- for op in stream_ops[stream_type].pkt_ctx_op.subops:
+ for op in ds_ops[dst].pkt_ctx_op.subops:
if op.top_name == member_name and type(op) is _WriteOp:
ret_op = op
break
assert ret_op is not None
return typing.cast(_Op, ret_op)
- stream_ops = create_stream_ops()
+ ds_ops = create_ds_ops()
c_src = self._create_file_template('barectf.c.j2').render(header_file_name=header_file_name,
bitfield_header_file_name=bitfield_header_file_name,
root_ft_prefixes=_RootFtPrefixes,
root_ft_prefix_names=_ROOT_FT_PREFIX_NAMES,
- stream_ops=stream_ops,
- stream_op_pkt_ctx_op=stream_op_pkt_ctx_op)
+ ds_ops=ds_ops,
+ ds_op_pkt_ctx_op=ds_op_pkt_ctx_op)
# Jinja 2 makes it hard to have multiple contiguous blocks
# delimited with empty lines when using a for loop, while not
cg_opts = config.options.code_generation_options
cg_opts = barectf.ConfigurationCodeGenerationOptions(v3_prefixes.identifier,
v3_prefixes.file_name,
- cg_opts.default_stream_type,
+ cg_opts.default_data_stream_type,
cg_opts.header_options,
cg_opts.clock_type_c_types)
config = barectf.Configuration(config.trace, barectf.ConfigurationOptions(cg_opts))
LogLevel = typing.NewType('LogLevel', int)
-class EventType(_UniqueByName):
+class EventRecordType(_UniqueByName):
def __init__(self, name: str, log_level: Optional[LogLevel] = None,
specific_context_field_type: _OptStructFt = None, payload_field_type: _OptStructFt = None):
self._id: Optional[Id] = None
_OptUIntFt = Optional[UnsignedIntegerFieldType]
-class StreamTypePacketFeatures:
+class DataStreamTypePacketFeatures:
def __init__(self, total_size_field_type: _DefaultableUIntFt = DEFAULT_FIELD_TYPE,
content_size_field_type: _DefaultableUIntFt = DEFAULT_FIELD_TYPE,
beginning_time_field_type: _OptDefaultableUIntFt = None,
end_time_field_type: _OptDefaultableUIntFt = None,
- discarded_events_counter_field_type: _OptDefaultableUIntFt = None):
+ discarded_event_records_snapshot_counter_field_type: _OptDefaultableUIntFt = None):
def get_ft(user_ft: _OptDefaultableUIntFt) -> _OptUIntFt:
if user_ft == DEFAULT_FIELD_TYPE:
return UnsignedIntegerFieldType(64)
self._content_size_field_type = get_ft(content_size_field_type)
self._beginning_time_field_type = get_ft(beginning_time_field_type)
self._end_time_field_type = get_ft(end_time_field_type)
- self._discarded_events_counter_field_type = get_ft(discarded_events_counter_field_type)
+ self._discarded_event_records_snapshot_counter_field_type = get_ft(discarded_event_records_snapshot_counter_field_type)
@property
def total_size_field_type(self) -> _OptUIntFt:
return self._end_time_field_type
@property
- def discarded_events_counter_field_type(self) -> _OptUIntFt:
- return self._discarded_events_counter_field_type
+ def discarded_event_records_snapshot_counter_field_type(self) -> _OptUIntFt:
+ return self._discarded_event_records_snapshot_counter_field_type
-class StreamTypeEventFeatures:
+class DataStreamTypeEventRecordFeatures:
def __init__(self, type_id_field_type: _OptDefaultableUIntFt = DEFAULT_FIELD_TYPE,
time_field_type: _OptDefaultableUIntFt = None):
def get_ft(user_ft: _OptDefaultableUIntFt) -> _OptUIntFt:
return self._time_field_type
-class StreamTypeFeatures:
- def __init__(self, packet_features: Optional[StreamTypePacketFeatures] = None,
- event_features: Optional[StreamTypeEventFeatures] = None):
+class DataStreamTypeFeatures:
+ def __init__(self, packet_features: Optional[DataStreamTypePacketFeatures] = None,
+ event_record_features: Optional[DataStreamTypeEventRecordFeatures] = None):
if packet_features is None:
- self._packet_features = StreamTypePacketFeatures()
+ self._packet_features = DataStreamTypePacketFeatures()
else:
self._packet_features = packet_features
- if event_features is None:
- self._event_features = StreamTypeEventFeatures()
+ if event_record_features is None:
+ self._event_record_features = DataStreamTypeEventRecordFeatures()
else:
- self._event_features = event_features
+ self._event_record_features = event_record_features
@property
- def packet_features(self) -> StreamTypePacketFeatures:
+ def packet_features(self) -> DataStreamTypePacketFeatures:
return self._packet_features
@property
- def event_features(self) -> StreamTypeEventFeatures:
- return self._event_features
+ def event_record_features(self) -> DataStreamTypeEventRecordFeatures:
+ return self._event_record_features
-class StreamType(_UniqueByName):
- def __init__(self, name: str, event_types: Set[EventType],
+class DataStreamType(_UniqueByName):
+ def __init__(self, name: str, event_record_types: Set[EventRecordType],
default_clock_type: Optional[ClockType] = None,
- features: Optional[StreamTypeFeatures] = None,
+ features: Optional[DataStreamTypeFeatures] = None,
packet_context_field_type_extra_members: Optional[_StructFtMembers] = None,
- event_common_context_field_type: _OptStructFt = None):
+ event_record_common_context_field_type: _OptStructFt = None):
self._id: Optional[Id] = None
self._name = name
self._default_clock_type = default_clock_type
- self._event_common_context_field_type = event_common_context_field_type
- self._event_types = frozenset(event_types)
+ self._event_record_common_context_field_type = event_record_common_context_field_type
+ self._event_record_types = frozenset(event_record_types)
# assign unique IDs
- for index, ev_type in enumerate(sorted(self._event_types, key=lambda evt: evt.name)):
- assert ev_type._id is None
- ev_type._id = Id(index)
+ for index, ert in enumerate(sorted(self._event_record_types, key=lambda evt: evt.name)):
+ assert ert._id is None
+ ert._id = Id(index)
self._set_features(features)
self._packet_context_field_type_extra_members = StructureFieldTypeMembers({})
self._packet_context_field_type_extra_members = StructureFieldTypeMembers(packet_context_field_type_extra_members)
self._set_pkt_ctx_ft()
- self._set_ev_header_ft()
+ self._set_er_header_ft()
- def _set_features(self, features: Optional[StreamTypeFeatures]):
+ def _set_features(self, features: Optional[DataStreamTypeFeatures]):
if features is not None:
self._features = features
return None
- ev_time_ft = None
+ er_time_ft = None
pkt_beginning_time_ft = None
pkt_end_time_ft = None
if self._default_clock_type is not None:
- # Automatic time field types because the stream type has a
- # default clock type.
- ev_time_ft = DEFAULT_FIELD_TYPE
+ # Automatic time field types because the data stream type
+ # has a default clock type.
+ er_time_ft = DEFAULT_FIELD_TYPE
pkt_beginning_time_ft = DEFAULT_FIELD_TYPE
pkt_end_time_ft = DEFAULT_FIELD_TYPE
- self._features = StreamTypeFeatures(StreamTypePacketFeatures(beginning_time_field_type=pkt_beginning_time_ft,
- end_time_field_type=pkt_end_time_ft),
- StreamTypeEventFeatures(time_field_type=ev_time_ft))
+ self._features = DataStreamTypeFeatures(DataStreamTypePacketFeatures(beginning_time_field_type=pkt_beginning_time_ft,
+ end_time_field_type=pkt_end_time_ft),
+ DataStreamTypeEventRecordFeatures(time_field_type=er_time_ft))
def _set_ft_mapped_clk_type_name(self, ft: Optional[UnsignedIntegerFieldType]):
if ft is None:
add_member_if_exists('timestamp_end', self._features.packet_features.end_time_field_type,
True)
add_member_if_exists('events_discarded',
- self._features.packet_features.discarded_events_counter_field_type)
+ self._features.packet_features.discarded_event_records_snapshot_counter_field_type)
if self._packet_context_field_type_extra_members is not None:
for name, field_type in self._packet_context_field_type_extra_members.items():
self._pkt_ctx_ft = StructureFieldType(8, members)
- def _set_ev_header_ft(self):
+ def _set_er_header_ft(self):
members = collections.OrderedDict()
- if self._features.event_features.type_id_field_type is not None:
- members['id'] = StructureFieldTypeMember(self._features.event_features.type_id_field_type)
+ if self._features.event_record_features.type_id_field_type is not None:
+ members['id'] = StructureFieldTypeMember(self._features.event_record_features.type_id_field_type)
- if self._features.event_features.time_field_type is not None:
- ft = self._features.event_features.time_field_type
+ if self._features.event_record_features.time_field_type is not None:
+ ft = self._features.event_record_features.time_field_type
self._set_ft_mapped_clk_type_name(ft)
members['timestamp'] = StructureFieldTypeMember(ft)
- self._ev_header_ft = StructureFieldType(8, members)
+ self._er_header_ft = StructureFieldType(8, members)
@property
def id(self) -> Optional[Id]:
return self._default_clock_type
@property
- def features(self) -> StreamTypeFeatures:
+ def features(self) -> DataStreamTypeFeatures:
return self._features
@property
return self._packet_context_field_type_extra_members
@property
- def event_common_context_field_type(self) -> _OptStructFt:
- return self._event_common_context_field_type
+ def event_record_common_context_field_type(self) -> _OptStructFt:
+ return self._event_record_common_context_field_type
@property
- def event_types(self) -> FrozenSet[EventType]:
- return self._event_types
+ def event_record_types(self) -> FrozenSet[EventRecordType]:
+ return self._event_record_types
_OptUuidFt = Optional[Union[str, StaticArrayFieldType]]
class TraceTypeFeatures:
def __init__(self, magic_field_type: _OptDefaultableUIntFt = DEFAULT_FIELD_TYPE,
uuid_field_type: _OptUuidFt = None,
- stream_type_id_field_type: _OptDefaultableUIntFt = DEFAULT_FIELD_TYPE):
+ data_stream_type_id_field_type: _OptDefaultableUIntFt = DEFAULT_FIELD_TYPE):
def get_field_type(user_ft: Optional[Union[str, _FieldType]],
create_default_ft: Callable[[], _FieldType]) -> _OptFt:
if user_ft == DEFAULT_FIELD_TYPE:
def create_default_uuid_ft():
return StaticArrayFieldType(Count(16), UnsignedIntegerFieldType(8))
- def create_default_stream_type_id_ft():
+ def create_default_dst_id_ft():
return UnsignedIntegerFieldType(64)
self._magic_field_type = typing.cast(_OptUIntFt, get_field_type(magic_field_type, create_default_magic_ft))
self._uuid_field_type = typing.cast(Optional[StaticArrayFieldType],
get_field_type(uuid_field_type, create_default_uuid_ft))
- self._stream_type_id_field_type = typing.cast(_OptUIntFt,
- get_field_type(stream_type_id_field_type,
- create_default_stream_type_id_ft))
+ self._data_stream_type_id_field_type = typing.cast(_OptUIntFt,
+ get_field_type(data_stream_type_id_field_type,
+ create_default_dst_id_ft))
@property
def magic_field_type(self) -> _OptUIntFt:
return self._uuid_field_type
@property
- def stream_type_id_field_type(self) -> _OptUIntFt:
- return self._stream_type_id_field_type
+ def data_stream_type_id_field_type(self) -> _OptUIntFt:
+ return self._data_stream_type_id_field_type
class TraceType:
- def __init__(self, stream_types: Set[StreamType], uuid: _OptUuid = None,
+ def __init__(self, data_stream_types: Set[DataStreamType], uuid: _OptUuid = None,
features: Optional[TraceTypeFeatures] = None):
- self._stream_types = frozenset(stream_types)
+ self._data_stream_types = frozenset(data_stream_types)
# assign unique IDs
- for index, stream_type in enumerate(sorted(self._stream_types, key=lambda st: st.name)):
- assert stream_type._id is None
- stream_type._id = Id(index)
+ for index, dst in enumerate(sorted(self._data_stream_types, key=lambda st: st.name)):
+ assert dst._id is None
+ dst._id = Id(index)
self._uuid = uuid
self._set_features(features)
add_member_if_exists('magic', self._features.magic_field_type)
add_member_if_exists('uuid', self._features.uuid_field_type)
- add_member_if_exists('stream_id', self._features.stream_type_id_field_type)
+ add_member_if_exists('stream_id', self._features.data_stream_type_id_field_type)
self._pkt_header_ft = StructureFieldType(8, members)
@property
return self._uuid
@property
- def stream_types(self) -> FrozenSet[StreamType]:
- return self._stream_types
+ def data_stream_types(self) -> FrozenSet[DataStreamType]:
+ return self._data_stream_types
- def stream_type(self, name: str) -> Optional[StreamType]:
- for cand_stream_type in self._stream_types:
- if cand_stream_type.name == name:
- return cand_stream_type
+ def data_stream_type(self, name: str) -> Optional[DataStreamType]:
+ for cand_dst in self._data_stream_types:
+ if cand_dst.name == name:
+ return cand_dst
return None
def clock_types(self) -> Set[ClockType]:
clk_types = set()
- for stream_type in self._stream_types:
- if stream_type.default_clock_type is not None:
- clk_types.add(stream_type.default_clock_type)
+ for dst in self._data_stream_types:
+ if dst.default_clock_type is not None:
+ clk_types.add(dst.default_clock_type)
return clk_types
class ConfigurationCodeGenerationHeaderOptions:
def __init__(self, identifier_prefix_definition: bool = False,
- default_stream_type_name_definition: bool = False):
+ default_data_stream_type_name_definition: bool = False):
self._identifier_prefix_definition = identifier_prefix_definition
- self._default_stream_type_name_definition = default_stream_type_name_definition
+ self._default_data_stream_type_name_definition = default_data_stream_type_name_definition
@property
def identifier_prefix_definition(self) -> bool:
return self._identifier_prefix_definition
@property
- def default_stream_type_name_definition(self) -> bool:
- return self._default_stream_type_name_definition
+ def default_data_stream_type_name_definition(self) -> bool:
+ return self._default_data_stream_type_name_definition
class ConfigurationCodeGenerationOptions:
def __init__(self, identifier_prefix: str = 'barectf_', file_name_prefix: str = 'barectf',
- default_stream_type: Optional[StreamType] = None,
+ default_data_stream_type: Optional[DataStreamType] = None,
header_options: Optional[ConfigurationCodeGenerationHeaderOptions] = None,
clock_type_c_types: Optional[_ClkTypeCTypes] = None):
self._identifier_prefix = identifier_prefix
self._file_name_prefix = file_name_prefix
- self._default_stream_type = default_stream_type
+ self._default_data_stream_type = default_data_stream_type
self._header_options = ConfigurationCodeGenerationHeaderOptions()
return self._file_name_prefix
@property
- def default_stream_type(self) -> Optional[StreamType]:
- return self._default_stream_type
+ def default_data_stream_type(self) -> Optional[DataStreamType]:
+ return self._default_data_stream_type
@property
def header_options(self) -> ConfigurationCodeGenerationHeaderOptions:
clk_type_c_types = self._options.code_generation_options.clock_type_c_types
- for stream_type in trace.type.stream_types:
- def_clk_type = stream_type.default_clock_type
+ for dst in trace.type.data_stream_types:
+ def_clk_type = dst.default_clock_type
if def_clk_type is None:
continue
return v3_clk_type_node
- # Converts a v2 event type node to a v3 event type node and returns
- # it.
- def _conv_ev_type_node(self, v2_ev_type_node: _MapNode) -> _MapNode:
- # create empty v3 event type node
- v3_ev_type_node: _MapNode = collections.OrderedDict()
+ # Converts a v2 event record type node to a v3 event record type
+ # node and returns it.
+ def _conv_ert_node(self, v2_ert_node: _MapNode) -> _MapNode:
+ # create empty v3 event record type node
+ v3_ert_node: _MapNode = collections.OrderedDict()
# copy `log-level` property
- _copy_prop_if_exists(v3_ev_type_node, v2_ev_type_node, 'log-level')
+ _copy_prop_if_exists(v3_ert_node, v2_ert_node, 'log-level')
# convert specific context field type node
- v2_ft_node = v2_ev_type_node.get('context-type')
+ v2_ft_node = v2_ert_node.get('context-type')
if v2_ft_node is not None:
- v3_ev_type_node['specific-context-field-type'] = self._conv_ft_node(v2_ft_node)
+ v3_ert_node['specific-context-field-type'] = self._conv_ft_node(v2_ft_node)
# convert payload field type node
- v2_ft_node = v2_ev_type_node.get('payload-type')
+ v2_ft_node = v2_ert_node.get('payload-type')
if v2_ft_node is not None:
- v3_ev_type_node['payload-field-type'] = self._conv_ft_node(v2_ft_node)
+ v3_ert_node['payload-field-type'] = self._conv_ft_node(v2_ft_node)
- return v3_ev_type_node
+ return v3_ert_node
@staticmethod
def _set_v3_feature_ft_if_exists(v3_features_node: _MapNode, key: str,
v3_features_node[key] = val
- # Converts a v2 stream type node to a v3 stream type node and
- # returns it.
- def _conv_stream_type_node(self, v2_stream_type_node: _MapNode) -> _MapNode:
- # This function creates a v3 stream type features node from the
- # packet context and event header field type nodes of a
- # v2 stream type node.
+ # Converts a v2 data stream type node to a v3 data stream type node
+ # and returns it.
+ def _conv_dst_node(self, v2_dst_node: _MapNode) -> _MapNode:
+ # This function creates a v3 data stream type features node from
+ # the packet context and event record header field type nodes of
+ # a v2 data stream type node.
def v3_features_node_from_v2_ft_nodes(v2_pkt_ctx_ft_fields_node: _MapNode,
- v2_ev_header_ft_fields_node: Optional[_MapNode]) -> _MapNode:
- if v2_ev_header_ft_fields_node is None:
- v2_ev_header_ft_fields_node = collections.OrderedDict()
+ v2_er_header_ft_fields_node: Optional[_MapNode]) -> _MapNode:
+ if v2_er_header_ft_fields_node is None:
+ v2_er_header_ft_fields_node = collections.OrderedDict()
v3_pkt_total_size_ft_node = self._conv_ft_node(v2_pkt_ctx_ft_fields_node['packet_size'])
v3_pkt_content_size_ft_node = self._conv_ft_node(v2_pkt_ctx_ft_fields_node['content_size'])
'timestamp_begin')
v3_pkt_end_time_ft_node = self._conv_ft_node_if_exists(v2_pkt_ctx_ft_fields_node,
'timestamp_end')
- v3_pkt_disc_ev_counter_ft_node = self._conv_ft_node_if_exists(v2_pkt_ctx_ft_fields_node,
- 'events_discarded')
- v3_ev_type_id_ft_node = self._conv_ft_node_if_exists(v2_ev_header_ft_fields_node, 'id')
- v3_ev_time_ft_node = self._conv_ft_node_if_exists(v2_ev_header_ft_fields_node,
+ v3_pkt_disc_er_counter_snap_ft_node = self._conv_ft_node_if_exists(v2_pkt_ctx_ft_fields_node,
+ 'events_discarded')
+ v3_ert_id_ft_node = self._conv_ft_node_if_exists(v2_er_header_ft_fields_node, 'id')
+ v3_er_time_ft_node = self._conv_ft_node_if_exists(v2_er_header_ft_fields_node,
'timestamp')
v3_features_node: _MapNode = collections.OrderedDict()
v3_pkt_node: _MapNode = collections.OrderedDict()
- v3_ev_node: _MapNode = collections.OrderedDict()
+ v3_er_node: _MapNode = collections.OrderedDict()
v3_pkt_node['total-size-field-type'] = v3_pkt_total_size_ft_node
v3_pkt_node['content-size-field-type'] = v3_pkt_content_size_ft_node
self._set_v3_feature_ft_if_exists(v3_pkt_node, 'beginning-time-field-type',
v3_pkt_beg_time_ft_node)
self._set_v3_feature_ft_if_exists(v3_pkt_node, 'end-time-field-type',
v3_pkt_end_time_ft_node)
- self._set_v3_feature_ft_if_exists(v3_pkt_node, 'discarded-events-counter-field-type',
- v3_pkt_disc_ev_counter_ft_node)
- self._set_v3_feature_ft_if_exists(v3_ev_node, 'type-id-field-type',
- v3_ev_type_id_ft_node)
- self._set_v3_feature_ft_if_exists(v3_ev_node, 'time-field-type', v3_ev_time_ft_node)
+ self._set_v3_feature_ft_if_exists(v3_pkt_node,
+ 'discarded-event-records-counter-snapshot-field-type',
+ v3_pkt_disc_er_counter_snap_ft_node)
+ self._set_v3_feature_ft_if_exists(v3_er_node, 'type-id-field-type', v3_ert_id_ft_node)
+ self._set_v3_feature_ft_if_exists(v3_er_node, 'time-field-type', v3_er_time_ft_node)
v3_features_node['packet'] = v3_pkt_node
- v3_features_node['event'] = v3_ev_node
+ v3_features_node['event-record'] = v3_er_node
return v3_features_node
def clk_type_name_from_v2_int_ft_node(v2_int_ft_node: Optional[_MapNode]) -> _OptStr:
return None
- # create empty v3 stream type node
- v3_stream_type_node: _MapNode = collections.OrderedDict()
+ # create empty v3 data stream type node
+ v3_dst_node: _MapNode = collections.OrderedDict()
# rename `$default` property to `$is-default`
- _copy_prop_if_exists(v3_stream_type_node, v2_stream_type_node, '$default', '$is-default')
+ _copy_prop_if_exists(v3_dst_node, v2_dst_node, '$default', '$is-default')
# set default clock type node
pct_prop_name = 'packet-context-type'
- v2_pkt_ctx_ft_fields_node = v2_stream_type_node[pct_prop_name]['fields']
+ v2_pkt_ctx_ft_fields_node = v2_dst_node[pct_prop_name]['fields']
eht_prop_name = 'event-header-type'
- v2_ev_header_ft_fields_node = None
- v2_ev_header_ft_node = v2_stream_type_node.get(eht_prop_name)
+ v2_er_header_ft_fields_node = None
+ v2_er_header_ft_node = v2_dst_node.get(eht_prop_name)
- if v2_ev_header_ft_node is not None:
- v2_ev_header_ft_fields_node = v2_ev_header_ft_node['fields']
+ if v2_er_header_ft_node is not None:
+ v2_er_header_ft_fields_node = v2_er_header_ft_node['fields']
def_clk_type_name = None
_append_error_ctx(exc, f'`{pct_prop_name}` property')
try:
- if def_clk_type_name is None and v2_ev_header_ft_fields_node is not None:
- def_clk_type_name = clk_type_name_from_v2_int_ft_node(v2_ev_header_ft_fields_node.get('timestamp'))
+ if def_clk_type_name is None and v2_er_header_ft_fields_node is not None:
+ def_clk_type_name = clk_type_name_from_v2_int_ft_node(v2_er_header_ft_fields_node.get('timestamp'))
if def_clk_type_name is None and ts_begin_clk_type_name is not None:
def_clk_type_name = ts_begin_clk_type_name
_append_error_ctx(exc, f'`{eht_prop_name}` property')
if def_clk_type_name is not None:
- v3_stream_type_node['$default-clock-type-name'] = def_clk_type_name
+ v3_dst_node['$default-clock-type-name'] = def_clk_type_name
# set features node
- v3_stream_type_node['$features'] = v3_features_node_from_v2_ft_nodes(v2_pkt_ctx_ft_fields_node,
- v2_ev_header_ft_fields_node)
+ v3_dst_node['$features'] = v3_features_node_from_v2_ft_nodes(v2_pkt_ctx_ft_fields_node,
+ v2_er_header_ft_fields_node)
# set extra packet context field type members node
pkt_ctx_ft_extra_members = []
}))
if len(pkt_ctx_ft_extra_members) > 0:
- v3_stream_type_node['packet-context-field-type-extra-members'] = pkt_ctx_ft_extra_members
+ v3_dst_node['packet-context-field-type-extra-members'] = pkt_ctx_ft_extra_members
- # convert event common context field type node
- v2_ft_node = v2_stream_type_node.get('event-context-type')
+ # convert event record common context field type node
+ v2_ft_node = v2_dst_node.get('event-context-type')
if v2_ft_node is not None:
- v3_stream_type_node['event-common-context-field-type'] = self._conv_ft_node(v2_ft_node)
+ v3_dst_node['event-record-common-context-field-type'] = self._conv_ft_node(v2_ft_node)
- # convert event type nodes
- v3_event_types_node = collections.OrderedDict()
+ # convert event record type nodes
+ v3_erts_node = collections.OrderedDict()
- for ev_type_name, v2_ev_type_node in v2_stream_type_node['events'].items():
+ for ert_name, v2_ert_node in v2_dst_node['events'].items():
try:
- v3_event_types_node[ev_type_name] = self._conv_ev_type_node(v2_ev_type_node)
+ v3_erts_node[ert_name] = self._conv_ert_node(v2_ert_node)
except _ConfigurationParseError as exc:
- _append_error_ctx(exc, f'Event type `{ev_type_name}`')
+ _append_error_ctx(exc, f'Event record type `{ert_name}`')
- v3_stream_type_node['event-types'] = v3_event_types_node
+ v3_dst_node['event-record-types'] = v3_erts_node
- return v3_stream_type_node
+ return v3_dst_node
# Converts a v2 metadata node to a v3 trace node and returns it.
def _conv_meta_node(self, v2_meta_node: _MapNode) -> _MapNode:
v3_magic_ft_node = self._conv_ft_node_if_exists(v2_pkt_header_ft_fields_node, 'magic')
v3_uuid_ft_node = self._conv_ft_node_if_exists(v2_pkt_header_ft_fields_node, 'uuid')
- v3_stream_type_id_ft_node = self._conv_ft_node_if_exists(v2_pkt_header_ft_fields_node,
- 'stream_id')
+ v3_dst_id_ft_node = self._conv_ft_node_if_exists(v2_pkt_header_ft_fields_node,
+ 'stream_id')
v3_features_node: _MapNode = collections.OrderedDict()
set_if_exists('magic-field-type', v3_magic_ft_node)
set_if_exists('uuid-field-type', v3_uuid_ft_node)
- set_if_exists('stream-type-id-field-type', v3_stream_type_id_ft_node)
+ set_if_exists('data-stream-type-id-field-type', v3_dst_id_ft_node)
return v3_features_node
v3_trace_node: _MapNode = collections.OrderedDict()
v2_pkt_header_ft_node = v2_trace_node.get('packet-header-type')
v3_trace_type_node['$features'] = v3_features_node_from_v2_ft_node(v2_pkt_header_ft_node)
- # convert stream type nodes
- v3_stream_types_node = collections.OrderedDict()
+ # convert data stream type nodes
+ v3_dsts_node = collections.OrderedDict()
- for stream_type_name, v2_stream_type_node in v2_meta_node['streams'].items():
+ for dst_name, v2_dst_node in v2_meta_node['streams'].items():
try:
- v3_stream_types_node[stream_type_name] = self._conv_stream_type_node(v2_stream_type_node)
+ v3_dsts_node[dst_name] = self._conv_dst_node(v2_dst_node)
except _ConfigurationParseError as exc:
- _append_error_ctx(exc, f'Stream type `{stream_type_name}`')
+ _append_error_ctx(exc, f'Data stream type `{dst_name}`')
- v3_trace_type_node['stream-types'] = v3_stream_types_node
+ v3_trace_type_node['data-stream-types'] = v3_dsts_node
# If `v2_meta_node` has a `$default-stream` property, find the
- # corresponding v3 stream type node and set its `$is-default`
- # property to `True`.
+ # corresponding v3 data stream type node and set its
+ # `$is-default` property to `True`.
prop_name = '$default-stream'
- v2_def_stream_type_node = v2_meta_node.get(prop_name)
+ v2_def_dst_node = v2_meta_node.get(prop_name)
- if v2_def_stream_type_node is not None:
+ if v2_def_dst_node is not None:
found = False
- for stream_type_name, v3_stream_type_node in v3_stream_types_node.items():
- if stream_type_name == v2_def_stream_type_node:
- v3_stream_type_node['$is-default'] = True
+ for dst_name, v3_dst_node in v3_dsts_node.items():
+ if dst_name == v2_def_dst_node:
+ v3_dst_node['$is-default'] = True
found = True
break
if not found:
raise _ConfigurationParseError(f'`{prop_name}` property',
- f'Stream type `{v2_def_stream_type_node}` does not exist')
+ f'Data stream type `{v2_def_dst_node}` does not exist')
# set environment node
v2_env_node = v2_meta_node.get('env')
_copy_prop_if_exists(header_node, v2_options_node, 'gen-prefix-def',
'identifier-prefix-definition')
_copy_prop_if_exists(header_node, v2_options_node, 'gen-default-stream-def',
- 'default-stream-type-name-definition')
+ 'default-data-stream-type-name-definition')
code_gen_node['header'] = header_node
self._root_node[opt_prop_name] = collections.OrderedDict({
meta_node = self._root_node['metadata']
ft_aliases_node = meta_node['type-aliases']
- # Expand field type aliases within trace, stream, and event
- # types now.
+ # Expand field type aliases within trace, data stream, and event
+ # record types now.
try:
self._resolve_ft_alias_from(ft_aliases_node, meta_node['trace'], 'packet-header-type')
except _ConfigurationParseError as exc:
_append_error_ctx(exc, 'Trace type')
- for stream_type_name, stream_type_node in meta_node['streams'].items():
+ for dst_name, dst_node in meta_node['streams'].items():
try:
- self._resolve_ft_alias_from(ft_aliases_node, stream_type_node,
- 'packet-context-type')
- self._resolve_ft_alias_from(ft_aliases_node, stream_type_node, 'event-header-type')
- self._resolve_ft_alias_from(ft_aliases_node, stream_type_node,
- 'event-context-type')
+ self._resolve_ft_alias_from(ft_aliases_node, dst_node, 'packet-context-type')
+ self._resolve_ft_alias_from(ft_aliases_node, dst_node, 'event-header-type')
+ self._resolve_ft_alias_from(ft_aliases_node, dst_node, 'event-context-type')
- for ev_type_name, ev_type_node in stream_type_node['events'].items():
+ for ert_name, ert_node in dst_node['events'].items():
try:
- self._resolve_ft_alias_from(ft_aliases_node, ev_type_node, 'context-type')
- self._resolve_ft_alias_from(ft_aliases_node, ev_type_node, 'payload-type')
+ self._resolve_ft_alias_from(ft_aliases_node, ert_node, 'context-type')
+ self._resolve_ft_alias_from(ft_aliases_node, ert_node, 'payload-type')
except _ConfigurationParseError as exc:
- _append_error_ctx(exc, f'Event type `{ev_type_name}`')
+ _append_error_ctx(exc, f'Event record type `{ert_name}`')
except _ConfigurationParseError as exc:
- _append_error_ctx(exc, f'Stream type `{stream_type_name}`')
+ _append_error_ctx(exc, f'Data stream type `{dst_name}`')
# remove the (now unneeded) `type-aliases` node
del meta_node['type-aliases']
meta_node = self._root_node['metadata']
self._apply_ft_inheritance(meta_node['trace'], 'packet-header-type')
- for stream_type_node in meta_node['streams'].values():
- self._apply_ft_inheritance(stream_type_node, 'packet-context-type')
- self._apply_ft_inheritance(stream_type_node, 'event-header-type')
- self._apply_ft_inheritance(stream_type_node, 'event-context-type')
+ for dst_node in meta_node['streams'].values():
+ self._apply_ft_inheritance(dst_node, 'packet-context-type')
+ self._apply_ft_inheritance(dst_node, 'event-header-type')
+ self._apply_ft_inheritance(dst_node, 'event-context-type')
- for ev_type_node in stream_type_node['events'].values():
- self._apply_ft_inheritance(ev_type_node, 'context-type')
- self._apply_ft_inheritance(ev_type_node, 'payload-type')
+ for ert_node in dst_node['events'].values():
+ self._apply_ft_inheritance(ert_node, 'context-type')
+ self._apply_ft_inheritance(ert_node, 'payload-type')
# Calls _expand_ft_aliases() and _apply_fts_inheritance() if the
# metadata node has a `type-aliases` property.
# next, apply inheritance to create effective field types
self._apply_fts_inheritance()
- # Processes the inclusions of the event type node `ev_type_node`,
+ # Processes the inclusions of the event record type node `ert_node`,
# returning the effective node.
- def _process_ev_type_node_include(self, ev_type_node: _MapNode) -> _MapNode:
- # Make sure the event type node is valid for the inclusion
- # processing stage.
- self._schema_validator.validate(ev_type_node, 'config/2/event-type-pre-include')
+ def _process_ert_node_include(self, ert_node: _MapNode) -> _MapNode:
+ # Make sure the event record type node is valid for the
+ # inclusion processing stage.
+ self._schema_validator.validate(ert_node, 'config/2/ert-pre-include')
# process inclusions
- return self._process_node_include(ev_type_node, self._process_ev_type_node_include)
+ return self._process_node_include(ert_node, self._process_ert_node_include)
- # Processes the inclusions of the stream type node
- # `stream_type_node`, returning the effective node.
- def _process_stream_type_node_include(self, stream_type_node: _MapNode) -> _MapNode:
- def process_children_include(stream_type_node):
+ # Processes the inclusions of the data stream type node `dst_node`,
+ # returning the effective node.
+ def _process_dst_node_include(self, dst_node: _MapNode) -> _MapNode:
+ def process_children_include(dst_node):
prop_name = 'events'
- if prop_name in stream_type_node:
- ev_types_node = stream_type_node[prop_name]
+ if prop_name in dst_node:
+ erts_node = dst_node[prop_name]
- for key in list(ev_types_node):
- ev_types_node[key] = self._process_ev_type_node_include(ev_types_node[key])
+ for key in list(erts_node):
+ erts_node[key] = self._process_ert_node_include(erts_node[key])
- # Make sure the stream type node is valid for the inclusion
+ # Make sure the data stream type node is valid for the inclusion
# processing stage.
- self._schema_validator.validate(stream_type_node, 'config/2/stream-type-pre-include')
+ self._schema_validator.validate(dst_node, 'config/2/dst-pre-include')
# process inclusions
- return self._process_node_include(stream_type_node, self._process_stream_type_node_include,
+ return self._process_node_include(dst_node, self._process_dst_node_include,
process_children_include)
# Processes the inclusions of the trace type node `trace_type_node`,
prop_name = 'streams'
if prop_name in meta_node:
- stream_types_node = meta_node[prop_name]
+ dsts_node = meta_node[prop_name]
- for key in list(stream_types_node):
- stream_types_node[key] = self._process_stream_type_node_include(stream_types_node[key])
+ for key in list(dsts_node):
+ dsts_node[key] = self._process_dst_node_include(dsts_node[key])
# Make sure the metadata node is valid for the inclusion
# processing stage.
def _process_config_includes(self):
# Process inclusions in this order:
#
- # 1. Clock type node, event type nodes, and trace type nodes
- # (the order between those is not important).
+ # 1. Clock type node, event record type nodes, and trace type
+ # nodes (the order between those is not important).
#
- # 2. Stream type nodes.
+ # 2. Data stream type nodes.
#
# 3. Metadata node.
#
# This is because:
#
# * A metadata node can include clock type nodes, a trace type
- # node, stream type nodes, and event type nodes (indirectly).
+ # node, data stream type nodes, and event record type nodes
+ # (indirectly).
#
- # * A stream type node can include event type nodes.
+ # * A data stream type node can include event record type nodes.
#
# First, make sure the configuration node itself is valid for
# the inclusion processing stage.
# packet header and packet context field type member nodes (for
# example, `stream_id`, `packet_size`, or `timestamp_end`) to
# set the `$features` properties of barectf 3 trace type and
- # stream type nodes. Those field type nodes can be aliases,
+ # data stream type nodes. Those field type nodes can be aliases,
# contain aliases, or inherit from other nodes.
self._expand_fts()
# Validate the whole, (almost) effective configuration node.
#
# It's almost effective because the `log-level` property of
- # event type nodes can be log level aliases. Log level aliases
- # are also a feature of a barectf 3 configuration node,
+ # event record type nodes can be log level aliases. Log level
+ # aliases are also a feature of a barectf 3 configuration node,
# therefore this is compatible.
self._schema_validator.validate(self._root_node, 'config/2/config')
return Count(len(members_node))
- # Creates an event type from the event type node `ev_type_node`
- # named `name`.
+ # Creates an event record type from the event record type node
+ # `ert_node` named `name`.
#
- # `ev_member_count` is the total number of structure field type
- # members within the event type so far (from the common part in its
- # stream type). For example, if the stream type has a event header
- # field type with `id` and `timestamp` members, then
- # `ev_member_count` is 2.
- def _create_ev_type(self, name: str, ev_type_node: _MapNode, ev_member_count: Count) -> barectf_config.EventType:
+ # `ert_member_count` is the total number of structure field type
+ # members within the event record type so far (from the common part
+ # in its data stream type). For example, if the data stream type has
+ # an event record header field type with `id` and `timestamp`
+ # members, then `ert_member_count` is 2.
+ def _create_ert(self, name: str, ert_node: _MapNode,
+ ert_member_count: Count) -> barectf_config.EventRecordType:
try:
- self._validate_iden(name, '`name` property', 'event type name')
+ self._validate_iden(name, '`name` property', 'event record type name')
- # make sure the event type is not empty
+ # make sure the event record type is not empty
spec_ctx_ft_prop_name = 'specific-context-field-type'
payload_ft_prop_name = 'payload-field-type'
- ev_member_count = Count(ev_member_count + self._total_struct_ft_node_members(ev_type_node.get(spec_ctx_ft_prop_name)))
- ev_member_count = Count(ev_member_count + self._total_struct_ft_node_members(ev_type_node.get(payload_ft_prop_name)))
+ ert_member_count = Count(ert_member_count + self._total_struct_ft_node_members(ert_node.get(spec_ctx_ft_prop_name)))
+ ert_member_count = Count(ert_member_count + self._total_struct_ft_node_members(ert_node.get(payload_ft_prop_name)))
- if ev_member_count == 0:
- raise _ConfigurationParseError('Event type', 'Event type is empty (no members).')
+ if ert_member_count == 0:
+ raise _ConfigurationParseError('Event record type',
+ 'Event record type is empty (no members).')
- # create event type
- return barectf_config.EventType(name, ev_type_node.get('log-level'),
- self._try_create_struct_ft(ev_type_node,
+ # create event record type
+ return barectf_config.EventRecordType(name, ert_node.get('log-level'),
+ self._try_create_struct_ft(ert_node,
spec_ctx_ft_prop_name),
- self._try_create_struct_ft(ev_type_node,
+ self._try_create_struct_ft(ert_node,
payload_ft_prop_name))
except _ConfigurationParseError as exc:
- _append_error_ctx(exc, f'Event type `{name}`')
+ _append_error_ctx(exc, f'Event record type `{name}`')
# satisfy static type checker (never reached)
raise
assert type(ft_node) is collections.OrderedDict
return self._create_fts(ft_node)[0]
- def _create_stream_type(self, name: str, stream_type_node: _MapNode) -> barectf_config.StreamType:
+ def _create_dst(self, name: str, dst_node: _MapNode) -> barectf_config.DataStreamType:
try:
- # validate stream type's name
- self._validate_iden(name, '`name` property', 'stream type name')
+ # validate data stream type's name
+ self._validate_iden(name, '`name` property', 'data stream type name')
# get default clock type, if any
def_clk_type = None
prop_name = '$default-clock-type-name'
- def_clk_type_name = stream_type_node.get(prop_name)
+ def_clk_type_name = dst_node.get(prop_name)
if def_clk_type_name is not None:
try:
pkt_content_size_ft = barectf_config.DEFAULT_FIELD_TYPE
pkt_beginning_time_ft = None
pkt_end_time_ft = None
- pkt_discarded_events_counter_ft = None
- ev_type_id_ft = barectf_config.DEFAULT_FIELD_TYPE
- ev_time_ft = None
+ pkt_discarded_er_counter_snap_ft = None
+ ert_id_ft = barectf_config.DEFAULT_FIELD_TYPE
+ ert_time_ft = None
if def_clk_type is not None:
- # The stream type has a default clock type. Initialize
- # the packet beginning time, packet end time, and event
- # time field types to default field types.
+ # The data stream type has a default clock type.
+ # Initialize the packet beginning time, packet end time,
+ # and event record time field types to default field
+ # types.
#
- # This means your stream type node only needs a default
- # clock type name to enable those features
+ # This means your data stream type node only needs a
+ # default clock type name to enable those features
# automatically. Those features do not add any parameter
- # to the tracing event functions.
+ # to the event tracing functions.
pkt_beginning_time_ft = barectf_config.DEFAULT_FIELD_TYPE
pkt_end_time_ft = barectf_config.DEFAULT_FIELD_TYPE
- ev_time_ft = barectf_config.DEFAULT_FIELD_TYPE
+ ert_time_ft = barectf_config.DEFAULT_FIELD_TYPE
- features_node = stream_type_node.get('$features')
+ features_node = dst_node.get('$features')
if features_node is not None:
# create packet feature field types
pkt_beginning_time_ft)
pkt_end_time_ft = self._feature_ft(pkt_node, 'end-time-field-type',
pkt_end_time_ft)
- pkt_discarded_events_counter_ft = self._feature_ft(pkt_node,
- 'discarded-events-counter-field-type',
- pkt_discarded_events_counter_ft)
+ pkt_discarded_er_counter_snap_ft = self._feature_ft(pkt_node,
+ 'discarded-event-records-counter-snapshot-field-type',
+ pkt_discarded_er_counter_snap_ft)
- # create event feature field types
- ev_node = features_node.get('event')
+ # create event record feature field types
+ er_node = features_node.get('event-record')
type_id_ft_prop_name = 'type-id-field-type'
- if ev_node is not None:
- ev_type_id_ft = self._feature_ft(ev_node, type_id_ft_prop_name, ev_type_id_ft)
- ev_time_ft = self._feature_ft(ev_node, 'time-field-type', ev_time_ft)
+ if er_node is not None:
+ ert_id_ft = self._feature_ft(er_node, type_id_ft_prop_name, ert_id_ft)
+ ert_time_ft = self._feature_ft(er_node, 'time-field-type', ert_time_ft)
- ev_types_prop_name = 'event-types'
- ev_type_count = len(stream_type_node[ev_types_prop_name])
+ erts_prop_name = 'event-record-types'
+ ert_count = len(dst_node[erts_prop_name])
try:
- if ev_type_id_ft is None and ev_type_count > 1:
+ if ert_id_ft is None and ert_count > 1:
raise _ConfigurationParseError(f'`{type_id_ft_prop_name}` property',
- 'Event type ID field type feature is required because stream type has more than one event type')
+ 'Event record type ID field type feature is required because data stream type has more than one event record type')
- if isinstance(ev_type_id_ft, barectf_config._IntegerFieldType):
- ev_type_id_int_ft = typing.cast(barectf_config._IntegerFieldType, ev_type_id_ft)
+ if isinstance(ert_id_ft, barectf_config._IntegerFieldType):
+ ert_id_int_ft = typing.cast(barectf_config._IntegerFieldType, ert_id_ft)
- if ev_type_count > (1 << ev_type_id_int_ft.size):
+ if ert_count > (1 << ert_id_int_ft.size):
raise _ConfigurationParseError(f'`{type_id_ft_prop_name}` property',
- f'Field type\'s size ({ev_type_id_int_ft.size} bits) is too small to accomodate {ev_type_count} event types')
+ f'Field type\'s size ({ert_id_int_ft.size} bits) is too small to accommodate {ert_count} event record types')
except _ConfigurationParseError as exc:
- exc._append_ctx('`event` property')
+ exc._append_ctx('`event-record` property')
_append_error_ctx(exc, '`$features` property')
- pkt_features = barectf_config.StreamTypePacketFeatures(pkt_total_size_ft,
- pkt_content_size_ft,
- pkt_beginning_time_ft,
- pkt_end_time_ft,
- pkt_discarded_events_counter_ft)
- ev_features = barectf_config.StreamTypeEventFeatures(ev_type_id_ft, ev_time_ft)
- features = barectf_config.StreamTypeFeatures(pkt_features, ev_features)
+ pkt_features = barectf_config.DataStreamTypePacketFeatures(pkt_total_size_ft,
+ pkt_content_size_ft,
+ pkt_beginning_time_ft,
+ pkt_end_time_ft,
+ pkt_discarded_er_counter_snap_ft)
+ er_features = barectf_config.DataStreamTypeEventRecordFeatures(ert_id_ft, ert_time_ft)
+ features = barectf_config.DataStreamTypeFeatures(pkt_features, er_features)
# create packet context (structure) field type extra members
pkt_ctx_ft_extra_members = None
prop_name = 'packet-context-field-type-extra-members'
- pkt_ctx_ft_extra_members_node = stream_type_node.get(prop_name)
+ pkt_ctx_ft_extra_members_node = dst_node.get(prop_name)
if pkt_ctx_ft_extra_members_node is not None:
pkt_ctx_ft_extra_members = self._create_struct_ft_members(pkt_ctx_ft_extra_members_node,
raise _ConfigurationParseError(f'`{prop_name}` property',
f'Packet context field type member name `{member_name}` is reserved.')
- # create event types
- ev_header_common_ctx_member_count = Count(0)
+ # create event record types
+ er_header_common_ctx_member_count = Count(0)
- if ev_features.type_id_field_type is not None:
- ev_header_common_ctx_member_count = Count(ev_header_common_ctx_member_count + 1)
+ if er_features.type_id_field_type is not None:
+ er_header_common_ctx_member_count = Count(er_header_common_ctx_member_count + 1)
- if ev_features.time_field_type is not None:
- ev_header_common_ctx_member_count = Count(ev_header_common_ctx_member_count + 1)
+ if er_features.time_field_type is not None:
+ er_header_common_ctx_member_count = Count(er_header_common_ctx_member_count + 1)
- ev_common_ctx_ft_prop_name = 'event-common-context-field-type'
- ev_common_ctx_ft_node = stream_type_node.get(ev_common_ctx_ft_prop_name)
- ev_header_common_ctx_member_count = Count(ev_header_common_ctx_member_count + self._total_struct_ft_node_members(ev_common_ctx_ft_node))
- ev_types = set()
+ er_common_ctx_ft_prop_name = 'event-record-common-context-field-type'
+ er_common_ctx_ft_node = dst_node.get(er_common_ctx_ft_prop_name)
+ er_header_common_ctx_member_count = Count(er_header_common_ctx_member_count + self._total_struct_ft_node_members(er_common_ctx_ft_node))
+ erts = set()
- for ev_name, ev_type_node in stream_type_node[ev_types_prop_name].items():
- ev_types.add(self._create_ev_type(ev_name, ev_type_node, ev_header_common_ctx_member_count))
+ for ert_name, ert_node in dst_node[erts_prop_name].items():
+ erts.add(self._create_ert(ert_name, ert_node, er_header_common_ctx_member_count))
- # create stream type
- return barectf_config.StreamType(name, ev_types, def_clk_type, features,
- pkt_ctx_ft_extra_members,
- self._try_create_struct_ft(stream_type_node,
- ev_common_ctx_ft_prop_name))
+ # create data stream type
+ return barectf_config.DataStreamType(name, erts, def_clk_type, features,
+ pkt_ctx_ft_extra_members,
+ self._try_create_struct_ft(dst_node,
+ er_common_ctx_ft_prop_name))
except _ConfigurationParseError as exc:
- _append_error_ctx(exc, f'Stream type `{name}`')
+ _append_error_ctx(exc, f'Data stream type `{name}`')
# satisfy static type checker (never reached)
raise
def _create_trace_type(self):
try:
- # create clock types (_create_stream_type() needs them)
+ # create clock types (_create_dst() needs them)
self._create_clk_types()
# get UUID
# create feature field types
magic_ft = barectf_config.DEFAULT_FIELD_TYPE
uuid_ft = None
- stream_type_id_ft = barectf_config.DEFAULT_FIELD_TYPE
+ dst_id_ft = barectf_config.DEFAULT_FIELD_TYPE
if trace_type_uuid is not None:
# Trace type has a UUID: initialize UUID field type to
uuid_ft = barectf_config.DEFAULT_FIELD_TYPE
features_node = self._trace_type_node.get('$features')
- stream_type_id_ft_prop_name = 'stream-type-id-field-type'
+ dst_id_ft_prop_name = 'data-stream-type-id-field-type'
if features_node is not None:
magic_ft = self._feature_ft(features_node, 'magic-field-type',
magic_ft)
uuid_ft = self._feature_ft(features_node, 'uuid-field-type', uuid_ft)
- stream_type_id_ft = self._feature_ft(features_node, stream_type_id_ft_prop_name,
- stream_type_id_ft)
+ dst_id_ft = self._feature_ft(features_node, dst_id_ft_prop_name, dst_id_ft)
- stream_types_prop_name = 'stream-types'
- stream_type_count = len(self._trace_type_node[stream_types_prop_name])
+ dsts_prop_name = 'data-stream-types'
+ dst_count = len(self._trace_type_node[dsts_prop_name])
try:
- if stream_type_id_ft is None and stream_type_count > 1:
- raise _ConfigurationParseError(f'`{stream_type_id_ft_prop_name}` property',
- 'Stream type ID field type feature is required because trace type has more than one stream type')
+ if dst_id_ft is None and dst_count > 1:
+ raise _ConfigurationParseError(f'`{dst_id_ft_prop_name}` property',
+ 'Data stream type ID field type feature is required because trace type has more than one data stream type')
- if isinstance(stream_type_id_ft, barectf_config._FieldType) and stream_type_count > (1 << stream_type_id_ft.size):
- raise _ConfigurationParseError(f'`{stream_type_id_ft_prop_name}` property',
- f'Field type\'s size ({stream_type_id_ft.size} bits) is too small to accomodate {stream_type_count} stream types')
+ if isinstance(dst_id_ft, barectf_config._FieldType) and dst_count > (1 << dst_id_ft.size):
+ raise _ConfigurationParseError(f'`{dst_id_ft_prop_name}` property',
+ f'Field type\'s size ({dst_id_ft.size} bits) is too small to accommodate {dst_count} data stream types')
except _ConfigurationParseError as exc:
_append_error_ctx(exc, '`$features` property')
- features = barectf_config.TraceTypeFeatures(magic_ft, uuid_ft, stream_type_id_ft)
+ features = barectf_config.TraceTypeFeatures(magic_ft, uuid_ft, dst_id_ft)
- # create stream types
- stream_types = set()
+ # create data stream types
+ dsts = set()
- for stream_name, stream_type_node in self._trace_type_node[stream_types_prop_name].items():
- stream_types.add(self._create_stream_type(stream_name, stream_type_node))
+ for dst_name, dst_node in self._trace_type_node[dsts_prop_name].items():
+ dsts.add(self._create_dst(dst_name, dst_node))
# create trace type
- return barectf_config.TraceType(stream_types, trace_type_uuid, features)
+ return barectf_config.TraceType(dsts, trace_type_uuid, features)
except _ConfigurationParseError as exc:
_append_error_ctx(exc, 'Trace type')
# create trace first
trace = self._create_trace()
- # find default stream type, if any
- def_stream_type = None
+ # find default data stream type, if any
+ def_dst = None
- for stream_type_name, stream_type_node in self._trace_type_node['stream-types'].items():
+ for dst_name, dst_node in self._trace_type_node['data-stream-types'].items():
prop_name = '$is-default'
- is_default = stream_type_node.get(prop_name)
+ is_default = dst_node.get(prop_name)
if is_default is True:
- if def_stream_type is not None:
+ if def_dst is not None:
exc = _ConfigurationParseError(f'`{prop_name}` property',
- f'Duplicate default stream type (`{def_stream_type.name}`)')
- exc._append_ctx(f'Stream type `{stream_type_name}`')
+ f'Duplicate default data stream type (`{def_dst.name}`)')
+ exc._append_ctx(f'Data stream type `{dst_name}`')
_append_error_ctx(exc, 'Trace type')
- def_stream_type = trace.type.stream_type(stream_type_name)
+ def_dst = trace.type.data_stream_type(dst_name)
# create clock type C type mapping
clk_types_node = self._trace_type_node.get('clock-types')
if clk_types_node is not None:
clk_type_c_types = collections.OrderedDict()
- for stream_type in trace.type.stream_types:
- if stream_type.default_clock_type is None:
+ for dst in trace.type.data_stream_types:
+ if dst.default_clock_type is None:
continue
- clk_type_node = clk_types_node[stream_type.default_clock_type.name]
+ clk_type_node = clk_types_node[dst.default_clock_type.name]
c_type = clk_type_node.get('$c-type')
if c_type is not None:
- clk_type_c_types[stream_type.default_clock_type] = c_type
+ clk_type_c_types[dst.default_clock_type] = c_type
# create options
iden_prefix_def = False
- def_stream_type_name_def = False
+ def_dst_name_def = False
opts_node = self.config_node.get('options')
iden_prefix = 'barectf_'
file_name_prefix = 'barectf'
if header_opts is not None:
iden_prefix_def = header_opts.get('identifier-prefix-definition', False)
- def_stream_type_name_def = header_opts.get('default-stream-type-name-definition',
- False)
+ def_dst_name_def = header_opts.get('default-data-stream-type-name-definition',
+ False)
header_opts = barectf_config.ConfigurationCodeGenerationHeaderOptions(iden_prefix_def,
- def_stream_type_name_def)
+ def_dst_name_def)
cg_opts = barectf_config.ConfigurationCodeGenerationOptions(iden_prefix, file_name_prefix,
- def_stream_type, header_opts,
+ def_dst, header_opts,
clk_type_c_types)
opts = barectf_config.ConfigurationOptions(cg_opts)
ft_aliases_node = self._trace_type_node['$field-type-aliases']
- # Expand field type aliases within trace, stream, and event type
- # nodes.
+ # Expand field type aliases within trace, data stream, and event
+ # record type nodes.
features_prop_name = '$features'
try:
try:
resolve_ft_alias_from(features_node, 'magic-field-type')
resolve_ft_alias_from(features_node, 'uuid-field-type')
- resolve_ft_alias_from(features_node, 'stream-type-id-field-type')
+ resolve_ft_alias_from(features_node, 'data-stream-type-id-field-type')
except _ConfigurationParseError as exc:
_append_error_ctx(exc, f'`{features_prop_name}` property')
except _ConfigurationParseError as exc:
_append_error_ctx(exc, 'Trace type')
- for stream_type_name, stream_type_node in self._trace_type_node['stream-types'].items():
+ for dst_name, dst_node in self._trace_type_node['data-stream-types'].items():
try:
- features_node = stream_type_node.get(features_prop_name)
+ features_node = dst_node.get(features_prop_name)
if features_node is not None:
try:
resolve_ft_alias_from(pkt_node, 'beginning-time-field-type')
resolve_ft_alias_from(pkt_node, 'end-time-field-type')
resolve_ft_alias_from(pkt_node,
- 'discarded-events-counter-field-type')
+ 'discarded-event-records-counter-snapshot-field-type')
except _ConfigurationParseError as exc:
_append_error_ctx(exc, f'`{pkt_prop_name}` property')
- ev_prop_name = 'event'
- ev_node = features_node.get(ev_prop_name)
+ er_prop_name = 'event-record'
+ er_node = features_node.get(er_prop_name)
- if ev_node is not None:
+ if er_node is not None:
try:
- resolve_ft_alias_from(ev_node, 'type-id-field-type')
- resolve_ft_alias_from(ev_node, 'time-field-type')
+ resolve_ft_alias_from(er_node, 'type-id-field-type')
+ resolve_ft_alias_from(er_node, 'time-field-type')
except _ConfigurationParseError as exc:
- _append_error_ctx(exc, f'`{ev_prop_name}` property')
+ _append_error_ctx(exc, f'`{er_prop_name}` property')
except _ConfigurationParseError as exc:
_append_error_ctx(exc, f'`{features_prop_name}` property')
pkt_ctx_ft_extra_members_prop_name = 'packet-context-field-type-extra-members'
- pkt_ctx_ft_extra_members_node = stream_type_node.get(pkt_ctx_ft_extra_members_prop_name)
+ pkt_ctx_ft_extra_members_node = dst_node.get(pkt_ctx_ft_extra_members_prop_name)
if pkt_ctx_ft_extra_members_node is not None:
try:
except _ConfigurationParseError as exc:
_append_error_ctx(exc, f'`{pkt_ctx_ft_extra_members_prop_name}` property')
- resolve_ft_alias_from(stream_type_node, 'event-common-context-field-type')
+ resolve_ft_alias_from(dst_node, 'event-record-common-context-field-type')
- for ev_type_name, ev_type_node in stream_type_node['event-types'].items():
+ for ert_name, ert_node in dst_node['event-record-types'].items():
try:
- resolve_ft_alias_from(ev_type_node, 'specific-context-field-type')
- resolve_ft_alias_from(ev_type_node, 'payload-field-type')
+ resolve_ft_alias_from(ert_node, 'specific-context-field-type')
+ resolve_ft_alias_from(ert_node, 'payload-field-type')
except _ConfigurationParseError as exc:
- _append_error_ctx(exc, f'Event type `{ev_type_name}`')
+ _append_error_ctx(exc, f'Event record type `{ert_name}`')
except _ConfigurationParseError as exc:
- _append_error_ctx(exc, f'Stream type `{stream_type_name}`')
+ _append_error_ctx(exc, f'Data stream type `{dst_name}`')
# remove the (now unneeded) `$field-type-aliases` property
del self._trace_type_node['$field-type-aliases']
if features_node is not None:
apply_ft_inheritance(features_node, 'magic-field-type')
apply_ft_inheritance(features_node, 'uuid-field-type')
- apply_ft_inheritance(features_node, 'stream-type-id-field-type')
+ apply_ft_inheritance(features_node, 'data-stream-type-id-field-type')
- for stream_type_node in self._trace_type_node['stream-types'].values():
- features_node = stream_type_node.get(features_prop_name)
+ for dst_node in self._trace_type_node['data-stream-types'].values():
+ features_node = dst_node.get(features_prop_name)
if features_node is not None:
pkt_node = features_node.get('packet')
apply_ft_inheritance(pkt_node, 'content-size-field-type')
apply_ft_inheritance(pkt_node, 'beginning-time-field-type')
apply_ft_inheritance(pkt_node, 'end-time-field-type')
- apply_ft_inheritance(pkt_node, 'discarded-events-counter-field-type')
+ apply_ft_inheritance(pkt_node, 'discarded-event-records-counter-snapshot-field-type')
- ev_node = features_node.get('event')
+ er_node = features_node.get('event-record')
- if ev_node is not None:
- apply_ft_inheritance(ev_node, 'type-id-field-type')
- apply_ft_inheritance(ev_node, 'time-field-type')
+ if er_node is not None:
+ apply_ft_inheritance(er_node, 'type-id-field-type')
+ apply_ft_inheritance(er_node, 'time-field-type')
- pkt_ctx_ft_extra_members_node = stream_type_node.get('packet-context-field-type-extra-members')
+ pkt_ctx_ft_extra_members_node = dst_node.get('packet-context-field-type-extra-members')
if pkt_ctx_ft_extra_members_node is not None:
for member_node in pkt_ctx_ft_extra_members_node:
member_node = list(member_node.values())[0]
apply_ft_inheritance(member_node, 'field-type')
- apply_ft_inheritance(stream_type_node, 'event-common-context-field-type')
+ apply_ft_inheritance(dst_node, 'event-record-common-context-field-type')
- for ev_type_node in stream_type_node['event-types'].values():
- apply_ft_inheritance(ev_type_node, 'specific-context-field-type')
- apply_ft_inheritance(ev_type_node, 'payload-field-type')
+ for ert_node in dst_node['event-record-types'].values():
+ apply_ft_inheritance(ert_node, 'specific-context-field-type')
+ apply_ft_inheritance(ert_node, 'payload-field-type')
# Normalizes structure field type member nodes.
#
if features_node is not None:
normalize_struct_ft_member_nodes(features_node, 'magic-field-type')
normalize_struct_ft_member_nodes(features_node, 'uuid-field-type')
- normalize_struct_ft_member_nodes(features_node, 'stream-type-id-field-type')
+ normalize_struct_ft_member_nodes(features_node, 'data-stream-type-id-field-type')
- for stream_type_node in self._trace_type_node['stream-types'].values():
- features_node = stream_type_node.get(features_prop_name)
+ for dst_node in self._trace_type_node['data-stream-types'].values():
+ features_node = dst_node.get(features_prop_name)
if features_node is not None:
pkt_node = features_node.get('packet')
normalize_struct_ft_member_nodes(pkt_node, 'beginning-time-field-type')
normalize_struct_ft_member_nodes(pkt_node, 'end-time-field-type')
normalize_struct_ft_member_nodes(pkt_node,
- 'discarded-events-counter-field-type')
+ 'discarded-event-records-counter-snapshot-field-type')
- ev_node = features_node.get('event')
+ er_node = features_node.get('event-record')
- if ev_node is not None:
- normalize_struct_ft_member_nodes(ev_node, 'type-id-field-type')
- normalize_struct_ft_member_nodes(ev_node, 'time-field-type')
+ if er_node is not None:
+ normalize_struct_ft_member_nodes(er_node, 'type-id-field-type')
+ normalize_struct_ft_member_nodes(er_node, 'time-field-type')
- pkt_ctx_ft_extra_members_node = stream_type_node.get('packet-context-field-type-extra-members')
+ pkt_ctx_ft_extra_members_node = dst_node.get('packet-context-field-type-extra-members')
if pkt_ctx_ft_extra_members_node is not None:
normalize_members_node(pkt_ctx_ft_extra_members_node)
- normalize_struct_ft_member_nodes(stream_type_node, 'event-common-context-field-type')
+ normalize_struct_ft_member_nodes(dst_node, 'event-record-common-context-field-type')
- for ev_type_node in stream_type_node['event-types'].values():
- normalize_struct_ft_member_nodes(ev_type_node, 'specific-context-field-type')
- normalize_struct_ft_member_nodes(ev_type_node, 'payload-field-type')
+ for ert_node in dst_node['event-record-types'].values():
+ normalize_struct_ft_member_nodes(ert_node, 'specific-context-field-type')
+ normalize_struct_ft_member_nodes(ert_node, 'payload-field-type')
# Calls _expand_ft_aliases() and _apply_fts_inheritance() if the
# trace type node has a `$field-type-aliases` property.
# next, apply inheritance to create effective field type nodes
self._apply_fts_inheritance()
- # Substitute the event type node log level aliases with their
+ # Substitute the event record type node log level aliases with their
# numeric equivalents.
#
# Removes the `$log-level-aliases` property of the trace type node.
return
# substitute log level aliases
- for stream_type_name, stream_type_node in self._trace_type_node['stream-types'].items():
+ for dst_name, dst_node in self._trace_type_node['data-stream-types'].items():
try:
- for ev_type_name, ev_type_node in stream_type_node['event-types'].items():
+ for ert_name, ert_node in dst_node['event-record-types'].items():
try:
prop_name = 'log-level'
- ll_node = ev_type_node.get(prop_name)
+ ll_node = ert_node.get(prop_name)
if ll_node is None:
continue
raise _ConfigurationParseError(f'`{prop_name}` property',
f'Log level alias `{ll_node}` does not exist')
- ev_type_node[prop_name] = log_level_aliases_node[ll_node]
+ ert_node[prop_name] = log_level_aliases_node[ll_node]
except _ConfigurationParseError as exc:
- _append_error_ctx(exc, f'Event type `{ev_type_name}`')
+ _append_error_ctx(exc, f'Event record type `{ert_name}`')
except _ConfigurationParseError as exc:
- _append_error_ctx(exc, f'Stream type `{stream_type_name}`')
+ _append_error_ctx(exc, f'Data stream type `{dst_name}`')
# Generator of parent node and key pairs for all the nodes,
# recursively, of `node`.
self._target_byte_order_node = self.config_node['target-byte-order']
self._target_byte_order = self._byte_order_from_node(self._target_byte_order_node)
- # Processes the inclusions of the event type node `ev_type_node`,
- # returning the effective node.
- def _process_ev_type_node_include(self, ev_type_node: _MapNode) -> _MapNode:
- # Make sure the event type node is valid for the inclusion
- # processing stage.
- self._schema_validator.validate(ev_type_node, 'config/3/event-type-pre-include')
+ # Processes the inclusions of the event record type node
+ # `ert_node`, returning the effective node.
+ def _process_ert_node_include(self, ert_node: _MapNode) -> _MapNode:
+ # Make sure the event record type node is valid for the
+ # inclusion processing stage.
+ self._schema_validator.validate(ert_node, 'config/3/ert-pre-include')
# process inclusions
- return self._process_node_include(ev_type_node, self._process_ev_type_node_include)
+ return self._process_node_include(ert_node, self._process_ert_node_include)
- # Processes the inclusions of the stream type node
- # `stream_type_node`, returning the effective node.
- def _process_stream_type_node_include(self, stream_type_node: _MapNode) -> _MapNode:
- def process_children_include(stream_type_node: _MapNode):
- prop_name = 'event-types'
+ # Processes the inclusions of the data stream type node `dst_node`,
+ # returning the effective node.
+ def _process_dst_node_include(self, dst_node: _MapNode) -> _MapNode:
+ def process_children_include(dst_node: _MapNode):
+ prop_name = 'event-record-types'
- if prop_name in stream_type_node:
- ev_types_node = stream_type_node[prop_name]
+ if prop_name in dst_node:
+ erts_node = dst_node[prop_name]
- for key in list(ev_types_node):
- ev_types_node[key] = self._process_ev_type_node_include(ev_types_node[key])
+ for key in list(erts_node):
+ erts_node[key] = self._process_ert_node_include(erts_node[key])
- # Make sure the stream type node is valid for the inclusion
+ # Make sure the data stream type node is valid for the inclusion
# processing stage.
- self._schema_validator.validate(stream_type_node, 'config/3/stream-type-pre-include')
+ self._schema_validator.validate(dst_node, 'config/3/dst-pre-include')
# process inclusions
- return self._process_node_include(stream_type_node, self._process_stream_type_node_include,
+ return self._process_node_include(dst_node, self._process_dst_node_include,
process_children_include)
# Processes the inclusions of the clock type node `clk_type_node`,
for key in list(clk_types_node):
clk_types_node[key] = self._process_clk_type_node_include(clk_types_node[key])
- prop_name = 'stream-types'
+ prop_name = 'data-stream-types'
if prop_name in trace_type_node:
- stream_types_node = trace_type_node[prop_name]
+ dsts_node = trace_type_node[prop_name]
- for key in list(stream_types_node):
- stream_types_node[key] = self._process_stream_type_node_include(stream_types_node[key])
+ for key in list(dsts_node):
+ dsts_node[key] = self._process_dst_node_include(dsts_node[key])
# Make sure the trace type node is valid for the inclusion
# processing stage.
def _process_config_includes(self):
# Process inclusions in this order:
#
- # 1. Clock type node and event type nodes (the order between
- # those is not important).
+ # 1. Clock type node and event record type nodes (the order
+ # between those is not important).
#
- # 2. Stream type nodes.
+ # 2. Data stream type nodes.
#
# 3. Trace type node.
#
# This is because:
#
# * A trace node can include a trace type node, clock type
- # nodes, stream type nodes, and event type nodes.
+ # nodes, data stream type nodes, and event record type nodes.
#
- # * A trace type node can include clock type nodes, stream type
- # nodes, and event type nodes.
+ # * A trace type node can include clock type nodes, data stream
+ # type nodes, and event record type nodes.
#
- # * A stream type node can include event type nodes.
+ # * A data stream type node can include event record type nodes.
#
# First, make sure the configuration node itself is valid for
# the inclusion processing stage.
#
# This process:
#
- # 1. Replaces log level aliases in event type nodes with their
- # numeric equivalents as found in the `$log-level-aliases`
- # property of the trace type node.
+ # 1. Replaces log level aliases in event record type nodes with
+ # their numeric equivalents as found in the
+ # `$log-level-aliases` property of the trace type node.
#
# 2. Removes the `$log-level-aliases` property from the trace
# type node.
#
# * A 32-bit magic number unsigned integer field type.
# * A UUID static array field type.
-# * An 8-bit stream type ID unsigned integer field type.
+# * An 8-bit data stream type ID unsigned integer field type.
#
# The trace type's UUID is automatically generated by barectf.
packet-header-type:
$ref: '#/definitions/partial-ft'
streams:
- title: Stream types object before field type expansions
+ title: Data stream types object before field type expansions
type: object
patternProperties:
'.*':
- title: Stream type object before field type expansions
+ title: Data stream type object before field type expansions
type: object
properties:
packet-context-type:
$ref: '#/definitions/uint-ft'
timestamp:
$ref: '#/definitions/uint-ft-ts'
- stream-type:
- title: Stream type object
+ dst:
+ title: Data stream type object
type: object
properties:
$default:
event-context-type:
$ref: '#/definitions/opt-struct-ft'
events:
- title: Event types object
+ title: Event record types object
type: object
patternProperties:
'^[A-Za-z_][A-Za-z0-9_]*$':
- $ref: '#/definitions/event-type'
+ $ref: '#/definitions/ert'
additionalProperties: false
minProperties: 1
required:
- packet-context-type
- events
additionalProperties: false
- event-type:
- title: Event type object
+ ert:
+ title: Event record type object
type: object
properties:
log-level:
$default-stream:
$ref: https://barectf.org/schemas/config/common/common.json#/definitions/opt-string
streams:
- title: Stream types object
+ title: Data stream types object
type: object
patternProperties:
'^[A-Za-z_][A-Za-z0-9_]*$':
- $ref: '#/definitions/stream-type'
+ $ref: '#/definitions/dst'
additionalProperties: false
minProperties: 1
required:
--- /dev/null
+# The MIT License (MIT)
+#
+# Copyright (c) 2020 Philippe Proulx <pproulx@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+$schema: http://json-schema.org/draft-07/schema#
+$id: https://barectf.org/schemas/config/2/dst-pre-include.json
+title: Data stream type object before inclusions
+type: object
+properties:
+ $include:
+ $ref: https://barectf.org/schemas/config/2/include-prop.json
+ events:
+ title: Event record types object before inclusions
+ type: object
+ patternProperties:
+ '.*':
+ $ref: https://barectf.org/schemas/config/2/ert-pre-include.json
--- /dev/null
+# The MIT License (MIT)
+#
+# Copyright (c) 2020 Philippe Proulx <pproulx@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+$schema: http://json-schema.org/draft-07/schema#
+$id: https://barectf.org/schemas/config/2/ert-pre-include.json
+title: Event record type object before inclusions
+type: object
+properties:
+ $include:
+ $ref: https://barectf.org/schemas/config/2/include-prop.json
+++ /dev/null
-# The MIT License (MIT)
-#
-# Copyright (c) 2020 Philippe Proulx <pproulx@efficios.com>
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-$schema: http://json-schema.org/draft-07/schema#
-$id: https://barectf.org/schemas/config/2/event-type-pre-include.json
-title: Event type object before inclusions
-type: object
-properties:
- $include:
- $ref: https://barectf.org/schemas/config/2/include-prop.json
trace:
$ref: https://barectf.org/schemas/config/2/trace-type-pre-include.json
streams:
- title: Stream types object before inclusions
+ title: Data stream types object before inclusions
type: object
patternProperties:
'.*':
- $ref: https://barectf.org/schemas/config/2/stream-type-pre-include.json
+ $ref: https://barectf.org/schemas/config/2/dst-pre-include.json
+++ /dev/null
-# The MIT License (MIT)
-#
-# Copyright (c) 2020 Philippe Proulx <pproulx@efficios.com>
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-$schema: http://json-schema.org/draft-07/schema#
-$id: https://barectf.org/schemas/config/2/stream-type-pre-include.json
-title: Stream type object before inclusions
-type: object
-properties:
- $include:
- $ref: https://barectf.org/schemas/config/2/include-prop.json
- events:
- title: Event types object before inclusions
- type: object
- patternProperties:
- '.*':
- $ref: https://barectf.org/schemas/config/2/event-type-pre-include.json
$ref: '#/definitions/partial-ft'
uuid-field-type:
$ref: '#/definitions/partial-ft'
- stream-type-id-field-type:
+ data-stream-type-id-field-type:
$ref: '#/definitions/partial-ft'
else:
type: 'null'
- stream-types:
- title: Stream types object before field type expansions
+ data-stream-types:
+ title: Data stream types object before field type expansions
type: object
patternProperties:
'.*':
- title: Stream type object before field type expansions
+ title: Data stream type object before field type expansions
type: object
properties:
$features:
$ref: '#/definitions/partial-ft'
end-time-field-type:
$ref: '#/definitions/partial-ft'
- discarded-events-counter-field-type:
+ discarded-event-records-counter-snapshot-field-type:
$ref: '#/definitions/partial-ft'
else:
type: 'null'
- event:
+ event-record:
if:
type: object
then:
$ref: '#/definitions/partial-ft'
else:
type: 'null'
- event-common-context-field-type:
+ event-record-common-context-field-type:
$ref: '#/definitions/partial-ft'
- event-types:
- title: Event types object before field type expansions
+ event-record-types:
+ title: Event record types object before field type expansions
type: object
patternProperties:
'.*':
- title: Event type object before field type expansions
+ title: Event record type object before field type expansions
type: object
properties:
specific-context-field-type:
payload-field-type:
$ref: '#/definitions/partial-ft'
required:
- - event-types
+ - event-record-types
required:
- - stream-types
+ - data-stream-types
required:
- type
required:
properties:
$log-level-aliases:
$ref: https://barectf.org/schemas/config/common/common.json#/definitions/opt-log-level-aliases-prop
- stream-types:
- title: Stream types object before log level alias substitutions
+ data-stream-types:
+ title: Data stream types object before log level alias substitutions
type: object
patternProperties:
'.*':
- title: Stream type object before log level alias substitutions
+ title: Data stream type object before log level alias substitutions
type: object
properties:
- event-types:
- title: Event types object before log level alias substitutions
+ event-record-types:
+ title: Event record types object before log level alias substitutions
type: object
patternProperties:
'.*':
- title: Event type object before log level alias substitutions
+ title: Event record type object before log level alias substitutions
type: object
properties:
log-level:
$ref: https://barectf.org/schemas/config/common/common.json#/definitions/opt-log-level-or-alias-prop
required:
- - event-types
+ - event-record-types
required:
- - stream-types
+ - data-stream-types
required:
- type
required:
const: false
else:
type: 'null'
- stream-type-id-field-type:
+ data-stream-type-id-field-type:
$ref: '#/definitions/opt-or-def-feature-uint-ft'
additionalProperties: false
else:
'^[A-Za-z_][A-Za-z0-9_]*$':
$ref: '#/definitions/clock-type'
additionalProperties: false
- stream-types:
- title: Stream types object
+ data-stream-types:
+ title: Data stream types object
type: object
patternProperties:
'^[A-Za-z_][A-Za-z0-9_]*$':
- $ref: '#/definitions/stream-type'
+ $ref: '#/definitions/dst'
additionalProperties: false
minProperties: 1
required:
- - stream-types
+ - data-stream-types
additionalProperties: false
clock-type:
title: Clock type object
$c-type:
$ref: https://barectf.org/schemas/config/common/common.json#/definitions/opt-string
additionalProperties: false
- stream-type:
- title: Stream type object
+ dst:
+ title: Data stream type object
type: object
properties:
$is-default:
$ref: '#/definitions/opt-or-def-feature-uint-ft'
end-time-field-type:
$ref: '#/definitions/opt-or-def-feature-uint-ft'
- discarded-events-counter-field-type:
+ discarded-event-records-counter-snapshot-field-type:
$ref: '#/definitions/opt-or-def-feature-uint-ft'
additionalProperties: false
else:
type: 'null'
- event:
+ event-record:
if:
type: object
then:
$ref: https://barectf.org/schemas/config/3/field-type.json#/definitions/struct-ft-members
else:
type: 'null'
- event-common-context-field-type:
+ event-record-common-context-field-type:
$ref: '#/definitions/opt-struct-ft'
- event-types:
- title: Event types object
+ event-record-types:
+ title: Event record types object
type: object
patternProperties:
'^[A-Za-z_][A-Za-z0-9_]*$':
- $ref: '#/definitions/event-type'
+ $ref: '#/definitions/ert'
additionalProperties: false
minProperties: 1
required:
- - event-types
+ - event-record-types
additionalProperties: false
- event-type:
- title: Event type object
+ ert:
+ title: Event record type object
type: object
properties:
log-level:
properties:
identifier-prefix-definition:
type: boolean
- default-stream-type-name-definition:
+ default-data-stream-type-name-definition:
type: boolean
additionalProperties: false
additionalProperties: false
--- /dev/null
+# The MIT License (MIT)
+#
+# Copyright (c) 2020 Philippe Proulx <pproulx@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+$schema: http://json-schema.org/draft-07/schema#
+$id: https://barectf.org/schemas/config/3/dst-pre-include.json
+title: Data stream type object before inclusions
+type: object
+properties:
+ $include:
+ $ref: https://barectf.org/schemas/config/3/include-prop.json
+ event-record-types:
+ title: Event record types object before inclusions
+ type: object
+ patternProperties:
+ '.*':
+ $ref: https://barectf.org/schemas/config/3/ert-pre-include.json
--- /dev/null
+# The MIT License (MIT)
+#
+# Copyright (c) 2020 Philippe Proulx <pproulx@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+$schema: http://json-schema.org/draft-07/schema#
+$id: https://barectf.org/schemas/config/3/ert-pre-include.json
+title: Event record type object before inclusions
+type: object
+properties:
+ $include:
+ $ref: https://barectf.org/schemas/config/3/include-prop.json
+++ /dev/null
-# The MIT License (MIT)
-#
-# Copyright (c) 2020 Philippe Proulx <pproulx@efficios.com>
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-$schema: http://json-schema.org/draft-07/schema#
-$id: https://barectf.org/schemas/config/3/event-type-pre-include.json
-title: Event type object before inclusions
-type: object
-properties:
- $include:
- $ref: https://barectf.org/schemas/config/3/include-prop.json
+++ /dev/null
-# The MIT License (MIT)
-#
-# Copyright (c) 2020 Philippe Proulx <pproulx@efficios.com>
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-$schema: http://json-schema.org/draft-07/schema#
-$id: https://barectf.org/schemas/config/3/stream-type-pre-include.json
-title: Stream type object before inclusions
-type: object
-properties:
- $include:
- $ref: https://barectf.org/schemas/config/3/include-prop.json
- event-types:
- title: Event types object before inclusions
- type: object
- patternProperties:
- '.*':
- $ref: https://barectf.org/schemas/config/3/event-type-pre-include.json
patternProperties:
'.*':
$ref: https://barectf.org/schemas/config/3/clock-type-pre-include.json
- stream-types:
- title: Stream types object before inclusions
+ data-stream-types:
+ title: Data stream types object before inclusions
type: object
patternProperties:
'.*':
- $ref: https://barectf.org/schemas/config/3/stream-type-pre-include.json
+ $ref: https://barectf.org/schemas/config/3/dst-pre-include.json
{#
# Generates the preamble of the packet opening/closing functions for
- # the stream type `stream_type`.
+ # the data stream type `dst`.
#}
-{% macro open_close_func_preamble(stream_type) %}
+{% macro open_close_func_preamble(dst) %}
struct {{ prefix }}ctx * const ctx = &sctx->parent;
-{% if stream_type.default_clock_type %}
-const {{ cg_opts.clock_type_c_types[stream_type.default_clock_type] }} ts = ctx->use_cur_last_event_ts ?
+{% if dst.default_clock_type %}
+const {{ cg_opts.clock_type_c_types[dst.default_clock_type] }} ts = ctx->use_cur_last_event_ts ?
sctx->cur_last_event_ts :
- ctx->cbs.{{ stream_type.default_clock_type.name }}_clock_get_value(ctx->data);
+ ctx->cbs.{{ dst.default_clock_type.name }}_clock_get_value(ctx->data);
{% endif %}
const int saved_in_tracing_section = ctx->in_tracing_section;
{%- endmacro %}
#
# Example:
#
- # , ecc_peer_id, ecc_addr, p_msg_id, p_msg
+ # , ercc_peer_id, ercc_addr, p_msg_id, p_msg
#}
{% macro ft_call_params(param_prefix, ft, only_dyn=false) %}
{% if ft %}
}
static
-int _reserve_ev_space(void * const vctx, const uint32_t ev_size)
+int _reserve_er_space(void * const vctx, const uint32_t er_size)
{
int ret;
struct {{ ctx_struct_name }} * const ctx = _FROM_VOID_PTR(struct {{ ctx_struct_name }}, vctx);
/* Event _cannot_ fit? */
- if (ev_size > (ctx->packet_size - ctx->off_content)) {
+ if (er_size > (ctx->packet_size - ctx->off_content)) {
goto no_space;
}
if ({{ prefix }}packet_is_full(ctx)) {
/* Yes: is the back-end full? */
if (ctx->cbs.is_backend_full(ctx->data)) {
- /* Yes: discard event */
+ /* Yes: discard event record */
goto no_space;
}
}
/* Event fits the current packet? */
- if (ev_size > (ctx->packet_size - ctx->at)) {
+ if (er_size > (ctx->packet_size - ctx->at)) {
/* No: close packet now */
ctx->use_cur_last_event_ts = 1;
ctx->cbs.close_packet(ctx->data);
/* Is the back-end full? */
if (ctx->cbs.is_backend_full(ctx->data)) {
- /* Yes: discard event */
+ /* Yes: discard event record */
goto no_space;
}
ctx->use_cur_last_event_ts = 1;
ctx->cbs.open_packet(ctx->data);
ctx->use_cur_last_event_ts = 0;
- assert(ev_size <= (ctx->packet_size - ctx->at));
+ assert(er_size <= (ctx->packet_size - ctx->at));
}
ret = 1;
}
static
-void _commit_ev(void * const vctx)
+void _commit_er(void * const vctx)
{
struct {{ ctx_struct_name }} * const ctx = _FROM_VOID_PTR(struct {{ ctx_struct_name }}, vctx);
ctx->use_cur_last_event_ts = 0;
}
-{% for stream_type in cfg.trace.type.stream_types | sort %}
- {% set def_clk_type = stream_type.default_clock_type %}
- {% set sctx_name %}{{ prefix }}{{ stream_type.name }}{% endset %}
- {% set this_stream_ops = stream_ops[stream_type] %}
+{% for dst in cfg.trace.type.data_stream_types | sort %}
+ {% set def_clk_type = dst.default_clock_type %}
+ {% set sctx_name %}{{ prefix }}{{ dst.name }}{% endset %}
+ {% set this_ds_ops = ds_ops[dst] %}
{% include 'c/open-func-proto.j2' %}
{
- {{ macros.open_close_func_preamble(stream_type) | indent_tab }}
+ {{ macros.open_close_func_preamble(dst) | indent_tab }}
/*
* This function is either called by a tracing function, or
}
ctx->at = 0;
- {% set pkt_header_op = this_stream_ops.pkt_header_op %}
+ {% set pkt_header_op = this_ds_ops.pkt_header_op %}
{% if pkt_header_op %}
- {{ pkt_header_op.serialize_str(stream_type=stream_type) | indent_tab }}
+ {{ pkt_header_op.serialize_str(dst=dst) | indent_tab }}
{% endif %}
- {{ this_stream_ops.pkt_ctx_op.serialize_str(stream_type=stream_type) | indent_tab }}
+ {{ this_ds_ops.pkt_ctx_op.serialize_str(dst=dst) | indent_tab }}
/* Save content beginning's offset */
ctx->off_content = ctx->at;
{% include 'c/close-func-proto.j2' %}
{
- {{ macros.open_close_func_preamble(stream_type) | indent_tab }}
+ {{ macros.open_close_func_preamble(dst) | indent_tab }}
/*
* This function is either called by a tracing function, or
/* Save content size */
ctx->content_size = ctx->at;
{% set name = 'timestamp_end' %}
- {% if name in stream_type._pkt_ctx_ft.members %}
- {% set op = stream_op_pkt_ctx_op(stream_type, name) %}
+ {% if name in dst._pkt_ctx_ft.members %}
+ {% set op = ds_op_pkt_ctx_op(dst, name) %}
/* Go back to `timestamp_end` field offset */
ctx->at = sctx->off_{{ op | op_src_var_name }};
{% endfilter %}
{% endif %}
{% set name = 'content_size' %}
- {% if name in stream_type._pkt_ctx_ft.members %}
- {% set op = stream_op_pkt_ctx_op(stream_type, name) %}
+ {% if name in dst._pkt_ctx_ft.members %}
+ {% set op = ds_op_pkt_ctx_op(dst, name) %}
/* Go back to `content_size` field offset */
ctx->at = sctx->off_{{ op | op_src_var_name }};
{% endfilter %}
{% endif %}
{% set name = 'events_discarded' %}
- {% if name in stream_type._pkt_ctx_ft.members %}
- {% set op = stream_op_pkt_ctx_op(stream_type, name) %}
+ {% if name in dst._pkt_ctx_ft.members %}
+ {% set op = ds_op_pkt_ctx_op(dst, name) %}
/* Go back to `events_discarded` field offset */
ctx->at = sctx->off_{{ op | op_src_var_name }};
end:
return;
}
- {% if stream_type._ev_header_ft %}
+ {% if dst._er_header_ft %}
-static void _serialize_ev_header_{{ stream_type.name }}(void * const vctx,
- const uint32_t ev_type_id)
+static void _serialize_er_header_{{ dst.name }}(void * const vctx,
+ const uint32_t ert_id)
{
struct {{ ctx_struct_name }} * const ctx = _FROM_VOID_PTR(struct {{ ctx_struct_name }}, vctx);
{% if def_clk_type %}
const {{ cg_opts.clock_type_c_types[def_clk_type] }} ts = sctx->cur_last_event_ts;
{% endif %}
- {{ this_stream_ops.ev_header_op.serialize_str(stream_type=stream_type) | indent_tab }}
+ {{ this_ds_ops.er_header_op.serialize_str(dst=dst) | indent_tab }}
}
{% endif %}
- {% if stream_type.event_common_context_field_type %}
+ {% if dst.event_record_common_context_field_type %}
-static void _serialize_ev_common_ctx_{{ stream_type.name }}(void * const vctx{{ stream_type | serialize_ev_common_ctx_func_params_str(const_params) }})
+static void _serialize_er_common_ctx_{{ dst.name }}(void * const vctx{{ dst | serialize_er_common_ctx_func_params_str(const_params) }})
{
struct {{ ctx_struct_name }} * const ctx = _FROM_VOID_PTR(struct {{ ctx_struct_name }}, vctx);
- {{ this_stream_ops.ev_common_ctx_op.serialize_str(stream_type=stream_type) | indent_tab }}
+ {{ this_ds_ops.er_common_ctx_op.serialize_str(dst=dst) | indent_tab }}
}
{% endif %}
{# internal serialization functions #}
- {% for ev_type in stream_type.event_types | sort %}
+ {% for ert in dst.event_record_types | sort %}
-static void _serialize_ev_{{ stream_type.name }}_{{ ev_type.name }}(void * const vctx{{ (stream_type, ev_type) | trace_func_params_str(const_params) }})
+static void _serialize_er_{{ dst.name }}_{{ ert.name }}(void * const vctx{{ (dst, ert) | trace_func_params_str(const_params) }})
{
struct {{ ctx_struct_name }} * const ctx = _FROM_VOID_PTR(struct {{ ctx_struct_name }}, vctx);
- {% if stream_type._ev_header_ft %}
+ {% if dst._er_header_ft %}
/* Serialize header */
- _serialize_ev_header_{{ stream_type.name }}(ctx, {{ ev_type.id }});
+ _serialize_er_header_{{ dst.name }}(ctx, {{ ert.id }});
{% endif %}
- {% if stream_type.event_common_context_field_type %}
+ {% if dst.event_record_common_context_field_type %}
/* Serialize common context */
- {% set params = macros.ft_call_params(root_ft_prefixes.ECC, stream_type.event_common_context_field_type) %}
- _serialize_ev_common_ctx_{{ stream_type.name }}(ctx{{ params }});
+ {% set params = macros.ft_call_params(root_ft_prefixes.ERCC, dst.event_record_common_context_field_type) %}
+ _serialize_er_common_ctx_{{ dst.name }}(ctx{{ params }});
{% endif %}
- {% set this_ev_ops = this_stream_ops.ev_ops[ev_type] %}
- {% if this_ev_ops.spec_ctx_op %}
+ {% set this_er_ops = this_ds_ops.er_ops[ert] %}
+ {% if this_er_ops.spec_ctx_op %}
- {{ this_ev_ops.spec_ctx_op.serialize_str(stream_type=stream_type, ev_type=ev_type) | indent_tab }}
+ {{ this_er_ops.spec_ctx_op.serialize_str(dst=dst, ert=ert) | indent_tab }}
{% endif %}
- {% if this_ev_ops.payload_op %}
+ {% if this_er_ops.payload_op %}
- {{ this_ev_ops.payload_op.serialize_str(stream_type=stream_type, ev_type=ev_type) | indent_tab }}
+ {{ this_er_ops.payload_op.serialize_str(dst=dst, ert=ert) | indent_tab }}
{% endif %}
}
{% endfor %}
{# internal size functions #}
- {% for ev_type in stream_type.event_types | sort %}
- {% set this_ev_ops = this_stream_ops.ev_ops[ev_type] %}
+ {% for ert in dst.event_record_types | sort %}
+ {% set this_er_ops = this_ds_ops.er_ops[ert] %}
-static uint32_t _ev_size_{{ stream_type.name }}_{{ ev_type.name }}(void * const vctx{{ (stream_type, ev_type) | trace_func_params_str(const_params, only_dyn=true) }})
+static uint32_t _er_size_{{ dst.name }}_{{ ert.name }}(void * const vctx{{ (dst, ert) | trace_func_params_str(const_params, only_dyn=true) }})
{
struct {{ ctx_struct_name }} * const ctx = _FROM_VOID_PTR(struct {{ ctx_struct_name }}, vctx);
uint32_t at = ctx->at;
- {% if this_stream_ops.ev_header_op %}
+ {% if this_ds_ops.er_header_op %}
- {{ this_stream_ops.ev_header_op.size_str(stream_type=stream_type) | indent_tab }}
+ {{ this_ds_ops.er_header_op.size_str(dst=dst) | indent_tab }}
{% endif %}
- {% if this_stream_ops.ev_common_ctx_op %}
+ {% if this_ds_ops.er_common_ctx_op %}
- {{ this_stream_ops.ev_common_ctx_op.size_str(stream_type=stream_type) | indent_tab }}
+ {{ this_ds_ops.er_common_ctx_op.size_str(dst=dst) | indent_tab }}
{% endif %}
- {% if this_ev_ops.spec_ctx_op %}
+ {% if this_er_ops.spec_ctx_op %}
- {{ this_ev_ops.spec_ctx_op.size_str(stream_type=stream_type, ev_type=ev_type) | indent_tab }}
+ {{ this_er_ops.spec_ctx_op.size_str(dst=dst, ert=ert) | indent_tab }}
{% endif %}
- {% if this_ev_ops.payload_op %}
+ {% if this_er_ops.payload_op %}
- {{ this_ev_ops.payload_op.size_str(stream_type=stream_type, ev_type=ev_type) | indent_tab }}
+ {{ this_er_ops.payload_op.size_str(dst=dst, ert=ert) | indent_tab }}
{% endif %}
return at - ctx->at;
}
{% endfor %}
{# public tracing functions #}
- {% for ev_type in stream_type.event_types | sort %}
+ {% for ert in dst.event_record_types | sort %}
{% include 'c/trace-func-proto.j2' %}
{
struct {{ ctx_struct_name }} * const ctx = &sctx->parent;
- uint32_t ev_size;
+ uint32_t er_size;
{% if def_clk_type %}
/* Save time */
/* We can alter the packet */
ctx->in_tracing_section = 1;
- /* Compute event size */
- {% set ev_common_ctx_params = macros.ft_call_params(root_ft_prefixes.ECC, stream_type.event_common_context_field_type, true) %}
- {% set spec_ctx_params = macros.ft_call_params(root_ft_prefixes.SC, ev_type.specific_context_field_type, true) %}
- {% set payload_params = macros.ft_call_params(root_ft_prefixes.P, ev_type.payload_field_type, true) %}
- {% set params %}{{ ev_common_ctx_params }}{{ spec_ctx_params }}{{ payload_params }}{% endset %}
- ev_size = _ev_size_{{ stream_type.name }}_{{ ev_type.name }}(_TO_VOID_PTR(ctx){{ params }});
+ /* Compute event record size */
+ {% set er_common_ctx_params = macros.ft_call_params(root_ft_prefixes.ERCC, dst.event_record_common_context_field_type, true) %}
+ {% set spec_ctx_params = macros.ft_call_params(root_ft_prefixes.SC, ert.specific_context_field_type, true) %}
+ {% set payload_params = macros.ft_call_params(root_ft_prefixes.P, ert.payload_field_type, true) %}
+ {% set params %}{{ er_common_ctx_params }}{{ spec_ctx_params }}{{ payload_params }}{% endset %}
+ er_size = _er_size_{{ dst.name }}_{{ ert.name }}(_TO_VOID_PTR(ctx){{ params }});
/* Is there enough space to serialize? */
- if (!_reserve_ev_space(_TO_VOID_PTR(ctx), ev_size)) {
+ if (!_reserve_er_space(_TO_VOID_PTR(ctx), er_size)) {
/* no: forget this */
ctx->in_tracing_section = 0;
goto end;
}
- /* Serialize event */
- {% set ev_common_ctx_params = macros.ft_call_params(root_ft_prefixes.ECC, stream_type.event_common_context_field_type) %}
- {% set spec_ctx_params = macros.ft_call_params(root_ft_prefixes.SC, ev_type.specific_context_field_type) %}
- {% set payload_params = macros.ft_call_params(root_ft_prefixes.P, ev_type.payload_field_type) %}
- {% set params %}{{ ev_common_ctx_params }}{{ spec_ctx_params }}{{ payload_params }}{% endset %}
- _serialize_ev_{{ stream_type.name }}_{{ ev_type.name }}(_TO_VOID_PTR(ctx){{ params }});
+ /* Serialize event record */
+ {% set er_common_ctx_params = macros.ft_call_params(root_ft_prefixes.ERCC, dst.event_record_common_context_field_type) %}
+ {% set spec_ctx_params = macros.ft_call_params(root_ft_prefixes.SC, ert.specific_context_field_type) %}
+ {% set payload_params = macros.ft_call_params(root_ft_prefixes.P, ert.payload_field_type) %}
+ {% set params %}{{ er_common_ctx_params }}{{ spec_ctx_params }}{{ payload_params }}{% endset %}
+ _serialize_er_{{ dst.name }}_{{ ert.name }}(_TO_VOID_PTR(ctx){{ params }});
- /* Commit event */
- _commit_ev(_TO_VOID_PTR(ctx));
+ /* Commit event record */
+ _commit_er(_TO_VOID_PTR(ctx));
/* Not tracing anymore */
ctx->in_tracing_section = 0;
{% set ucprefix = common.ucprefix %}
{% set trace_type = cfg.trace.type %}
{% set cg_opts = cfg.options.code_generation_options %}
-{% set def_stream_type = cg_opts.default_stream_type %}
+{% set def_dst = cg_opts.default_data_stream_type %}
{% set header_opts = cg_opts.header_options %}
{% set const_params = false %}
#ifndef _{{ ucprefix }}H
{% if header_opts.identifier_prefix_definition %}
#define _BARECTF_PREFIX {{ prefix }}
{% endif %}
-{% if def_stream_type and header_opts.default_stream_type_name_definition %}
-#define _BARECTF_DEFAULT_STREAM {{ def_stream_type.name }}
+{% if def_dst and header_opts.default_data_stream_type_name_definition %}
+#define _BARECTF_DEFAULT_STREAM {{ def_dst.name }}
{% endif %}
-{% if def_stream_type %}
+{% if def_dst %}
- {% for ev_type in def_stream_type.event_types | sort %}
-#define {{ prefix }}trace_{{ ev_type.name }} {{ c_common.trace_func_name(def_stream_type, ev_type) }}
+ {% for ert in def_dst.event_record_types | sort %}
+#define {{ prefix }}trace_{{ ert.name }} {{ c_common.trace_func_name(def_dst, ert) }}
{% endfor %}
{% endif %}
/* Size of packet header + context fields (content offset) */
uint32_t off_content;
- /* Discarded event counter */
+ /* Discarded event records counter snapshot */
uint32_t events_discarded;
/* Current packet is open? */
/* Tracing is enabled? */
volatile int is_tracing_enabled;
- /* Use current/last event time when opening/closing packets */
+ /* Use current/last event record time when opening/closing packets */
int use_cur_last_event_ts;
};
-{% for stream_type in trace_type.stream_types | sort %}
-/* Context for stream type `{{ stream_type.name }}` */
-struct {{ prefix }}{{ stream_type.name }}_ctx {
+{% for dst in trace_type.data_stream_types | sort %}
+/* Context for data stream type `{{ dst.name }}` */
+struct {{ prefix }}{{ dst.name }}_ctx {
/* Parent */
struct {{ prefix }}ctx parent;
uint32_t off_ph_{{ member_name }};
{% endfor %}
{% endif %}
- {% for member_name in stream_type._pkt_ctx_ft.members %}
+ {% for member_name in dst._pkt_ctx_ft.members %}
uint32_t off_pc_{{ member_name }};
{% endfor %}
- {% if stream_type.default_clock_type %}
- {{ cg_opts.clock_type_c_types[stream_type.default_clock_type] }} cur_last_event_ts;
+ {% if dst.default_clock_type %}
+ {{ cg_opts.clock_type_c_types[dst.default_clock_type] }} cur_last_event_ts;
{% endif %}
};
{% endfor %}
{% include 'c/ctx-init-func-proto.j2' %};
-{% for stream_type in trace_type.stream_types | sort %}
+{% for dst in trace_type.data_stream_types | sort %}
{% include 'c/open-func-proto.j2' %};
{% include 'c/close-func-proto.j2' %};
- {% for ev_type in stream_type.event_types | sort %}
+ {% for ert in dst.event_record_types | sort %}
{% include 'c/trace-func-proto.j2' %};
{% endfor %}
#}
{% import 'common.j2' as common %}
{% import 'c/common.j2' as c_common %}
-/* Close packet for stream type `{{ stream_type.name }}` */
-void {{ common.prefix }}{{ stream_type.name }}_close_packet(struct {{ common.prefix }}{{ stream_type.name }}_ctx *{{ c_common.const_ptr_str(const_params) }}sctx)
+/* Close packet for data stream type `{{ dst.name }}` */
+void {{ common.prefix }}{{ dst.name }}_close_packet(struct {{ common.prefix }}{{ dst.name }}_ctx *{{ c_common.const_ptr_str(const_params) }}sctx)
{% set ctx_struct_name %}{{ common.prefix }}ctx{% endset %}
{#
- # Generates the name of a tracing function for the stream type
- # `stream_type` and the event type `ev_type`.
+ # Generates the name of a tracing function for the data stream type
+ # `dst` and the event record type `ert`.
#
# Example:
#
# barectf_my_stream_trace_my_event
#}
-{% macro trace_func_name(stream_type, ev_type) %}
-{{ common.prefix }}{{ stream_type.name }}_trace_{{ ev_type.name }}
+{% macro trace_func_name(dst, ert) %}
+{{ common.prefix }}{{ dst.name }}_trace_{{ ert.name }}
{%- endmacro %}
{#
#}
{% import 'common.j2' as common %}
{% import 'c/common.j2' as c_common %}
-/* Open packet for stream type `{{ stream_type.name }}` */
-void {{ common.prefix }}{{ stream_type.name }}_open_packet(
- struct {{ common.prefix }}{{ stream_type.name }}_ctx *{{ c_common.const_ptr_str(const_params) }}sctx{{ stream_type | open_func_params_str(const_params) }})
+/* Open packet for data stream type `{{ dst.name }}` */
+void {{ common.prefix }}{{ dst.name }}_open_packet(
+ struct {{ common.prefix }}{{ dst.name }}_ctx *{{ c_common.const_ptr_str(const_params) }}sctx{{ dst | open_func_params_str(const_params) }})
for ({{ var_name }} = 0; {{ var_name }} < (uint32_t) {{ length_src }}; ++{{ var_name }}) {
{% for subop in op.subops %}
- {{ subop.serialize_str(stream_type=stream_type, ev_type=ev_type) | indent_tab(2) }}
+ {{ subop.serialize_str(dst=dst, ert=ert) | indent_tab(2) }}
{% endfor %}
}
--- /dev/null
+{#
+ # The MIT License (MIT)
+ #
+ # Copyright (c) 2020 Philippe Proulx <pproulx@efficios.com>
+ #
+ # Permission is hereby granted, free of charge, to any person obtaining
+ # a copy of this software and associated documentation files (the
+ # "Software"), to deal in the Software without restriction, including
+ # without limitation the rights to use, copy, modify, merge, publish,
+ # distribute, sublicense, and/or sell copies of the Software, and to
+ # permit persons to whom the Software is furnished to do so, subject to
+ # the following conditions:
+ #
+ # The above copyright notice and this permission notice shall be
+ # included in all copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ #}
+{% set c_type = op.ft | ft_c_type %}
+{% set src = dst.id %}
+/* Write data stream type ID field */
+{% include 'c/serialize-write-bit-array-statements.j2' %}
--- /dev/null
+{#
+ # The MIT License (MIT)
+ #
+ # Copyright (c) 2020 Philippe Proulx <pproulx@efficios.com>
+ #
+ # Permission is hereby granted, free of charge, to any person obtaining
+ # a copy of this software and associated documentation files (the
+ # "Software"), to deal in the Software without restriction, including
+ # without limitation the rights to use, copy, modify, merge, publish,
+ # distribute, sublicense, and/or sell copies of the Software, and to
+ # permit persons to whom the Software is furnished to do so, subject to
+ # the following conditions:
+ #
+ # The above copyright notice and this permission notice shall be
+ # included in all copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ #}
+{% set c_type = op.ft | ft_c_type %}
+{% set src = 'ert_id' %}
+/* Write event record type ID field */
+{% include 'c/serialize-write-bit-array-statements.j2' %}
+++ /dev/null
-{#
- # The MIT License (MIT)
- #
- # Copyright (c) 2020 Philippe Proulx <pproulx@efficios.com>
- #
- # Permission is hereby granted, free of charge, to any person obtaining
- # a copy of this software and associated documentation files (the
- # "Software"), to deal in the Software without restriction, including
- # without limitation the rights to use, copy, modify, merge, publish,
- # distribute, sublicense, and/or sell copies of the Software, and to
- # permit persons to whom the Software is furnished to do so, subject to
- # the following conditions:
- #
- # The above copyright notice and this permission notice shall be
- # included in all copies or substantial portions of the Software.
- #
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- #}
-{% set c_type = op.ft | ft_c_type %}
-{% set src = 'ev_type_id' %}
-/* Write event type ID field */
-{% include 'c/serialize-write-bit-array-statements.j2' %}
+++ /dev/null
-{#
- # The MIT License (MIT)
- #
- # Copyright (c) 2020 Philippe Proulx <pproulx@efficios.com>
- #
- # Permission is hereby granted, free of charge, to any person obtaining
- # a copy of this software and associated documentation files (the
- # "Software"), to deal in the Software without restriction, including
- # without limitation the rights to use, copy, modify, merge, publish,
- # distribute, sublicense, and/or sell copies of the Software, and to
- # permit persons to whom the Software is furnished to do so, subject to
- # the following conditions:
- #
- # The above copyright notice and this permission notice shall be
- # included in all copies or substantial portions of the Software.
- #
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- #}
-{% set c_type = op.ft | ft_c_type %}
-{% set src = stream_type.id %}
-/* Write stream type ID field */
-{% include 'c/serialize-write-bit-array-statements.j2' %}
{% endif %}
{
{% for subop in op.subops %}
- {{ subop.serialize_str(stream_type=stream_type, ev_type=ev_type) | indent_tab }}
+ {{ subop.serialize_str(dst=dst, ert=ert) | indent_tab }}
{% endfor %}
}
for ({{ var_name }} = 0; {{ var_name }} < (uint32_t) {{ length_src }}; ++{{ var_name }}) {
{% for subop in op.subops %}
- {{ subop.size_str(stream_type=stream_type, ev_type=ev_type) | indent_tab(2) }}
+ {{ subop.size_str(dst=dst, ert=ert) | indent_tab(2) }}
{% endfor %}
}
{% endif %}
{
{% for subop in op.subops %}
- {{ subop.size_str(stream_type=stream_type, ev_type=ev_type) | indent_tab }}
+ {{ subop.size_str(dst=dst, ert=ert) | indent_tab }}
{% endfor %}
}
#}
{% import 'common.j2' as common %}
{% import 'c/common.j2' as c_common %}
-/* Trace (stream type `{{ stream_type.name }}`, event type `{{ ev_type.name }}`) */
-void {{ common.prefix }}{{ stream_type.name }}_trace_{{ ev_type.name }}(struct {{ common.prefix }}{{ stream_type.name }}_ctx *{{ c_common.const_ptr_str(const_params) }}sctx{{ (stream_type, ev_type) | trace_func_params_str(const_params) }})
+/* Trace (data stream type `{{ dst.name }}`, event record type `{{ ert.name }}`) */
+void {{ common.prefix }}{{ dst.name }}_trace_{{ ert.name }}(struct {{ common.prefix }}{{ dst.name }}_ctx *{{ c_common.const_ptr_str(const_params) }}sctx{{ (dst, ert) | trace_func_params_str(const_params) }})
{% endfor %}
};
-{# all clock types (stream types's default clock types) #}
+{# all clock types (data stream types' default clock types) #}
{% for clk_type in cfg.trace.type.clock_types | sort %}
clock {
name = {{ clk_type.name }};
};
{% endfor %}
-{# stream types and their event types #}
-{% for stream_type in cfg.trace.type.stream_types | sort %}
-/* Stream type `{{ stream_type.name }}` */
+{# data stream types and their event record types #}
+{% for dst in cfg.trace.type.data_stream_types | sort %}
+/* Data stream type `{{ dst.name }}` */
stream {
- id = {{ stream_type.id }};
- {{ root_ft('packet.context', stream_type._pkt_ctx_ft) | indent_tab }}
- {% if stream_type._ev_header_ft %}
- {{ root_ft('event.header', stream_type._ev_header_ft) | indent_tab }}
+ id = {{ dst.id }};
+ {{ root_ft('packet.context', dst._pkt_ctx_ft) | indent_tab }}
+ {% if dst._er_header_ft %}
+ {{ root_ft('event.header', dst._er_header_ft) | indent_tab }}
{% endif %}
- {% if stream_type.event_common_context_field_type %}
- {{ root_ft('event.context', stream_type.event_common_context_field_type) | indent_tab }}
+ {% if dst.event_record_common_context_field_type %}
+ {{ root_ft('event.context', dst.event_record_common_context_field_type) | indent_tab }}
{% endif %}
};
- {# stream type's event types #}
- {% for ev_type in stream_type.event_types | sort %}
+ {# data stream type's event record types #}
+ {% for ert in dst.event_record_types | sort %}
event {
- stream_id = {{ stream_type.id }};
- id = {{ ev_type.id }};
- name = "{{ ev_type.name }}";
- {% if ev_type.log_level %}
- loglevel = {{ ev_type.log_level }};
+ stream_id = {{ dst.id }};
+ id = {{ ert.id }};
+ name = "{{ ert.name }}";
+ {% if ert.log_level %}
+ loglevel = {{ ert.log_level }};
{% endif %}
- {% if ev_type.specific_context_field_type %}
- {{ root_ft('context', ev_type.specific_context_field_type) | indent_tab }}
+ {% if ert.specific_context_field_type %}
+ {{ root_ft('context', ert.specific_context_field_type) | indent_tab }}
{% endif %}
- {% if ev_type.payload_field_type %}
- {{ root_ft('fields', ev_type.payload_field_type) | indent_tab }}
+ {% if ert.payload_field_type %}
+ {{ root_ft('fields', ert.payload_field_type) | indent_tab }}
{% endif %}
};
type:
$include:
- stdint.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdreal.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdreal.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdmisc.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdint.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdmisc.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
$include:
- stdint.yaml
- stdmisc.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdint.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdint.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdint.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdint.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdreal.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdreal.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdmisc.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdint.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdmisc.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
$include:
- stdint.yaml
- stdmisc.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdint.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdint.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
type:
$include:
- stdint.yaml
- stream-types:
+ data-stream-types:
default:
$is-default: yes
- event-types:
+ event-record-types:
ev:
payload-field-type:
class: struct
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {
tracer_name = "barectf";
};
-/* Stream type `default` */
+/* Data stream type `default` */
stream {
id = 0;
packet.context := struct {