src/plugins/utils/muxer/muxer.c
/*
 * Copyright 2017 Philippe Proulx <pproulx@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define BT_COMP_LOG_SELF_COMP (muxer_comp->self_comp)
#define BT_LOG_OUTPUT_LEVEL (muxer_comp->log_level)
#define BT_LOG_TAG "PLUGIN/FLT.UTILS.MUXER"
#include "logging/comp-logging.h"

#include "common/macros.h"
#include "common/uuid.h"
#include <babeltrace2/babeltrace.h>
#include <glib.h>
#include <stdbool.h>
#include <inttypes.h>
#include "common/assert.h"
#include "common/common.h"
#include <stdlib.h>
#include <string.h>

#include "plugins/common/muxing/muxing.h"
#include "plugins/common/param-validation/param-validation.h"

#include "muxer.h"

struct muxer_comp {
	/* Weak refs */
	bt_self_component_filter *self_comp_flt;
	bt_self_component *self_comp;

	unsigned int next_port_num;
	size_t available_input_ports;
	bool initializing_muxer_msg_iter;
	bt_logging_level log_level;
};

struct muxer_upstream_msg_iter {
	struct muxer_comp *muxer_comp;

	/* Owned by this, NULL if ended */
	bt_self_component_port_input_message_iterator *msg_iter;

	/* Contains `const bt_message *`, owned by this */
	GQueue *msgs;
};

enum muxer_msg_iter_clock_class_expectation {
	MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_ANY = 0,
	MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_NONE,
	MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_ABSOLUTE,
	MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_NOT_ABS_SPEC_UUID,
	MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_NOT_ABS_NO_UUID,
};

struct muxer_msg_iter {
	struct muxer_comp *muxer_comp;

	/* Weak */
	bt_self_message_iterator *self_msg_iter;

	/*
	 * Array of struct muxer_upstream_msg_iter * (owned by this).
	 *
	 * NOTE: This array is searched linearly to find the youngest
	 * current message. Keep this until benchmarks confirm that
	 * another data structure is faster than this for our typical
	 * use cases.
	 */
	GPtrArray *active_muxer_upstream_msg_iters;

	/*
	 * Array of struct muxer_upstream_msg_iter * (owned by this).
	 *
	 * We move ended message iterators from
	 * `active_muxer_upstream_msg_iters` to this array so as to be
	 * able to restore them when seeking.
	 */
	GPtrArray *ended_muxer_upstream_msg_iters;

	/* Last time returned in a message */
	int64_t last_returned_ts_ns;

	/* Clock class expectation state */
	enum muxer_msg_iter_clock_class_expectation clock_class_expectation;

	/*
	 * Expected clock class UUID, only valid when
	 * clock_class_expectation is
	 * MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_NOT_ABS_SPEC_UUID.
	 */
	bt_uuid_t expected_clock_class_uuid;

	/*
	 * Saved error. If we hit an error in the _next method, but have some
	 * messages ready to return, we save the error here and return it on
	 * the next _next call.
	 */
	bt_component_class_message_iterator_next_method_status next_saved_status;
	const struct bt_error *next_saved_error;
};

static
void empty_message_queue(struct muxer_upstream_msg_iter *upstream_msg_iter)
{
	const bt_message *msg;

	while ((msg = g_queue_pop_head(upstream_msg_iter->msgs))) {
		bt_message_put_ref(msg);
	}
}

static
void destroy_muxer_upstream_msg_iter(
		struct muxer_upstream_msg_iter *muxer_upstream_msg_iter)
{
	struct muxer_comp *muxer_comp;

	if (!muxer_upstream_msg_iter) {
		return;
	}

	muxer_comp = muxer_upstream_msg_iter->muxer_comp;
	BT_COMP_LOGD("Destroying muxer's upstream message iterator wrapper: "
		"addr=%p, msg-iter-addr=%p, queue-len=%u",
		muxer_upstream_msg_iter,
		muxer_upstream_msg_iter->msg_iter,
		muxer_upstream_msg_iter->msgs->length);
	bt_self_component_port_input_message_iterator_put_ref(
		muxer_upstream_msg_iter->msg_iter);

	if (muxer_upstream_msg_iter->msgs) {
		empty_message_queue(muxer_upstream_msg_iter);
		g_queue_free(muxer_upstream_msg_iter->msgs);
	}

	g_free(muxer_upstream_msg_iter);
}

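/*
 * Allocates a wrapper for the upstream message iterator `self_msg_iter`,
 * takes a reference on it, and appends the wrapper to the muxer message
 * iterator's array of active upstream message iterators.
 */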
static
int muxer_msg_iter_add_upstream_msg_iter(struct muxer_msg_iter *muxer_msg_iter,
		bt_self_component_port_input_message_iterator *self_msg_iter)
{
	int ret = 0;
	struct muxer_upstream_msg_iter *muxer_upstream_msg_iter =
		g_new0(struct muxer_upstream_msg_iter, 1);
	struct muxer_comp *muxer_comp = muxer_msg_iter->muxer_comp;

	if (!muxer_upstream_msg_iter) {
		BT_COMP_LOGE_STR("Failed to allocate one muxer's upstream message iterator wrapper.");
		goto error;
	}

	muxer_upstream_msg_iter->muxer_comp = muxer_comp;
	muxer_upstream_msg_iter->msg_iter = self_msg_iter;
	bt_self_component_port_input_message_iterator_get_ref(muxer_upstream_msg_iter->msg_iter);
	muxer_upstream_msg_iter->msgs = g_queue_new();
	if (!muxer_upstream_msg_iter->msgs) {
		BT_COMP_LOGE_STR("Failed to allocate a GQueue.");
		goto error;
	}

	g_ptr_array_add(muxer_msg_iter->active_muxer_upstream_msg_iters,
		muxer_upstream_msg_iter);
	BT_COMP_LOGD("Added muxer's upstream message iterator wrapper: "
		"addr=%p, muxer-msg-iter-addr=%p, msg-iter-addr=%p",
		muxer_upstream_msg_iter, muxer_msg_iter,
		self_msg_iter);

	goto end;

error:
	g_free(muxer_upstream_msg_iter);
	ret = -1;

end:
	return ret;
}

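/*
 * Adds one available (not yet connected) input port to the muxer
 * component. Ports are named "in0", "in1", and so on, following
 * `next_port_num`.
 */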
static
bt_self_component_add_port_status add_available_input_port(
		bt_self_component_filter *self_comp)
{
	struct muxer_comp *muxer_comp = bt_self_component_get_data(
		bt_self_component_filter_as_self_component(self_comp));
	bt_self_component_add_port_status status =
		BT_SELF_COMPONENT_ADD_PORT_STATUS_OK;
	GString *port_name = NULL;

	BT_ASSERT(muxer_comp);
	port_name = g_string_new("in");
	if (!port_name) {
		BT_COMP_LOGE_STR("Failed to allocate a GString.");
		status = BT_SELF_COMPONENT_ADD_PORT_STATUS_MEMORY_ERROR;
		goto end;
	}

	g_string_append_printf(port_name, "%u", muxer_comp->next_port_num);
	status = bt_self_component_filter_add_input_port(
		self_comp, port_name->str, NULL, NULL);
	if (status != BT_SELF_COMPONENT_ADD_PORT_STATUS_OK) {
		BT_COMP_LOGE("Cannot add input port to muxer component: "
			"port-name=\"%s\", comp-addr=%p, status=%s",
			port_name->str, self_comp,
			bt_common_func_status_string(status));
		goto end;
	}

	muxer_comp->available_input_ports++;
	muxer_comp->next_port_num++;
	BT_COMP_LOGI("Added one input port to muxer component: "
		"port-name=\"%s\", comp-addr=%p",
		port_name->str, self_comp);

end:
	if (port_name) {
		g_string_free(port_name, TRUE);
	}

	return status;
}

static
bt_self_component_add_port_status create_output_port(
		bt_self_component_filter *self_comp)
{
	return bt_self_component_filter_add_output_port(
		self_comp, "out", NULL, NULL);
}

static
void destroy_muxer_comp(struct muxer_comp *muxer_comp)
{
	if (!muxer_comp) {
		return;
	}

	g_free(muxer_comp);
}

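/* The muxer component accepts no initialization parameters. */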
struct bt_param_validation_map_value_entry_descr muxer_params[] = {
	BT_PARAM_VALIDATION_MAP_VALUE_ENTRY_END
};

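/*
 * Initializes the muxer component: validates the (empty) parameter map,
 * adds the first available input port, and creates the single "out"
 * output port.
 */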
BT_HIDDEN
bt_component_class_initialize_method_status muxer_init(
		bt_self_component_filter *self_comp_flt,
		bt_self_component_filter_configuration *config,
		const bt_value *params, void *init_data)
{
	bt_component_class_initialize_method_status status;
	bt_self_component_add_port_status add_port_status;
	bt_self_component *self_comp =
		bt_self_component_filter_as_self_component(self_comp_flt);
	struct muxer_comp *muxer_comp = g_new0(struct muxer_comp, 1);
	bt_logging_level log_level = bt_component_get_logging_level(
		bt_self_component_as_component(self_comp));
	enum bt_param_validation_status validation_status;
	gchar *validate_error = NULL;

	BT_COMP_LOG_CUR_LVL(BT_LOG_INFO, log_level, self_comp,
		"Initializing muxer component: "
		"comp-addr=%p, params-addr=%p", self_comp, params);

	if (!muxer_comp) {
		BT_COMP_LOG_CUR_LVL(BT_LOG_ERROR, log_level, self_comp,
			"Failed to allocate one muxer component.");
		status = BT_COMPONENT_CLASS_INITIALIZE_METHOD_STATUS_MEMORY_ERROR;
		goto error;
	}

	muxer_comp->log_level = log_level;
	muxer_comp->self_comp = self_comp;
	muxer_comp->self_comp_flt = self_comp_flt;

	validation_status = bt_param_validation_validate(params,
		muxer_params, &validate_error);
	if (validation_status == BT_PARAM_VALIDATION_STATUS_MEMORY_ERROR) {
		status = BT_COMPONENT_CLASS_INITIALIZE_METHOD_STATUS_MEMORY_ERROR;
		goto error;
	} else if (validation_status == BT_PARAM_VALIDATION_STATUS_VALIDATION_ERROR) {
		status = BT_COMPONENT_CLASS_INITIALIZE_METHOD_STATUS_ERROR;
		BT_COMP_LOGE_APPEND_CAUSE(self_comp, "%s", validate_error);
		goto error;
	}

	bt_self_component_set_data(self_comp, muxer_comp);
	add_port_status = add_available_input_port(self_comp_flt);
	if (add_port_status != BT_SELF_COMPONENT_ADD_PORT_STATUS_OK) {
		BT_COMP_LOGE("Cannot ensure that at least one muxer component's input port is available: "
			"muxer-comp-addr=%p, status=%s",
			muxer_comp,
			bt_common_func_status_string(add_port_status));
		status = (int) add_port_status;
		goto error;
	}

	add_port_status = create_output_port(self_comp_flt);
	if (add_port_status != BT_SELF_COMPONENT_ADD_PORT_STATUS_OK) {
		BT_COMP_LOGE("Cannot create muxer component's output port: "
			"muxer-comp-addr=%p, status=%s",
			muxer_comp,
			bt_common_func_status_string(add_port_status));
		status = (int) add_port_status;
		goto error;
	}

	BT_COMP_LOGI("Initialized muxer component: "
		"comp-addr=%p, params-addr=%p, muxer-comp-addr=%p",
		self_comp, params, muxer_comp);

	status = BT_COMPONENT_CLASS_INITIALIZE_METHOD_STATUS_OK;
	goto end;

error:
	destroy_muxer_comp(muxer_comp);
	bt_self_component_set_data(self_comp, NULL);

end:
	g_free(validate_error);
	return status;
}

BT_HIDDEN
void muxer_finalize(bt_self_component_filter *self_comp)
{
	struct muxer_comp *muxer_comp = bt_self_component_get_data(
		bt_self_component_filter_as_self_component(self_comp));

	BT_COMP_LOGI("Finalizing muxer component: comp-addr=%p",
		self_comp);
	destroy_muxer_comp(muxer_comp);
}

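/*
 * Creates a message iterator on the upstream port connected to
 * `self_port`, on behalf of the muxer's own message iterator.
 */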
static
bt_self_component_port_input_message_iterator_create_from_message_iterator_status
create_msg_iter_on_input_port(struct muxer_comp *muxer_comp,
		struct muxer_msg_iter *muxer_msg_iter,
		bt_self_component_port_input *self_port,
		bt_self_component_port_input_message_iterator **msg_iter)
{
	const bt_port *port = bt_self_component_port_as_port(
		bt_self_component_port_input_as_self_component_port(
			self_port));
	bt_self_component_port_input_message_iterator_create_from_message_iterator_status
		status;

	BT_ASSERT(port);
	BT_ASSERT(bt_port_is_connected(port));

	// TODO: Advance the iterator to >= the time of the latest
	//       returned message by the muxer message
	//       iterator which creates it.
	status = bt_self_component_port_input_message_iterator_create_from_message_iterator(
		muxer_msg_iter->self_msg_iter, self_port, msg_iter);
	if (status != BT_SELF_COMPONENT_PORT_INPUT_MESSAGE_ITERATOR_CREATE_FROM_MESSAGE_ITERATOR_STATUS_OK) {
		BT_COMP_LOGE("Cannot create upstream message iterator on input port: "
			"port-addr=%p, port-name=\"%s\"",
			port, bt_port_get_name(port));
		goto end;
	}

	BT_COMP_LOGI("Created upstream message iterator on input port: "
		"port-addr=%p, port-name=\"%s\", msg-iter-addr=%p",
		port, bt_port_get_name(port), msg_iter);

end:
	return status;
}

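/*
 * Calls the upstream message iterator's "next" method and moves the
 * returned messages to the wrapper's queue. Sets `*is_ended` when the
 * upstream iterator reaches its end; AGAIN and error statuses are
 * forwarded as the corresponding "next" method status.
 */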
static
bt_component_class_message_iterator_next_method_status muxer_upstream_msg_iter_next(
		struct muxer_upstream_msg_iter *muxer_upstream_msg_iter,
		bool *is_ended)
{
	struct muxer_comp *muxer_comp = muxer_upstream_msg_iter->muxer_comp;
	bt_component_class_message_iterator_next_method_status status;
	bt_message_iterator_next_status input_port_iter_status;
	bt_message_array_const msgs;
	uint64_t i;
	uint64_t count;

	BT_COMP_LOGD("Calling upstream message iterator's \"next\" method: "
		"muxer-upstream-msg-iter-wrap-addr=%p, msg-iter-addr=%p",
		muxer_upstream_msg_iter,
		muxer_upstream_msg_iter->msg_iter);
	input_port_iter_status = bt_self_component_port_input_message_iterator_next(
		muxer_upstream_msg_iter->msg_iter, &msgs, &count);
	BT_COMP_LOGD("Upstream message iterator's \"next\" method returned: "
		"status=%s",
		bt_common_func_status_string(input_port_iter_status));

	switch (input_port_iter_status) {
	case BT_MESSAGE_ITERATOR_NEXT_STATUS_OK:
		/*
		 * Message iterator's current message is
		 * valid: it must be considered for muxing operations.
		 */
		BT_COMP_LOGD_STR("Validated upstream message iterator wrapper.");
		BT_ASSERT_DBG(count > 0);

		/* Move messages to our queue */
		for (i = 0; i < count; i++) {
			/*
			 * Push to tail in order; other side
			 * (muxer_msg_iter_do_next_one()) consumes
			 * from the head first.
			 */
			g_queue_push_tail(muxer_upstream_msg_iter->msgs,
				(void *) msgs[i]);
		}
		status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK;
		break;
	case BT_MESSAGE_ITERATOR_NEXT_STATUS_AGAIN:
		/*
		 * Message iterator's current message is not
		 * valid anymore. Return
		 * BT_MESSAGE_ITERATOR_NEXT_STATUS_AGAIN immediately.
		 */
		status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_AGAIN;
		break;
	case BT_MESSAGE_ITERATOR_NEXT_STATUS_END:	/* Fall-through. */
		/*
		 * Message iterator reached the end: release it. It
		 * won't be considered again to find the youngest
		 * message.
		 */
		*is_ended = true;
		status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK;
		break;
	case BT_MESSAGE_ITERATOR_NEXT_STATUS_ERROR:
	case BT_MESSAGE_ITERATOR_NEXT_STATUS_MEMORY_ERROR:
		/* Error status code */
		BT_COMP_LOGE_APPEND_CAUSE(muxer_comp->self_comp,
			"Upstream iterator's next method returned an error: status=%s",
			bt_common_func_status_string(input_port_iter_status));
		status = (int) input_port_iter_status;
		break;
	default:
		/* Unsupported status code */
		BT_COMP_LOGE_APPEND_CAUSE(muxer_comp->self_comp,
			"Unsupported status code: status=%s",
			bt_common_func_status_string(input_port_iter_status));
		status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_ERROR;
		break;
	}

	return status;
}

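/*
 * Computes the timestamp (in nanoseconds from origin) to use when muxing
 * `msg`. Messages without a default clock snapshot, and message types
 * which carry no timestamp at all, reuse `last_returned_ts_ns` so that
 * they keep their current relative position.
 */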
static
int get_msg_ts_ns(struct muxer_comp *muxer_comp,
		struct muxer_msg_iter *muxer_msg_iter,
		const bt_message *msg, int64_t last_returned_ts_ns,
		int64_t *ts_ns)
{
	const bt_clock_snapshot *clock_snapshot = NULL;
	int ret = 0;
	const bt_stream_class *stream_class = NULL;
	bt_message_type msg_type;

	BT_ASSERT_DBG(msg);
	BT_ASSERT_DBG(ts_ns);
	BT_COMP_LOGD("Getting message's timestamp: "
		"muxer-msg-iter-addr=%p, msg-addr=%p, "
		"last-returned-ts=%" PRId64,
		muxer_msg_iter, msg, last_returned_ts_ns);

	if (G_UNLIKELY(muxer_msg_iter->clock_class_expectation ==
			MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_NONE)) {
		*ts_ns = last_returned_ts_ns;
		goto end;
	}

	msg_type = bt_message_get_type(msg);

	if (G_UNLIKELY(msg_type == BT_MESSAGE_TYPE_PACKET_BEGINNING)) {
		stream_class = bt_stream_borrow_class_const(
			bt_packet_borrow_stream_const(
				bt_message_packet_beginning_borrow_packet_const(
					msg)));
	} else if (G_UNLIKELY(msg_type == BT_MESSAGE_TYPE_PACKET_END)) {
		stream_class = bt_stream_borrow_class_const(
			bt_packet_borrow_stream_const(
				bt_message_packet_end_borrow_packet_const(
					msg)));
	} else if (G_UNLIKELY(msg_type == BT_MESSAGE_TYPE_DISCARDED_EVENTS)) {
		stream_class = bt_stream_borrow_class_const(
			bt_message_discarded_events_borrow_stream_const(msg));
	} else if (G_UNLIKELY(msg_type == BT_MESSAGE_TYPE_DISCARDED_PACKETS)) {
		stream_class = bt_stream_borrow_class_const(
			bt_message_discarded_packets_borrow_stream_const(msg));
	}

	switch (msg_type) {
	case BT_MESSAGE_TYPE_EVENT:
		BT_ASSERT_DBG(bt_message_event_borrow_stream_class_default_clock_class_const(
			msg));
		clock_snapshot = bt_message_event_borrow_default_clock_snapshot_const(
			msg);
		break;
	case BT_MESSAGE_TYPE_PACKET_BEGINNING:
		if (bt_stream_class_packets_have_beginning_default_clock_snapshot(
				stream_class)) {
			clock_snapshot = bt_message_packet_beginning_borrow_default_clock_snapshot_const(
				msg);
		} else {
			goto no_clock_snapshot;
		}

		break;
	case BT_MESSAGE_TYPE_PACKET_END:
		if (bt_stream_class_packets_have_end_default_clock_snapshot(
				stream_class)) {
			clock_snapshot = bt_message_packet_end_borrow_default_clock_snapshot_const(
				msg);
		} else {
			goto no_clock_snapshot;
		}

		break;
	case BT_MESSAGE_TYPE_STREAM_BEGINNING:
	{
		enum bt_message_stream_clock_snapshot_state snapshot_state =
			bt_message_stream_beginning_borrow_default_clock_snapshot_const(
				msg, &clock_snapshot);
		if (snapshot_state == BT_MESSAGE_STREAM_CLOCK_SNAPSHOT_STATE_UNKNOWN) {
			goto no_clock_snapshot;
		}

		break;
	}
	case BT_MESSAGE_TYPE_STREAM_END:
	{
		enum bt_message_stream_clock_snapshot_state snapshot_state =
			bt_message_stream_end_borrow_default_clock_snapshot_const(
				msg, &clock_snapshot);
		if (snapshot_state == BT_MESSAGE_STREAM_CLOCK_SNAPSHOT_STATE_UNKNOWN) {
			goto no_clock_snapshot;
		}

		break;
	}
	case BT_MESSAGE_TYPE_DISCARDED_EVENTS:
		if (bt_stream_class_discarded_events_have_default_clock_snapshots(
				stream_class)) {
			clock_snapshot = bt_message_discarded_events_borrow_beginning_default_clock_snapshot_const(
				msg);
		} else {
			goto no_clock_snapshot;
		}

		break;
	case BT_MESSAGE_TYPE_DISCARDED_PACKETS:
		if (bt_stream_class_discarded_packets_have_default_clock_snapshots(
				stream_class)) {
			clock_snapshot = bt_message_discarded_packets_borrow_beginning_default_clock_snapshot_const(
				msg);
		} else {
			goto no_clock_snapshot;
		}

		break;
	case BT_MESSAGE_TYPE_MESSAGE_ITERATOR_INACTIVITY:
		clock_snapshot = bt_message_message_iterator_inactivity_borrow_default_clock_snapshot_const(
			msg);
		break;
	default:
		/* All the other messages have a higher priority */
		BT_COMP_LOGD_STR("Message has no timestamp: using the last returned timestamp.");
		*ts_ns = last_returned_ts_ns;
		goto end;
	}

	ret = bt_clock_snapshot_get_ns_from_origin(clock_snapshot, ts_ns);
	if (ret) {
		BT_COMP_LOGE("Cannot get nanoseconds from Epoch of clock snapshot: "
			"clock-snapshot-addr=%p", clock_snapshot);
		goto error;
	}

	goto end;

no_clock_snapshot:
	BT_COMP_LOGD_STR("Message's default clock snapshot is missing: "
		"using the last returned timestamp.");
	*ts_ns = last_returned_ts_ns;
	goto end;

error:
	ret = -1;

end:
	if (ret == 0) {
		BT_COMP_LOGD("Found message's timestamp: "
			"muxer-msg-iter-addr=%p, msg-addr=%p, "
			"last-returned-ts=%" PRId64 ", ts=%" PRId64,
			muxer_msg_iter, msg, last_returned_ts_ns,
			*ts_ns);
	}

	return ret;
}

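/*
 * Checks that `clock_class` is compatible with what this muxer message
 * iterator has seen so far. The first clock class encountered sets the
 * expectation (absolute, non-absolute with a specific UUID, or
 * non-absolute without a UUID); all subsequent clock classes must match.
 */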
static inline
int validate_clock_class(struct muxer_msg_iter *muxer_msg_iter,
		struct muxer_comp *muxer_comp,
		const bt_clock_class *clock_class)
{
	int ret = 0;
	const uint8_t *cc_uuid;
	const char *cc_name;

	BT_ASSERT_DBG(clock_class);
	cc_uuid = bt_clock_class_get_uuid(clock_class);
	cc_name = bt_clock_class_get_name(clock_class);

	if (muxer_msg_iter->clock_class_expectation ==
			MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_ANY) {
		/*
		 * This is the first clock class that this muxer message
		 * iterator encounters. Its properties determine what to expect
		 * for the whole lifetime of the iterator.
		 */
		if (bt_clock_class_origin_is_unix_epoch(clock_class)) {
			/* Expect absolute clock classes */
			muxer_msg_iter->clock_class_expectation =
				MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_ABSOLUTE;
		} else {
			if (cc_uuid) {
				/*
				 * Expect non-absolute clock classes
				 * with a specific UUID.
				 */
				muxer_msg_iter->clock_class_expectation =
					MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_NOT_ABS_SPEC_UUID;
				bt_uuid_copy(muxer_msg_iter->expected_clock_class_uuid, cc_uuid);
			} else {
				/*
				 * Expect non-absolute clock classes
				 * with no UUID.
				 */
				muxer_msg_iter->clock_class_expectation =
					MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_NOT_ABS_NO_UUID;
			}
		}
	}

	switch (muxer_msg_iter->clock_class_expectation) {
	case MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_ABSOLUTE:
		if (!bt_clock_class_origin_is_unix_epoch(clock_class)) {
			BT_COMP_LOGE("Expecting an absolute clock class, "
				"but got a non-absolute one: "
				"clock-class-addr=%p, clock-class-name=\"%s\"",
				clock_class, cc_name);
			goto error;
		}
		break;
	case MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_NOT_ABS_NO_UUID:
		if (bt_clock_class_origin_is_unix_epoch(clock_class)) {
			BT_COMP_LOGE("Expecting a non-absolute clock class with no UUID, "
				"but got an absolute one: "
				"clock-class-addr=%p, clock-class-name=\"%s\"",
				clock_class, cc_name);
			goto error;
		}

		if (cc_uuid) {
			BT_COMP_LOGE("Expecting a non-absolute clock class with no UUID, "
				"but got one with a UUID: "
				"clock-class-addr=%p, clock-class-name=\"%s\", "
				"uuid=\"" BT_UUID_FMT "\"",
				clock_class, cc_name, BT_UUID_FMT_VALUES(cc_uuid));
			goto error;
		}
		break;
	case MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_NOT_ABS_SPEC_UUID:
		if (bt_clock_class_origin_is_unix_epoch(clock_class)) {
			BT_COMP_LOGE("Expecting a non-absolute clock class with a specific UUID, "
				"but got an absolute one: "
				"clock-class-addr=%p, clock-class-name=\"%s\"",
				clock_class, cc_name);
			goto error;
		}

		if (!cc_uuid) {
			BT_COMP_LOGE("Expecting a non-absolute clock class with a specific UUID, "
				"but got one with no UUID: "
				"clock-class-addr=%p, clock-class-name=\"%s\"",
				clock_class, cc_name);
			goto error;
		}

		if (bt_uuid_compare(muxer_msg_iter->expected_clock_class_uuid, cc_uuid) != 0) {
			BT_COMP_LOGE("Expecting a non-absolute clock class with a specific UUID, "
				"but got one with different UUID: "
				"clock-class-addr=%p, clock-class-name=\"%s\", "
				"expected-uuid=\"" BT_UUID_FMT "\", "
				"uuid=\"" BT_UUID_FMT "\"",
				clock_class, cc_name,
				BT_UUID_FMT_VALUES(muxer_msg_iter->expected_clock_class_uuid),
				BT_UUID_FMT_VALUES(cc_uuid));
			goto error;
		}
		break;
	case MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_NONE:
		BT_COMP_LOGE("Expecting no clock class, but got one: "
			"clock-class-addr=%p, clock-class-name=\"%s\"",
			clock_class, cc_name);
		goto error;
	default:
		/* Unexpected */
		BT_COMP_LOGF("Unexpected clock class expectation: "
			"expectation-code=%d",
			muxer_msg_iter->clock_class_expectation);
		bt_common_abort();
	}

	goto end;

error:
	ret = -1;

end:
	return ret;
}

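/*
 * Validates the default clock class (or absence thereof) of a new
 * stream's class against the muxer message iterator's expectation.
 */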
static inline
int validate_new_stream_clock_class(struct muxer_msg_iter *muxer_msg_iter,
		struct muxer_comp *muxer_comp, const bt_stream *stream)
{
	int ret = 0;
	const bt_stream_class *stream_class =
		bt_stream_borrow_class_const(stream);
	const bt_clock_class *clock_class =
		bt_stream_class_borrow_default_clock_class_const(stream_class);

	if (!clock_class) {
		if (muxer_msg_iter->clock_class_expectation ==
				MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_ANY) {
			/* Expect no clock class */
			muxer_msg_iter->clock_class_expectation =
				MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_NONE;
		} else if (muxer_msg_iter->clock_class_expectation !=
				MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_NONE) {
			BT_COMP_LOGE("Expecting stream class without a default clock class: "
				"stream-class-addr=%p, stream-class-name=\"%s\", "
				"stream-class-id=%" PRIu64,
				stream_class, bt_stream_class_get_name(stream_class),
				bt_stream_class_get_id(stream_class));
			ret = -1;
		}

		goto end;
	}

	ret = validate_clock_class(muxer_msg_iter, muxer_comp, clock_class);

end:
	return ret;
}

/*
 * This function finds the youngest available message amongst the
 * non-ended upstream message iterators and returns the upstream
 * message iterator which has it, or
 * BT_MESSAGE_ITERATOR_STATUS_END if there's no available
 * message.
 *
 * This function does NOT:
 *
 * * Update any upstream message iterator.
 * * Check the upstream message iterators to retry.
 *
 * On success, this function sets *muxer_upstream_msg_iter to the
 * upstream message iterator of which the current message is
 * the youngest, and sets *ts_ns to its time.
 */
static
bt_component_class_message_iterator_next_method_status
muxer_msg_iter_youngest_upstream_msg_iter(
		struct muxer_comp *muxer_comp,
		struct muxer_msg_iter *muxer_msg_iter,
		struct muxer_upstream_msg_iter **muxer_upstream_msg_iter,
		int64_t *ts_ns)
{
	size_t i;
	int ret;
	int64_t youngest_ts_ns = INT64_MAX;
	bt_component_class_message_iterator_next_method_status status =
		BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK;

	BT_ASSERT_DBG(muxer_comp);
	BT_ASSERT_DBG(muxer_msg_iter);
	BT_ASSERT_DBG(muxer_upstream_msg_iter);
	*muxer_upstream_msg_iter = NULL;

	for (i = 0; i < muxer_msg_iter->active_muxer_upstream_msg_iters->len;
			i++) {
		const bt_message *msg;
		struct muxer_upstream_msg_iter *cur_muxer_upstream_msg_iter =
			g_ptr_array_index(
				muxer_msg_iter->active_muxer_upstream_msg_iters,
				i);
		int64_t msg_ts_ns;

		if (!cur_muxer_upstream_msg_iter->msg_iter) {
			/* This upstream message iterator is ended */
			BT_COMP_LOGT("Skipping ended upstream message iterator: "
				"muxer-upstream-msg-iter-wrap-addr=%p",
				cur_muxer_upstream_msg_iter);
			continue;
		}

		BT_ASSERT_DBG(cur_muxer_upstream_msg_iter->msgs->length > 0);
		msg = g_queue_peek_head(cur_muxer_upstream_msg_iter->msgs);
		BT_ASSERT_DBG(msg);

		if (G_UNLIKELY(bt_message_get_type(msg) ==
				BT_MESSAGE_TYPE_STREAM_BEGINNING)) {
			ret = validate_new_stream_clock_class(
				muxer_msg_iter, muxer_comp,
				bt_message_stream_beginning_borrow_stream_const(
					msg));
			if (ret) {
				/*
				 * validate_new_stream_clock_class() logs
				 * errors.
				 */
				status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_ERROR;
				goto end;
			}
		} else if (G_UNLIKELY(bt_message_get_type(msg) ==
				BT_MESSAGE_TYPE_MESSAGE_ITERATOR_INACTIVITY)) {
			const bt_clock_snapshot *cs;

			cs = bt_message_message_iterator_inactivity_borrow_default_clock_snapshot_const(
				msg);
			ret = validate_clock_class(muxer_msg_iter, muxer_comp,
				bt_clock_snapshot_borrow_clock_class_const(cs));
			if (ret) {
				/* validate_clock_class() logs errors */
				status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_ERROR;
				goto end;
			}
		}

		ret = get_msg_ts_ns(muxer_comp, muxer_msg_iter, msg,
			muxer_msg_iter->last_returned_ts_ns, &msg_ts_ns);
		if (ret) {
			/* get_msg_ts_ns() logs errors */
			*muxer_upstream_msg_iter = NULL;
			status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_ERROR;
			goto end;
		}

		/*
		 * Update the current message iterator if it has not been set
		 * yet, or if its current message has a timestamp smaller than
		 * the previously selected youngest message.
		 */
		if (G_UNLIKELY(*muxer_upstream_msg_iter == NULL) ||
				msg_ts_ns < youngest_ts_ns) {
			*muxer_upstream_msg_iter =
				cur_muxer_upstream_msg_iter;
			youngest_ts_ns = msg_ts_ns;
			*ts_ns = youngest_ts_ns;
		} else if (msg_ts_ns == youngest_ts_ns) {
			/*
			 * The currently selected message to be sent downstream
			 * next has the exact same timestamp as that of the
			 * current candidate message. We must break the tie
			 * in a predictable manner.
			 */
			const bt_message *selected_msg = g_queue_peek_head(
				(*muxer_upstream_msg_iter)->msgs);
			BT_COMP_LOGD_STR("Two of the next message candidates have the same timestamps, pick one deterministically.");

			/*
			 * Order the messages in an arbitrary but deterministic
			 * way.
			 */
			ret = common_muxing_compare_messages(msg, selected_msg);
			if (ret < 0) {
				/*
				 * The `msg` should go first. Update the next
				 * iterator and the current timestamp.
				 */
				*muxer_upstream_msg_iter =
					cur_muxer_upstream_msg_iter;
				youngest_ts_ns = msg_ts_ns;
				*ts_ns = youngest_ts_ns;
			} else if (ret == 0) {
				/* Unable to pick which one should go first. */
				BT_COMP_LOGW("Cannot deterministically pick next upstream message iterator because they have identical next messages: "
					"muxer-upstream-msg-iter-wrap-addr=%p"
					"cur-muxer-upstream-msg-iter-wrap-addr=%p",
					*muxer_upstream_msg_iter,
					cur_muxer_upstream_msg_iter);
			}
		}
	}

	if (!*muxer_upstream_msg_iter) {
		status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_END;
		*ts_ns = INT64_MIN;
	}

end:
	return status;
}

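/*
 * Ensures that the upstream message iterator wrapper has at least one
 * queued message, calling its "next" method if the queue is empty.
 */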
static
bt_component_class_message_iterator_next_method_status
validate_muxer_upstream_msg_iter(
		struct muxer_upstream_msg_iter *muxer_upstream_msg_iter,
		bool *is_ended)
{
	struct muxer_comp *muxer_comp = muxer_upstream_msg_iter->muxer_comp;
	bt_component_class_message_iterator_next_method_status status;

	BT_COMP_LOGD("Validating muxer's upstream message iterator wrapper: "
		"muxer-upstream-msg-iter-wrap-addr=%p",
		muxer_upstream_msg_iter);

	if (muxer_upstream_msg_iter->msgs->length > 0 ||
			!muxer_upstream_msg_iter->msg_iter) {
		BT_COMP_LOGD("Already valid or not considered: "
			"queue-len=%u, upstream-msg-iter-addr=%p",
			muxer_upstream_msg_iter->msgs->length,
			muxer_upstream_msg_iter->msg_iter);
		status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK;
		goto end;
	}

	/* muxer_upstream_msg_iter_next() logs details/errors */
	status = muxer_upstream_msg_iter_next(muxer_upstream_msg_iter,
		is_ended);

end:
	return status;
}

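/*
 * Validates all active upstream message iterator wrappers and moves the
 * ended ones to `ended_muxer_upstream_msg_iters` so that they can be
 * reactivated when seeking.
 */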
static
bt_component_class_message_iterator_next_method_status
validate_muxer_upstream_msg_iters(
		struct muxer_msg_iter *muxer_msg_iter)
{
	struct muxer_comp *muxer_comp = muxer_msg_iter->muxer_comp;
	bt_component_class_message_iterator_next_method_status status;
	size_t i;

	BT_COMP_LOGD("Validating muxer's upstream message iterator wrappers: "
		"muxer-msg-iter-addr=%p", muxer_msg_iter);

	for (i = 0; i < muxer_msg_iter->active_muxer_upstream_msg_iters->len;
			i++) {
		bool is_ended = false;
		struct muxer_upstream_msg_iter *muxer_upstream_msg_iter =
			g_ptr_array_index(
				muxer_msg_iter->active_muxer_upstream_msg_iters,
				i);

		status = validate_muxer_upstream_msg_iter(
			muxer_upstream_msg_iter, &is_ended);
		if (status != BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK) {
			if (status < 0) {
				BT_COMP_LOGE_APPEND_CAUSE(muxer_comp->self_comp,
					"Cannot validate muxer's upstream message iterator wrapper: "
					"muxer-msg-iter-addr=%p, "
					"muxer-upstream-msg-iter-wrap-addr=%p",
					muxer_msg_iter,
					muxer_upstream_msg_iter);
			} else {
				BT_COMP_LOGD("Cannot validate muxer's upstream message iterator wrapper: "
					"muxer-msg-iter-addr=%p, "
					"muxer-upstream-msg-iter-wrap-addr=%p",
					muxer_msg_iter,
					muxer_upstream_msg_iter);
			}

			goto end;
		}

		/*
		 * Move this muxer upstream message iterator to the
		 * array of ended iterators if it's ended.
		 */
		if (G_UNLIKELY(is_ended)) {
			BT_COMP_LOGD("Muxer's upstream message iterator wrapper: ended or canceled: "
				"muxer-msg-iter-addr=%p, "
				"muxer-upstream-msg-iter-wrap-addr=%p",
				muxer_msg_iter, muxer_upstream_msg_iter);
			g_ptr_array_add(
				muxer_msg_iter->ended_muxer_upstream_msg_iters,
				muxer_upstream_msg_iter);
			muxer_msg_iter->active_muxer_upstream_msg_iters->pdata[i] = NULL;

			/*
			 * Use g_ptr_array_remove_fast() because the
			 * order of those elements is not important.
			 */
			g_ptr_array_remove_index_fast(
				muxer_msg_iter->active_muxer_upstream_msg_iters,
				i);
			i--;
		}
	}

	status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK;

end:
	return status;
}

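/*
 * Produces the single next (youngest) message of the muxer message
 * iterator and updates `last_returned_ts_ns`.
 */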
static inline
bt_component_class_message_iterator_next_method_status muxer_msg_iter_do_next_one(
		struct muxer_comp *muxer_comp,
		struct muxer_msg_iter *muxer_msg_iter,
		const bt_message **msg)
{
	bt_component_class_message_iterator_next_method_status status;
	struct muxer_upstream_msg_iter *muxer_upstream_msg_iter = NULL;
	int64_t next_return_ts;

	status = validate_muxer_upstream_msg_iters(muxer_msg_iter);
	if (status != BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK) {
		/* validate_muxer_upstream_msg_iters() logs details */
		goto end;
	}

	/*
	 * At this point we know that all the existing upstream
	 * message iterators are valid. We can find the one,
	 * amongst those, of which the current message is the
	 * youngest.
	 */
	status = muxer_msg_iter_youngest_upstream_msg_iter(muxer_comp,
		muxer_msg_iter, &muxer_upstream_msg_iter,
		&next_return_ts);
	if (status < 0 || status == BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_END) {
		if (status < 0) {
			BT_COMP_LOGE_APPEND_CAUSE(muxer_comp->self_comp,
				"Cannot find the youngest upstream message iterator wrapper: "
				"status=%s",
				bt_common_func_status_string(status));
		} else {
			BT_COMP_LOGD("Cannot find the youngest upstream message iterator wrapper: "
				"status=%s",
				bt_common_func_status_string(status));
		}

		goto end;
	}

	if (next_return_ts < muxer_msg_iter->last_returned_ts_ns) {
		BT_COMP_LOGE_APPEND_CAUSE(muxer_comp->self_comp,
			"Youngest upstream message iterator wrapper's timestamp is less than muxer's message iterator's last returned timestamp: "
			"muxer-msg-iter-addr=%p, ts=%" PRId64 ", "
			"last-returned-ts=%" PRId64,
			muxer_msg_iter, next_return_ts,
			muxer_msg_iter->last_returned_ts_ns);
		status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_ERROR;
		goto end;
	}

	BT_COMP_LOGD("Found youngest upstream message iterator wrapper: "
		"muxer-msg-iter-addr=%p, "
		"muxer-upstream-msg-iter-wrap-addr=%p, "
		"ts=%" PRId64,
		muxer_msg_iter, muxer_upstream_msg_iter, next_return_ts);
	BT_ASSERT_DBG(status ==
		BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK);
	BT_ASSERT_DBG(muxer_upstream_msg_iter);

	/*
	 * Consume from the queue's head: other side
	 * (muxer_upstream_msg_iter_next()) writes to the tail.
	 */
	*msg = g_queue_pop_head(muxer_upstream_msg_iter->msgs);
	BT_ASSERT_DBG(*msg);
	muxer_msg_iter->last_returned_ts_ns = next_return_ts;

end:
	return status;
}

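/*
 * Fills `msgs` with up to `capacity` messages. If an error occurs after
 * at least one message was accumulated, the error is saved and returned
 * on the next call so that the accumulated messages are not lost.
 */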
static
bt_component_class_message_iterator_next_method_status muxer_msg_iter_do_next(
		struct muxer_comp *muxer_comp,
		struct muxer_msg_iter *muxer_msg_iter,
		bt_message_array_const msgs, uint64_t capacity,
		uint64_t *count)
{
	bt_component_class_message_iterator_next_method_status status;
	uint64_t i = 0;

	if (G_UNLIKELY(muxer_msg_iter->next_saved_error)) {
		/*
		 * Last time we were called, we hit an error but had some
		 * messages to deliver, so we stashed the error here. Return
		 * it now.
		 */
		BT_CURRENT_THREAD_MOVE_ERROR_AND_RESET(muxer_msg_iter->next_saved_error);
		status = muxer_msg_iter->next_saved_status;
		goto end;
	}

	do {
		status = muxer_msg_iter_do_next_one(muxer_comp,
			muxer_msg_iter, &msgs[i]);
		if (status == BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK) {
			i++;
		}
	} while (i < capacity && status == BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK);

	if (i > 0) {
		/*
		 * Even if muxer_msg_iter_do_next_one() returned
		 * something other than
		 * BT_MESSAGE_ITERATOR_STATUS_OK, we accumulated
		 * message objects in the output message
		 * array, so we need to return
		 * BT_MESSAGE_ITERATOR_STATUS_OK so that they are
		 * transferred to downstream. This other status occurs
		 * again the next time muxer_msg_iter_do_next() is
		 * called, possibly without any accumulated
		 * message, in which case we'll return it.
		 */
		if (status < 0) {
			/*
			 * Save this error for the next _next call. Assume that
			 * this component always appends error causes when
			 * returning an error status code, which will cause the
			 * current thread error to be non-NULL.
			 */
			muxer_msg_iter->next_saved_error = bt_current_thread_take_error();
			BT_ASSERT(muxer_msg_iter->next_saved_error);
			muxer_msg_iter->next_saved_status = status;
		}

		*count = i;
		status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK;
	}

end:
	return status;
}

static
void destroy_muxer_msg_iter(struct muxer_msg_iter *muxer_msg_iter)
{
	struct muxer_comp *muxer_comp;

	if (!muxer_msg_iter) {
		return;
	}

	muxer_comp = muxer_msg_iter->muxer_comp;
	BT_COMP_LOGD("Destroying muxer component's message iterator: "
		"muxer-msg-iter-addr=%p", muxer_msg_iter);

	if (muxer_msg_iter->active_muxer_upstream_msg_iters) {
		BT_COMP_LOGD_STR("Destroying muxer's active upstream message iterator wrappers.");
		g_ptr_array_free(
			muxer_msg_iter->active_muxer_upstream_msg_iters, TRUE);
	}

	if (muxer_msg_iter->ended_muxer_upstream_msg_iters) {
		BT_COMP_LOGD_STR("Destroying muxer's ended upstream message iterator wrappers.");
		g_ptr_array_free(
			muxer_msg_iter->ended_muxer_upstream_msg_iters, TRUE);
	}

	g_free(muxer_msg_iter);
}

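/*
 * Creates one upstream message iterator for each connected input port
 * and records whether or not the muxer message iterator can seek
 * forward (true only if all upstream iterators can).
 */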
static
bt_component_class_message_iterator_initialize_method_status
muxer_msg_iter_init_upstream_iterators(struct muxer_comp *muxer_comp,
		struct muxer_msg_iter *muxer_msg_iter,
		struct bt_self_message_iterator_configuration *config)
{
	int64_t count;
	int64_t i;
	bt_component_class_message_iterator_initialize_method_status status;
	bool can_seek_forward = true;

	count = bt_component_filter_get_input_port_count(
		bt_self_component_filter_as_component_filter(
			muxer_comp->self_comp_flt));
	if (count < 0) {
		BT_COMP_LOGD("No input port to initialize for muxer component's message iterator: "
			"muxer-comp-addr=%p, muxer-msg-iter-addr=%p",
			muxer_comp, muxer_msg_iter);
		status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_INITIALIZE_METHOD_STATUS_OK;
		goto end;
	}

	for (i = 0; i < count; i++) {
		bt_self_component_port_input_message_iterator *upstream_msg_iter;
		bt_self_component_port_input *self_port =
			bt_self_component_filter_borrow_input_port_by_index(
				muxer_comp->self_comp_flt, i);
		const bt_port *port;
		bt_self_component_port_input_message_iterator_create_from_message_iterator_status
			msg_iter_status;
		int int_status;

		BT_ASSERT(self_port);
		port = bt_self_component_port_as_port(
			bt_self_component_port_input_as_self_component_port(
				self_port));
		BT_ASSERT(port);

		if (!bt_port_is_connected(port)) {
			/* Skip non-connected port */
			continue;
		}

		msg_iter_status = create_msg_iter_on_input_port(muxer_comp,
			muxer_msg_iter, self_port, &upstream_msg_iter);
		if (msg_iter_status != BT_SELF_COMPONENT_PORT_INPUT_MESSAGE_ITERATOR_CREATE_FROM_MESSAGE_ITERATOR_STATUS_OK) {
			/* create_msg_iter_on_input_port() logs errors */
			status = (int) msg_iter_status;
			goto end;
		}

		int_status = muxer_msg_iter_add_upstream_msg_iter(muxer_msg_iter,
			upstream_msg_iter);
		bt_self_component_port_input_message_iterator_put_ref(
			upstream_msg_iter);
		if (int_status) {
			status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_INITIALIZE_METHOD_STATUS_ERROR;
			/* muxer_msg_iter_add_upstream_msg_iter() logs errors */
			goto end;
		}

		can_seek_forward = can_seek_forward &&
			bt_self_component_port_input_message_iterator_can_seek_forward(
				upstream_msg_iter);
	}

	/*
	 * This iterator can seek forward if all of its iterators can seek
	 * forward.
	 */
	bt_self_message_iterator_configuration_set_can_seek_forward(
		config, can_seek_forward);

	status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_INITIALIZE_METHOD_STATUS_OK;

end:
	return status;
}

BT_HIDDEN
bt_component_class_message_iterator_initialize_method_status muxer_msg_iter_init(
		bt_self_message_iterator *self_msg_iter,
		bt_self_message_iterator_configuration *config,
		bt_self_component_filter *self_comp,
		bt_self_component_port_output *port)
{
	struct muxer_comp *muxer_comp = NULL;
	struct muxer_msg_iter *muxer_msg_iter = NULL;
	bt_component_class_message_iterator_initialize_method_status status;

	muxer_comp = bt_self_component_get_data(
		bt_self_component_filter_as_self_component(self_comp));
	BT_ASSERT(muxer_comp);
	BT_COMP_LOGD("Initializing muxer component's message iterator: "
		"comp-addr=%p, muxer-comp-addr=%p, msg-iter-addr=%p",
		self_comp, muxer_comp, self_msg_iter);

	if (muxer_comp->initializing_muxer_msg_iter) {
		/*
		 * Weird, unhandled situation detected: downstream
		 * creates a muxer message iterator while creating
		 * another muxer message iterator (same component).
		 */
		BT_COMP_LOGE("Recursive initialization of muxer component's message iterator: "
			"comp-addr=%p, muxer-comp-addr=%p, msg-iter-addr=%p",
			self_comp, muxer_comp, self_msg_iter);
		status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_INITIALIZE_METHOD_STATUS_ERROR;
		goto error;
	}

	muxer_comp->initializing_muxer_msg_iter = true;
	muxer_msg_iter = g_new0(struct muxer_msg_iter, 1);
	if (!muxer_msg_iter) {
		BT_COMP_LOGE_STR("Failed to allocate one muxer component's message iterator.");
		status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_INITIALIZE_METHOD_STATUS_MEMORY_ERROR;
		goto error;
	}

	muxer_msg_iter->muxer_comp = muxer_comp;
	muxer_msg_iter->self_msg_iter = self_msg_iter;
	muxer_msg_iter->last_returned_ts_ns = INT64_MIN;
	muxer_msg_iter->active_muxer_upstream_msg_iters =
		g_ptr_array_new_with_free_func(
			(GDestroyNotify) destroy_muxer_upstream_msg_iter);
	if (!muxer_msg_iter->active_muxer_upstream_msg_iters) {
		BT_COMP_LOGE_STR("Failed to allocate a GPtrArray.");
		status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_INITIALIZE_METHOD_STATUS_MEMORY_ERROR;
		goto error;
	}

	muxer_msg_iter->ended_muxer_upstream_msg_iters =
		g_ptr_array_new_with_free_func(
			(GDestroyNotify) destroy_muxer_upstream_msg_iter);
	if (!muxer_msg_iter->ended_muxer_upstream_msg_iters) {
		BT_COMP_LOGE_STR("Failed to allocate a GPtrArray.");
		status = BT_COMPONENT_CLASS_MESSAGE_ITERATOR_INITIALIZE_METHOD_STATUS_MEMORY_ERROR;
		goto error;
	}

	status = muxer_msg_iter_init_upstream_iterators(muxer_comp,
		muxer_msg_iter, config);
	if (status) {
		BT_COMP_LOGE("Cannot initialize connected input ports for muxer component's message iterator: "
			"comp-addr=%p, muxer-comp-addr=%p, "
			"muxer-msg-iter-addr=%p, msg-iter-addr=%p, ret=%d",
			self_comp, muxer_comp, muxer_msg_iter,
			self_msg_iter, status);
		goto error;
	}

	bt_self_message_iterator_set_data(self_msg_iter, muxer_msg_iter);
	BT_COMP_LOGD("Initialized muxer component's message iterator: "
		"comp-addr=%p, muxer-comp-addr=%p, muxer-msg-iter-addr=%p, "
		"msg-iter-addr=%p",
		self_comp, muxer_comp, muxer_msg_iter, self_msg_iter);
	goto end;

error:
	destroy_muxer_msg_iter(muxer_msg_iter);
	bt_self_message_iterator_set_data(self_msg_iter, NULL);

end:
	muxer_comp->initializing_muxer_msg_iter = false;
	return status;
}

BT_HIDDEN
void muxer_msg_iter_finalize(bt_self_message_iterator *self_msg_iter)
{
	struct muxer_msg_iter *muxer_msg_iter =
		bt_self_message_iterator_get_data(self_msg_iter);
	bt_self_component *self_comp = NULL;
	struct muxer_comp *muxer_comp = NULL;

	self_comp = bt_self_message_iterator_borrow_component(
		self_msg_iter);
	BT_ASSERT(self_comp);
	muxer_comp = bt_self_component_get_data(self_comp);
	BT_COMP_LOGD("Finalizing muxer component's message iterator: "
		"comp-addr=%p, muxer-comp-addr=%p, muxer-msg-iter-addr=%p, "
		"msg-iter-addr=%p",
		self_comp, muxer_comp, muxer_msg_iter, self_msg_iter);

	if (muxer_msg_iter) {
		destroy_muxer_msg_iter(muxer_msg_iter);
	}
}

BT_HIDDEN
bt_component_class_message_iterator_next_method_status muxer_msg_iter_next(
		bt_self_message_iterator *self_msg_iter,
		bt_message_array_const msgs, uint64_t capacity,
		uint64_t *count)
{
	bt_component_class_message_iterator_next_method_status status;
	struct muxer_msg_iter *muxer_msg_iter =
		bt_self_message_iterator_get_data(self_msg_iter);
	bt_self_component *self_comp = NULL;
	struct muxer_comp *muxer_comp = NULL;

	BT_ASSERT_DBG(muxer_msg_iter);
	self_comp = bt_self_message_iterator_borrow_component(
		self_msg_iter);
	BT_ASSERT_DBG(self_comp);
	muxer_comp = bt_self_component_get_data(self_comp);
	BT_ASSERT_DBG(muxer_comp);
	BT_COMP_LOGT("Muxer component's message iterator's \"next\" method called: "
		"comp-addr=%p, muxer-comp-addr=%p, muxer-msg-iter-addr=%p, "
		"msg-iter-addr=%p",
		self_comp, muxer_comp, muxer_msg_iter, self_msg_iter);

	status = muxer_msg_iter_do_next(muxer_comp, muxer_msg_iter,
		msgs, capacity, count);
	if (status < 0) {
		BT_COMP_LOGE("Cannot get next message: "
			"comp-addr=%p, muxer-comp-addr=%p, muxer-msg-iter-addr=%p, "
			"msg-iter-addr=%p, status=%s",
			self_comp, muxer_comp, muxer_msg_iter, self_msg_iter,
			bt_common_func_status_string(status));
	} else {
		BT_COMP_LOGT("Returning from muxer component's message iterator's \"next\" method: "
			"status=%s",
			bt_common_func_status_string(status));
	}

	return status;
}

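/*
 * Port-connected hook: each time an input port gets connected, add a
 * new available input port so that one is always left to connect to.
 */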
BT_HIDDEN
bt_component_class_port_connected_method_status muxer_input_port_connected(
		bt_self_component_filter *self_comp,
		bt_self_component_port_input *self_port,
		const bt_port_output *other_port)
{
	bt_component_class_port_connected_method_status status =
		BT_COMPONENT_CLASS_PORT_CONNECTED_METHOD_STATUS_OK;
	bt_self_component_add_port_status add_port_status;
	struct muxer_comp *muxer_comp = bt_self_component_get_data(
		bt_self_component_filter_as_self_component(self_comp));

	add_port_status = add_available_input_port(self_comp);
	if (add_port_status) {
		BT_COMP_LOGE("Cannot add one muxer component's input port: "
			"status=%s",
			bt_common_func_status_string(status));

		if (add_port_status ==
				BT_SELF_COMPONENT_ADD_PORT_STATUS_MEMORY_ERROR) {
			status = BT_COMPONENT_CLASS_PORT_CONNECTED_METHOD_STATUS_MEMORY_ERROR;
		} else {
			status = BT_COMPONENT_CLASS_PORT_CONNECTED_METHOD_STATUS_ERROR;
		}

		goto end;
	}

end:
	return status;
}

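/*
 * The muxer message iterator can seek to the beginning only if every
 * upstream message iterator (active and ended) can.
 */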
static inline
bt_component_class_message_iterator_can_seek_beginning_method_status
muxer_upstream_msg_iters_can_all_seek_beginning(
		GPtrArray *muxer_upstream_msg_iters, bt_bool *can_seek)
{
	bt_component_class_message_iterator_can_seek_beginning_method_status status =
		BT_COMPONENT_CLASS_MESSAGE_ITERATOR_CAN_SEEK_BEGINNING_METHOD_STATUS_OK;
	uint64_t i;

	for (i = 0; i < muxer_upstream_msg_iters->len; i++) {
		struct muxer_upstream_msg_iter *upstream_msg_iter =
			muxer_upstream_msg_iters->pdata[i];
		status = (int) bt_self_component_port_input_message_iterator_can_seek_beginning(
			upstream_msg_iter->msg_iter, can_seek);
		if (status != BT_COMPONENT_CLASS_MESSAGE_ITERATOR_CAN_SEEK_BEGINNING_METHOD_STATUS_OK) {
			goto end;
		}

		if (!*can_seek) {
			goto end;
		}
	}

	*can_seek = BT_TRUE;

end:
	return status;
}

BT_HIDDEN
bt_component_class_message_iterator_can_seek_beginning_method_status
muxer_msg_iter_can_seek_beginning(
		bt_self_message_iterator *self_msg_iter, bt_bool *can_seek)
{
	struct muxer_msg_iter *muxer_msg_iter =
		bt_self_message_iterator_get_data(self_msg_iter);
	bt_component_class_message_iterator_can_seek_beginning_method_status status;

	status = muxer_upstream_msg_iters_can_all_seek_beginning(
		muxer_msg_iter->active_muxer_upstream_msg_iters, can_seek);
	if (status != BT_COMPONENT_CLASS_MESSAGE_ITERATOR_CAN_SEEK_BEGINNING_METHOD_STATUS_OK) {
		goto end;
	}

	if (!*can_seek) {
		goto end;
	}

	status = muxer_upstream_msg_iters_can_all_seek_beginning(
		muxer_msg_iter->ended_muxer_upstream_msg_iters, can_seek);

end:
	return status;
}

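/*
 * Seeks all upstream message iterators to the beginning, empties their
 * message queues, reactivates the ended iterators, and resets the clock
 * class expectation and last returned timestamp.
 */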
BT_HIDDEN
bt_component_class_message_iterator_seek_beginning_method_status muxer_msg_iter_seek_beginning(
		bt_self_message_iterator *self_msg_iter)
{
	struct muxer_msg_iter *muxer_msg_iter =
		bt_self_message_iterator_get_data(self_msg_iter);
	bt_component_class_message_iterator_seek_beginning_method_status status =
		BT_COMPONENT_CLASS_MESSAGE_ITERATOR_SEEK_BEGINNING_METHOD_STATUS_OK;
	bt_message_iterator_seek_beginning_status seek_beg_status;
	uint64_t i;

	/* Seek all ended upstream iterators first */
	for (i = 0; i < muxer_msg_iter->ended_muxer_upstream_msg_iters->len;
			i++) {
		struct muxer_upstream_msg_iter *upstream_msg_iter =
			muxer_msg_iter->ended_muxer_upstream_msg_iters->pdata[i];

		seek_beg_status = bt_self_component_port_input_message_iterator_seek_beginning(
			upstream_msg_iter->msg_iter);
		if (seek_beg_status != BT_MESSAGE_ITERATOR_SEEK_BEGINNING_STATUS_OK) {
			status = (int) seek_beg_status;
			goto end;
		}

		empty_message_queue(upstream_msg_iter);
	}

	/* Seek all previously active upstream iterators */
	for (i = 0; i < muxer_msg_iter->active_muxer_upstream_msg_iters->len;
			i++) {
		struct muxer_upstream_msg_iter *upstream_msg_iter =
			muxer_msg_iter->active_muxer_upstream_msg_iters->pdata[i];

		seek_beg_status = bt_self_component_port_input_message_iterator_seek_beginning(
			upstream_msg_iter->msg_iter);
		if (seek_beg_status != BT_MESSAGE_ITERATOR_SEEK_BEGINNING_STATUS_OK) {
			status = (int) seek_beg_status;
			goto end;
		}

		empty_message_queue(upstream_msg_iter);
	}

	/* Make them all active */
	for (i = 0; i < muxer_msg_iter->ended_muxer_upstream_msg_iters->len;
			i++) {
		struct muxer_upstream_msg_iter *upstream_msg_iter =
			muxer_msg_iter->ended_muxer_upstream_msg_iters->pdata[i];

		g_ptr_array_add(muxer_msg_iter->active_muxer_upstream_msg_iters,
			upstream_msg_iter);
		muxer_msg_iter->ended_muxer_upstream_msg_iters->pdata[i] = NULL;
	}

	/*
	 * GLib < 2.48.0 asserts when g_ptr_array_remove_range() is
	 * called on an empty array.
	 */
	if (muxer_msg_iter->ended_muxer_upstream_msg_iters->len > 0) {
		g_ptr_array_remove_range(muxer_msg_iter->ended_muxer_upstream_msg_iters,
			0, muxer_msg_iter->ended_muxer_upstream_msg_iters->len);
	}
	muxer_msg_iter->last_returned_ts_ns = INT64_MIN;
	muxer_msg_iter->clock_class_expectation =
		MUXER_MSG_ITER_CLOCK_CLASS_EXPECTATION_ANY;

end:
	return status;
}