lib: make trace IR API const-correct
[babeltrace.git] lib/trace-ir/resolve-field-path.c
/*
 * Copyright 2018 Philippe Proulx <pproulx@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define BT_LOG_TAG "RESOLVE-FIELD-PATH"
#include <babeltrace/lib-logging-internal.h>

#include <babeltrace/assert-pre-internal.h>
#include <babeltrace/assert-internal.h>
#include <babeltrace/object.h>
#include <babeltrace/trace-ir/field-classes-internal.h>
#include <babeltrace/trace-ir/field-path-internal.h>
#include <babeltrace/trace-ir/field-path-const.h>
#include <babeltrace/trace-ir/resolve-field-path-internal.h>
#include <limits.h>
#include <stdint.h>
#include <inttypes.h>
#include <glib.h>

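/*
 * Depth-first search: appends the index of each visited child to
 * `field_path->indexes` and returns `true` as soon as `tgt_fc` is found
 * somewhere within `fc`, leaving the accumulated indexes in place.
 * Indexes appended on a dead-end branch are popped before backtracking.
 */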
static
bool find_field_class_recursive(struct bt_field_class *fc,
		struct bt_field_class *tgt_fc, struct bt_field_path *field_path)
{
	bool found = false;

	if (tgt_fc == fc) {
		found = true;
		goto end;
	}

	switch (fc->type) {
	case BT_FIELD_CLASS_TYPE_STRUCTURE:
	case BT_FIELD_CLASS_TYPE_VARIANT:
	{
		struct bt_field_class_named_field_class_container *container_fc =
			(void *) fc;
		uint64_t i;

		for (i = 0; i < container_fc->named_fcs->len; i++) {
			struct bt_named_field_class *named_fc =
				BT_FIELD_CLASS_NAMED_FC_AT_INDEX(
					container_fc, i);

			g_array_append_val(field_path->indexes, i);
			found = find_field_class_recursive(named_fc->fc,
				tgt_fc, field_path);
			if (found) {
				goto end;
			}

			g_array_set_size(field_path->indexes,
				field_path->indexes->len - 1);
		}

		break;
	}
	case BT_FIELD_CLASS_TYPE_STATIC_ARRAY:
	case BT_FIELD_CLASS_TYPE_DYNAMIC_ARRAY:
	{
		struct bt_field_class_array *array_fc = (void *) fc;

		found = find_field_class_recursive(array_fc->element_fc,
			tgt_fc, field_path);
		break;
	}
	default:
		break;
	}

end:
	return found;
}

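/*
 * Creates a field path rooted at `root_scope` and fills it with the
 * indexes leading to `tgt_fc` within `root_fc`. On return,
 * `*ret_field_path` is NULL if `root_fc` is NULL or if `tgt_fc` is not
 * found under it; the return value is non-zero only on memory
 * allocation failure.
 */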
static
int find_field_class(struct bt_field_class *root_fc,
		enum bt_scope root_scope, struct bt_field_class *tgt_fc,
		struct bt_field_path **ret_field_path)
{
	int ret = 0;
	struct bt_field_path *field_path = NULL;

	if (!root_fc) {
		goto end;
	}

	field_path = bt_field_path_create();
	if (!field_path) {
		ret = -1;
		goto end;
	}

	field_path->root = root_scope;
	if (!find_field_class_recursive(root_fc, tgt_fc, field_path)) {
		/* Not found here */
		BT_OBJECT_PUT_REF_AND_RESET(field_path);
	}

end:
	*ret_field_path = field_path;
	return ret;
}

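/*
 * Searches each root scope of the resolving context, from packet header
 * to event payload, and returns the field path of `fc` within the first
 * scope which contains it, or NULL if `fc` is not part of the context.
 */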
static
struct bt_field_path *find_field_class_in_ctx(struct bt_field_class *fc,
		struct bt_resolve_field_path_context *ctx)
{
	struct bt_field_path *field_path = NULL;
	int ret;

	ret = find_field_class(ctx->packet_header, BT_SCOPE_PACKET_HEADER,
		fc, &field_path);
	if (ret || field_path) {
		goto end;
	}

	ret = find_field_class(ctx->packet_context, BT_SCOPE_PACKET_CONTEXT,
		fc, &field_path);
	if (ret || field_path) {
		goto end;
	}

	ret = find_field_class(ctx->event_header, BT_SCOPE_EVENT_HEADER,
		fc, &field_path);
	if (ret || field_path) {
		goto end;
	}

	ret = find_field_class(ctx->event_common_context,
		BT_SCOPE_EVENT_COMMON_CONTEXT, fc, &field_path);
	if (ret || field_path) {
		goto end;
	}

	ret = find_field_class(ctx->event_specific_context,
		BT_SCOPE_EVENT_SPECIFIC_CONTEXT, fc, &field_path);
	if (ret || field_path) {
		goto end;
	}

	ret = find_field_class(ctx->event_payload, BT_SCOPE_EVENT_PAYLOAD,
		fc, &field_path);
	if (ret || field_path) {
		goto end;
	}

end:
	return field_path;
}

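/*
 * Returns `true` if the target field path is located before the source
 * field path: either in an earlier root scope, or in the same root scope
 * with no path index greater than the corresponding source index.
 */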
BT_ASSERT_PRE_FUNC
static inline
bool target_is_before_source(struct bt_field_path *src_field_path,
		struct bt_field_path *tgt_field_path)
{
	bool is_valid = true;
	uint64_t src_i = 0, tgt_i = 0;

	if (tgt_field_path->root < src_field_path->root) {
		goto end;
	}

	if (tgt_field_path->root > src_field_path->root) {
		is_valid = false;
		goto end;
	}

	BT_ASSERT(tgt_field_path->root == src_field_path->root);

	while (src_i < src_field_path->indexes->len &&
			tgt_i < tgt_field_path->indexes->len) {
		uint64_t src_index = bt_field_path_get_index_by_index_inline(
			src_field_path, src_i);
		uint64_t tgt_index = bt_field_path_get_index_by_index_inline(
			tgt_field_path, tgt_i);

		if (tgt_index > src_index) {
			is_valid = false;
			goto end;
		}

		src_i++;
		tgt_i++;
	}

end:
	return is_valid;
}

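/*
 * Borrows the root field class of the resolving context which
 * corresponds to `scope`.
 */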
BT_ASSERT_PRE_FUNC
static inline
struct bt_field_class *borrow_root_field_class(
		struct bt_resolve_field_path_context *ctx, enum bt_scope scope)
{
	switch (scope) {
	case BT_SCOPE_PACKET_HEADER:
		return ctx->packet_header;
	case BT_SCOPE_PACKET_CONTEXT:
		return ctx->packet_context;
	case BT_SCOPE_EVENT_HEADER:
		return ctx->event_header;
	case BT_SCOPE_EVENT_COMMON_CONTEXT:
		return ctx->event_common_context;
	case BT_SCOPE_EVENT_SPECIFIC_CONTEXT:
		return ctx->event_specific_context;
	case BT_SCOPE_EVENT_PAYLOAD:
		return ctx->event_payload;
	default:
		abort();
	}

	return NULL;
}

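/*
 * Borrows the child field class of `parent_fc` designated by `index`:
 * the named member at `index` for a structure or variant field class, or
 * the element field class for an array field class. `*advance` is set to
 * whether the caller should consume one field path index (array field
 * classes do not consume one because field paths do not record an index
 * for array elements).
 */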
BT_ASSERT_PRE_FUNC
static inline
struct bt_field_class *borrow_child_field_class(struct bt_field_class *parent_fc,
		uint64_t index, bool *advance)
{
	struct bt_field_class *child_fc = NULL;

	switch (parent_fc->type) {
	case BT_FIELD_CLASS_TYPE_STRUCTURE:
	case BT_FIELD_CLASS_TYPE_VARIANT:
	{
		struct bt_named_field_class *named_fc =
			BT_FIELD_CLASS_NAMED_FC_AT_INDEX(parent_fc, index);

		child_fc = named_fc->fc;
		*advance = true;
		break;
	}
	case BT_FIELD_CLASS_TYPE_STATIC_ARRAY:
	case BT_FIELD_CLASS_TYPE_DYNAMIC_ARRAY:
	{
		struct bt_field_class_array *array_fc = (void *) parent_fc;

		child_fc = array_fc->element_fc;
		*advance = false;
		break;
	}
	default:
		break;
	}

	return child_fc;
}

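/*
 * When the target and source field paths are in different root scopes,
 * returns `true` only if every field class on the way from the target's
 * root scope down to the target is a structure field class (no array or
 * variant field class). Returns `true` immediately when both paths share
 * the same root scope.
 */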
BT_ASSERT_PRE_FUNC
static inline
bool target_field_path_in_different_scope_has_struct_fc_only(
		struct bt_field_path *src_field_path,
		struct bt_field_path *tgt_field_path,
		struct bt_resolve_field_path_context *ctx)
{
	bool is_valid = true;
	uint64_t i = 0;
	struct bt_field_class *fc;

	if (src_field_path->root == tgt_field_path->root) {
		goto end;
	}

	fc = borrow_root_field_class(ctx, tgt_field_path->root);

	while (i < tgt_field_path->indexes->len) {
		uint64_t index = bt_field_path_get_index_by_index_inline(
			tgt_field_path, i);
		bool advance;

		if (fc->type == BT_FIELD_CLASS_TYPE_STATIC_ARRAY ||
				fc->type == BT_FIELD_CLASS_TYPE_DYNAMIC_ARRAY ||
				fc->type == BT_FIELD_CLASS_TYPE_VARIANT) {
			is_valid = false;
			goto end;
		}

		fc = borrow_child_field_class(fc, index, &advance);

		if (advance) {
			i++;
		}
	}

end:
	return is_valid;
}

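/*
 * When the target and source field paths share the same root scope,
 * returns `true` only if their lowest common ancestor (LCA) is a
 * structure field class. Returns `true` immediately when the root scopes
 * differ.
 */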
BT_ASSERT_PRE_FUNC
static inline
bool lca_is_structure_field_class(struct bt_field_path *src_field_path,
		struct bt_field_path *tgt_field_path,
		struct bt_resolve_field_path_context *ctx)
{
	bool is_valid = true;
	struct bt_field_class *src_fc;
	struct bt_field_class *tgt_fc;
	struct bt_field_class *prev_fc = NULL;
	uint64_t src_i = 0, tgt_i = 0;

	if (src_field_path->root != tgt_field_path->root) {
		goto end;
	}

	src_fc = borrow_root_field_class(ctx, src_field_path->root);
	tgt_fc = borrow_root_field_class(ctx, tgt_field_path->root);
	BT_ASSERT(src_fc);
	BT_ASSERT(tgt_fc);

	while (src_i < src_field_path->indexes->len &&
			tgt_i < tgt_field_path->indexes->len) {
		bool advance;
		uint64_t src_index = bt_field_path_get_index_by_index_inline(
			src_field_path, src_i);
		uint64_t tgt_index = bt_field_path_get_index_by_index_inline(
			tgt_field_path, tgt_i);

		if (src_fc != tgt_fc) {
			if (!prev_fc) {
				/*
				 * This is correct: the LCA is the root
				 * scope field class, which must be a
				 * structure field class.
				 */
				break;
			}

			if (prev_fc->type != BT_FIELD_CLASS_TYPE_STRUCTURE) {
				is_valid = false;
			}

			break;
		}

		prev_fc = src_fc;
		src_fc = borrow_child_field_class(src_fc, src_index, &advance);

		if (advance) {
			src_i++;
		}

		tgt_fc = borrow_child_field_class(tgt_fc, tgt_index, &advance);

		if (advance) {
			tgt_i++;
		}
	}

end:
	return is_valid;
}

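/*
 * When the target and source field paths share the same root scope,
 * returns `true` only if every field class on the path from their lowest
 * common ancestor down to the target is a structure field class (no
 * array or variant field class on the way). Returns `true` immediately
 * when the root scopes differ.
 */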
BT_ASSERT_PRE_FUNC
static inline
bool lca_to_target_has_struct_fc_only(struct bt_field_path *src_field_path,
		struct bt_field_path *tgt_field_path,
		struct bt_resolve_field_path_context *ctx)
{
	bool is_valid = true;
	struct bt_field_class *src_fc;
	struct bt_field_class *tgt_fc;
	uint64_t src_i = 0, tgt_i = 0;

	if (src_field_path->root != tgt_field_path->root) {
		goto end;
	}

	src_fc = borrow_root_field_class(ctx, src_field_path->root);
	tgt_fc = borrow_root_field_class(ctx, tgt_field_path->root);
	BT_ASSERT(src_fc);
	BT_ASSERT(tgt_fc);
	BT_ASSERT(src_fc == tgt_fc);

	/* Find LCA */
	while (src_i < src_field_path->indexes->len &&
			tgt_i < tgt_field_path->indexes->len) {
		bool advance;
		uint64_t src_index = bt_field_path_get_index_by_index_inline(
			src_field_path, src_i);
		uint64_t tgt_index = bt_field_path_get_index_by_index_inline(
			tgt_field_path, tgt_i);

		if (src_i != tgt_i) {
			/* Next field class is different: LCA is `tgt_fc` */
			break;
		}

		src_fc = borrow_child_field_class(src_fc, src_index, &advance);

		if (advance) {
			src_i++;
		}

		tgt_fc = borrow_child_field_class(tgt_fc, tgt_index, &advance);

		if (advance) {
			tgt_i++;
		}
	}

	/* Only structure field classes to the target */
	while (tgt_i < tgt_field_path->indexes->len) {
		bool advance;
		uint64_t tgt_index = bt_field_path_get_index_by_index_inline(
			tgt_field_path, tgt_i);

		if (tgt_fc->type == BT_FIELD_CLASS_TYPE_STATIC_ARRAY ||
				tgt_fc->type == BT_FIELD_CLASS_TYPE_DYNAMIC_ARRAY ||
				tgt_fc->type == BT_FIELD_CLASS_TYPE_VARIANT) {
			is_valid = false;
			goto end;
		}

		tgt_fc = borrow_child_field_class(tgt_fc, tgt_index, &advance);

		if (advance) {
			tgt_i++;
		}
	}

end:
	return is_valid;
}

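/*
 * Validates that `tgt_fc` is a legal resolving target for `src_fc`
 * within `ctx`: both field classes must exist in the context, the target
 * must be located before the source, and the structural rules checked by
 * the helpers above must hold. Logs a precondition message and returns
 * `false` on the first violated rule.
 */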
BT_ASSERT_PRE_FUNC
static inline
bool field_path_is_valid(struct bt_field_class *src_fc,
		struct bt_field_class *tgt_fc,
		struct bt_resolve_field_path_context *ctx)
{
	bool is_valid = true;
	struct bt_field_path *src_field_path = find_field_class_in_ctx(
		src_fc, ctx);
	struct bt_field_path *tgt_field_path = find_field_class_in_ctx(
		tgt_fc, ctx);

	if (!src_field_path) {
		BT_ASSERT_PRE_MSG("Cannot find requesting field class in "
			"resolving context: %!+F", src_fc);
		is_valid = false;
		goto end;
	}

	if (!tgt_field_path) {
		BT_ASSERT_PRE_MSG("Cannot find target field class in "
			"resolving context: %!+F", tgt_fc);
		is_valid = false;
		goto end;
	}

	/* Target must be before source */
	if (!target_is_before_source(src_field_path, tgt_field_path)) {
		BT_ASSERT_PRE_MSG("Target field class is located after "
			"requesting field class: %![req-fc-]+F, %![tgt-fc-]+F",
			src_fc, tgt_fc);
		is_valid = false;
		goto end;
	}

	/*
	 * If the target is in a different scope than the source, there
	 * must be no array or variant field classes on the way to the
	 * target.
	 */
	if (!target_field_path_in_different_scope_has_struct_fc_only(
			src_field_path, tgt_field_path, ctx)) {
		BT_ASSERT_PRE_MSG("Target field class is located in a "
			"different scope than requesting field class, "
			"but within an array or a variant field class: "
			"%![req-fc-]+F, %![tgt-fc-]+F",
			src_fc, tgt_fc);
		is_valid = false;
		goto end;
	}

	/* Same scope: LCA must be a structure field class */
	if (!lca_is_structure_field_class(src_field_path, tgt_field_path, ctx)) {
		BT_ASSERT_PRE_MSG("Lowest common ancestor of target and "
			"requesting field classes is not a structure field class: "
			"%![req-fc-]+F, %![tgt-fc-]+F",
			src_fc, tgt_fc);
		is_valid = false;
		goto end;
	}

	/* Same scope: path from LCA to target has no array/variant FCs */
	if (!lca_to_target_has_struct_fc_only(src_field_path, tgt_field_path,
			ctx)) {
		BT_ASSERT_PRE_MSG("Path from lowest common ancestor of target "
			"and requesting field classes to target field class "
			"contains an array or a variant field class: "
			"%![req-fc-]+F, %![tgt-fc-]+F", src_fc, tgt_fc);
		is_valid = false;
		goto end;
	}

end:
	bt_object_put_ref(src_field_path);
	bt_object_put_ref(tgt_field_path);
	return is_valid;
}

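/*
 * Returns the field path of `tgt_fc` within `ctx`, validating first (as
 * a precondition) that `tgt_fc` is a legal target for `src_fc`.
 */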
static
struct bt_field_path *resolve_field_path(struct bt_field_class *src_fc,
		struct bt_field_class *tgt_fc,
		struct bt_resolve_field_path_context *ctx)
{
	BT_ASSERT_PRE(field_path_is_valid(src_fc, tgt_fc, ctx),
		"Invalid target field class: %![req-fc-]+F, %![tgt-fc-]+F",
		src_fc, tgt_fc);
	return find_field_class_in_ctx(tgt_fc, ctx);
}

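/*
 * Recursively resolves the length field path of dynamic array field
 * classes and the selector field path of variant field classes found
 * under `fc`, using the root scopes of `ctx`.
 */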
BT_HIDDEN
int bt_resolve_field_paths(struct bt_field_class *fc,
		struct bt_resolve_field_path_context *ctx)
{
	int ret = 0;

	BT_ASSERT(fc);

	/* Resolving part for dynamic array and variant field classes */
	switch (fc->type) {
	case BT_FIELD_CLASS_TYPE_DYNAMIC_ARRAY:
	{
		struct bt_field_class_dynamic_array *dyn_array_fc = (void *) fc;

		if (dyn_array_fc->length_fc) {
			BT_ASSERT(!dyn_array_fc->length_field_path);
			dyn_array_fc->length_field_path = resolve_field_path(
				fc, dyn_array_fc->length_fc, ctx);
			if (!dyn_array_fc->length_field_path) {
				ret = -1;
				goto end;
			}
		}

		break;
	}
	case BT_FIELD_CLASS_TYPE_VARIANT:
	{
		struct bt_field_class_variant *var_fc = (void *) fc;

		if (var_fc->selector_fc) {
			BT_ASSERT(!var_fc->selector_field_path);
			var_fc->selector_field_path =
				resolve_field_path(fc,
					var_fc->selector_fc, ctx);
			if (!var_fc->selector_field_path) {
				ret = -1;
				goto end;
			}
		}

		break;
	}
	default:
		break;
	}

	/* Recursive part */
	switch (fc->type) {
	case BT_FIELD_CLASS_TYPE_STRUCTURE:
	case BT_FIELD_CLASS_TYPE_VARIANT:
	{
		struct bt_field_class_named_field_class_container *container_fc =
			(void *) fc;
		uint64_t i;

		for (i = 0; i < container_fc->named_fcs->len; i++) {
			struct bt_named_field_class *named_fc =
				BT_FIELD_CLASS_NAMED_FC_AT_INDEX(
					container_fc, i);

			ret = bt_resolve_field_paths(named_fc->fc, ctx);
			if (ret) {
				goto end;
			}
		}

		break;
	}
	case BT_FIELD_CLASS_TYPE_STATIC_ARRAY:
	case BT_FIELD_CLASS_TYPE_DYNAMIC_ARRAY:
	{
		struct bt_field_class_array *array_fc = (void *) fc;

		ret = bt_resolve_field_paths(array_fc->element_fc, ctx);
		break;
	}
	default:
		break;
	}

end:
	return ret;
}