#ifndef BABELTRACE_CTF_IR_CLOCK_VALUE_INTERNAL_H
#define BABELTRACE_CTF_IR_CLOCK_VALUE_INTERNAL_H

/*
 * Copyright 2017 Philippe Proulx <pproulx@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <babeltrace/babeltrace-internal.h>
#include <babeltrace/object-internal.h>
#include <babeltrace/ctf-ir/clock-class-internal.h>
#include <stdbool.h>
#include <stdint.h>

struct bt_clock_class;

struct bt_clock_value {
	struct bt_object base;
	struct bt_clock_class *clock_class;

	/* Raw value, in cycles of the clock class's frequency */
	uint64_t value;

	/* `value` converted to nanoseconds from Epoch, and its overflow flag */
	bool ns_from_epoch_overflows;
	int64_t ns_from_epoch;

	/* True once a raw value has been set */
	bool is_set;

	/* Developer mode only: true once this object must not be modified */
	bool frozen;
};

static inline
void bt_clock_value_set(struct bt_clock_value *clock_value)
{
	BT_ASSERT(clock_value);
	clock_value->is_set = true;
}

static inline
void bt_clock_value_reset(struct bt_clock_value *clock_value)
{
	BT_ASSERT(clock_value);
	clock_value->is_set = false;
}

BT_UNUSED
static inline
void _bt_clock_value_set_is_frozen(struct bt_clock_value *clock_value,
		bool is_frozen)
{
	BT_ASSERT(clock_value);
	clock_value->frozen = is_frozen;
}

static inline
uint64_t ns_from_value(uint64_t frequency, uint64_t value)
{
	uint64_t ns;

	if (frequency == UINT64_C(1000000000)) {
		ns = value;
	} else {
		double dblres = ((1e9 * (double) value) / (double) frequency);

		if (dblres >= (double) UINT64_MAX) {
			/* Overflows uint64_t */
			ns = -1ULL;
		} else {
			ns = (uint64_t) dblres;
		}
	}

	return ns;
}
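
/*
 * Illustrative example only: for a 1 MHz clock, 2500000 cycles map to
 * 2500000000 ns, since each cycle lasts 1000 ns:
 *
 *     uint64_t ns = ns_from_value(UINT64_C(1000000), UINT64_C(2500000));
 *
 * Here `ns` is 2500000000. When the result would not fit in 64 bits,
 * ns_from_value() returns -1ULL (UINT64_MAX) to signal the overflow.
 */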

static inline
int ns_from_epoch(struct bt_clock_class *clock_class, uint64_t value,
		int64_t *ns_from_epoch, bool *overflows)
{
	int ret = 0;
	int64_t diff;
	int64_t s_ns;
	uint64_t u_ns;
	uint64_t cycles;

	*overflows = false;

	/*
	 * Initialize the nanosecond timestamp from the clock's offset
	 * in seconds, converted to nanoseconds.
	 */
	if (clock_class->offset_s <= (INT64_MIN / INT64_C(1000000000)) ||
			clock_class->offset_s >= (INT64_MAX / INT64_C(1000000000))) {
		/*
		 * Overflow: offset in seconds converted to nanoseconds
		 * is outside the int64_t range.
		 */
		*overflows = true;
		goto end;
	}

	*ns_from_epoch = clock_class->offset_s * INT64_C(1000000000);

	/* Add offset in cycles */
	if (clock_class->offset < 0) {
		cycles = (uint64_t) -clock_class->offset;
	} else {
		cycles = (uint64_t) clock_class->offset;
	}

	u_ns = ns_from_value(clock_class->frequency, cycles);

	if (u_ns == UINT64_C(-1) || u_ns >= INT64_MAX) {
		/*
		 * Overflow: offset in cycles converted to nanoseconds
		 * is outside the int64_t range.
		 */
		*overflows = true;
		goto end;
	}

	s_ns = (int64_t) u_ns;
	BT_ASSERT(s_ns >= 0);

	if (clock_class->offset < 0) {
		if (*ns_from_epoch >= 0) {
			/*
			 * Offset in cycles is negative so it must also
			 * be negative once converted to nanoseconds.
			 */
			s_ns = -s_ns;
			goto offset_ok;
		}

		diff = *ns_from_epoch - INT64_MIN;

		if (s_ns >= diff) {
			/*
			 * Overflow: current timestamp in nanoseconds
			 * plus the offset in cycles converted to
			 * nanoseconds is outside the int64_t range.
			 */
			*overflows = true;
			goto end;
		}

		/*
		 * Offset in cycles is negative so it must also be
		 * negative once converted to nanoseconds.
		 */
		s_ns = -s_ns;
	} else {
		if (*ns_from_epoch <= 0) {
			goto offset_ok;
		}

		diff = INT64_MAX - *ns_from_epoch;

		if (s_ns >= diff) {
			/*
			 * Overflow: current timestamp in nanoseconds
			 * plus the offset in cycles converted to
			 * nanoseconds is outside the int64_t range.
			 */
			*overflows = true;
			goto end;
		}
	}

offset_ok:
	*ns_from_epoch += s_ns;

	/* Add clock value (cycles) */
	u_ns = ns_from_value(clock_class->frequency, value);

	if (u_ns == -1ULL || u_ns >= INT64_MAX) {
		/*
		 * Overflow: value converted to nanoseconds is outside
		 * the int64_t range.
		 */
		*overflows = true;
		goto end;
	}

	s_ns = (int64_t) u_ns;
	BT_ASSERT(s_ns >= 0);

	/* Clock value (cycles) is always positive */
	if (*ns_from_epoch <= 0) {
		goto value_ok;
	}

	diff = INT64_MAX - *ns_from_epoch;

	if (s_ns >= diff) {
		/*
		 * Overflow: current timestamp in nanoseconds plus the
		 * clock value converted to nanoseconds is outside the
		 * int64_t range.
		 */
		*overflows = true;
		goto end;
	}

value_ok:
	*ns_from_epoch += s_ns;

end:
	if (*overflows) {
		*ns_from_epoch = 0;
		ret = -1;
	}

	return ret;
}
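
/*
 * Illustrative sketch only: given some `clock_class` (hypothetical
 * variable) with a frequency of 1000000000 Hz, an offset of
 * 1500000000 seconds and an offset of 500 cycles, a raw value of
 * 1000 cycles converts as follows:
 *
 *     int64_t ns;
 *     bool overflows;
 *     int ret = ns_from_epoch(clock_class, UINT64_C(1000), &ns, &overflows);
 *
 * On success, `ret` is 0, `overflows` is false, and `ns` is
 * 1500000000 * 1000000000 + 500 + 1000 ns. If any intermediate sum
 * falls outside the int64_t range, `ret` is -1, `overflows` is true,
 * and `ns` is set to 0.
 */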

static inline
void set_ns_from_epoch(struct bt_clock_value *clock_value)
{
	(void) ns_from_epoch(clock_value->clock_class,
		clock_value->value, &clock_value->ns_from_epoch,
		&clock_value->ns_from_epoch_overflows);
}

static inline
void bt_clock_value_set_raw_value(struct bt_clock_value *clock_value,
		uint64_t cycles)
{
	BT_ASSERT(clock_value);

	clock_value->value = cycles;
	set_ns_from_epoch(clock_value);
	bt_clock_value_set(clock_value);
}
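
/*
 * Minimal usage sketch (assumes `clock_value` was obtained from
 * bt_clock_value_create() with a valid clock class):
 *
 *     bt_clock_value_set_raw_value(clock_value, UINT64_C(1234));
 *
 * This stores the raw value in cycles, updates the cached
 * ns-from-Epoch conversion (and its overflow flag), and marks the
 * clock value as set.
 */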

static inline
int bt_clock_value_set_value_inline(struct bt_clock_value *clock_value,
		uint64_t raw_value)
{
#ifdef BT_ASSERT_PRE_NON_NULL
	BT_ASSERT_PRE_NON_NULL(clock_value, "Clock value");
#endif

#ifdef BT_ASSERT_PRE_HOT
	BT_ASSERT_PRE_HOT(clock_value, "Clock value", ": %!+k", clock_value);
#endif

	bt_clock_value_set_raw_value(clock_value, raw_value);
	return 0;
}

#ifdef BT_DEV_MODE
# define bt_clock_value_set_is_frozen _bt_clock_value_set_is_frozen
#else
# define bt_clock_value_set_is_frozen(_x, _f)
#endif /* BT_DEV_MODE */
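
/*
 * In developer mode, bt_clock_value_set_is_frozen() expands to the real
 * setter above; otherwise the call compiles to nothing. For example,
 * with `clock_value` standing for any existing clock value object:
 *
 *     bt_clock_value_set_is_frozen(clock_value, true);
 *
 * costs nothing in a non-developer build.
 */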

BT_HIDDEN
struct bt_clock_value *bt_clock_value_create(
		struct bt_clock_class *clock_class);

BT_HIDDEN
void bt_clock_value_recycle(struct bt_clock_value *clock_value);

BT_HIDDEN
void bt_clock_value_set_raw_value(struct bt_clock_value *clock_value,
		uint64_t cycles);

#endif /* BABELTRACE_CTF_IR_CLOCK_VALUE_INTERNAL_H */