#ifndef _BABELTRACE_BITFIELD_H
#define _BABELTRACE_BITFIELD_H

/*
 * Copyright 2010-2019 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <stdint.h>	/* C99 5.2.4.2 Numerical limits */
#include <stdbool.h>	/* C99 7.16 bool type */
#include <babeltrace/compat/limits-internal.h>	/* C99 5.2.4.2 Numerical limits */
#include <babeltrace/endian-internal.h>	/* Non-standard BIG_ENDIAN, LITTLE_ENDIAN, BYTE_ORDER */

/*
 * This header strictly follows the C99 standard, except for use of the
 * compiler-specific __typeof__.
 */

/*
 * This bitfield header requires the compiler representation of signed
 * integers to be two's complement.
 */
#if (-1 != ~0)
#error "bitfield.h requires the compiler representation of signed integers to be two's complement."
#endif

/*
 * _bt_is_signed_type() willingly generates a comparison of an unsigned
 * expression < 0, which is always false. Silence the resulting compiler
 * warnings.
 */
#ifdef __GNUC__
# define _BT_DIAG_PUSH	_Pragma("GCC diagnostic push")
# define _BT_DIAG_POP	_Pragma("GCC diagnostic pop")

# define _BT_DIAG_STRINGIFY_1(x)	#x
# define _BT_DIAG_STRINGIFY(x)	_BT_DIAG_STRINGIFY_1(x)

# define _BT_DIAG_IGNORE(option) \
	_Pragma(_BT_DIAG_STRINGIFY(GCC diagnostic ignored option))
# define _BT_DIAG_IGNORE_TYPE_LIMITS	_BT_DIAG_IGNORE("-Wtype-limits")
#else
# define _BT_DIAG_PUSH
# define _BT_DIAG_POP
# define _BT_DIAG_IGNORE(option)
# define _BT_DIAG_IGNORE_TYPE_LIMITS
#endif

#define _bt_is_signed_type(type)	((type) -1 < (type) 0)

/*
 * Produce a build-time error if the condition `cond` is false (zero).
 * Evaluates as a size_t expression.
 */
#define _BT_BUILD_ASSERT(cond) \
	sizeof(struct { int f:(2 * !!(cond) - 1); })

/*
 * Cast value `v` to an unsigned integer of the same size as `v`.
 */
#define _bt_cast_value_to_unsigned(v) \
	(sizeof(v) == sizeof(uint8_t) ? (uint8_t) (v) : \
	sizeof(v) == sizeof(uint16_t) ? (uint16_t) (v) : \
	sizeof(v) == sizeof(uint32_t) ? (uint32_t) (v) : \
	sizeof(v) == sizeof(uint64_t) ? (uint64_t) (v) : \
	_BT_BUILD_ASSERT(sizeof(v) <= sizeof(uint64_t)))

/*
 * Cast value `v` to an unsigned integer type of the size of type `type`
 * *without* sign-extension.
 *
 * The unsigned cast ensures that we're not shifting a negative value,
 * which is undefined in C. However, this limits the maximum type size
 * of `type` to 64-bit. Generate a compile-time error if the size of
 * `type` is larger than 64-bit.
 */
#define _bt_cast_value_to_unsigned_type(type, v) \
	(sizeof(type) == sizeof(uint8_t) ? \
		(uint8_t) _bt_cast_value_to_unsigned(v) : \
	sizeof(type) == sizeof(uint16_t) ? \
		(uint16_t) _bt_cast_value_to_unsigned(v) : \
	sizeof(type) == sizeof(uint32_t) ? \
		(uint32_t) _bt_cast_value_to_unsigned(v) : \
	sizeof(type) == sizeof(uint64_t) ? \
		(uint64_t) _bt_cast_value_to_unsigned(v) : \
	_BT_BUILD_ASSERT(sizeof(v) <= sizeof(uint64_t)))
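
/*
 * Illustrative example (not from the original header; assumes
 * CHAR_BIT == 8): the cast goes through an unsigned type of the
 * *source* size first, so no sign-extension happens when widening a
 * negative value.
 *
 *   int8_t v = -1;
 *   uint32_t u = _bt_cast_value_to_unsigned_type(uint32_t, v);
 *   // u == 0xFF: v is first converted to uint8_t (0xFF), then widened
 *   // to uint32_t, rather than sign-extended to 0xFFFFFFFF.
 */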

/*
 * _bt_fill_mask evaluates to a "type" integer with all bits set.
 */
#define _bt_fill_mask(type)	((type) ~(type) 0)

/*
 * Left shift a value `v` by `shift` bits.
 *
 * The type of `v` can be a signed or unsigned integer.
 * The value of `shift` must be less than the size of `v` (in bits),
 * otherwise the behavior is undefined.
 * Evaluates to the result of the shift operation.
 *
 * According to the C99 standard, left shift of a left hand-side signed
 * type is undefined if it has a negative value or if the result cannot
 * be represented in the result type. This bitfield header discards the
 * bits that are left-shifted beyond the result type representation,
 * which is the behavior of an unsigned type left shift operation.
 * Therefore, always perform the left shift on an unsigned type.
 *
 * This macro should not be used if `shift` can be greater than or equal
 * to the bitwidth of `v`. See `_bt_safe_lshift`.
 */
#define _bt_lshift(v, shift) \
	((__typeof__(v)) (_bt_cast_value_to_unsigned(v) << (shift)))

/*
 * Generate a mask of type `type` with the `length` least significant bits
 * cleared, and the most significant bits set.
 */
#define _bt_make_mask_complement(type, length) \
	_bt_lshift(_bt_fill_mask(type), length)

/*
 * Generate a mask of type `type` with the `length` least significant bits
 * set, and the most significant bits cleared.
 */
#define _bt_make_mask(type, length) \
	((type) ~_bt_make_mask_complement(type, length))
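
/*
 * Illustrative values (not from the original header; assumes
 * CHAR_BIT == 8):
 *
 *   _bt_make_mask(uint8_t, 3)            == 0x07
 *   _bt_make_mask_complement(uint8_t, 3) == 0xF8
 *   _bt_make_mask(uint8_t, 0)            == 0x00
 */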

/*
 * Right shift a value `v` by `shift` bits.
 *
 * The type of `v` can be a signed or unsigned integer.
 * The value of `shift` must be less than the size of `v` (in bits),
 * otherwise the behavior is undefined.
 * Evaluates to the result of the shift operation.
 *
 * According to the C99 standard, right shift of a left hand-side signed
 * type which has a negative value is implementation-defined. This
 * bitfield header relies on the right shift implementation carrying the
 * sign bit. If the compiler implementation has a different behavior,
 * emulate carrying the sign bit.
 *
 * This macro should not be used if `shift` can be greater than or equal
 * to the bitwidth of `v`. See `_bt_safe_rshift`.
 */
#if ((-1 >> 1) == -1)
#define _bt_rshift(v, shift)	((v) >> (shift))
#else
#define _bt_rshift(v, shift) \
	((__typeof__(v)) ((_bt_cast_value_to_unsigned(v) >> (shift)) | \
		((v) < 0 ? _bt_make_mask_complement(__typeof__(v), \
			sizeof(v) * CHAR_BIT - (shift)) : 0)))
#endif
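
/*
 * Worked example (not from the original header; assumes two's
 * complement, which this header already requires):
 * _bt_rshift((int8_t) -8, 1) evaluates to -4 with either branch above.
 * The fallback branch ORs the complement mask back into the vacated
 * high-order bits to emulate an arithmetic (sign-carrying) shift.
 */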

/*
 * Right shift a signed or unsigned integer by `shift` bits, where
 * `shift` may be an arbitrary number of bits. `v` is modified by this
 * macro. The shift is transformed into a sequence of
 * `_nr_partial_shifts` consecutive shift operations, each of a number
 * of bits smaller than the bitwidth of `v`, ending with a shift of the
 * number of left over bits.
 */
#define _bt_safe_rshift(v, shift) \
do { \
	unsigned long _nr_partial_shifts = (shift) / (sizeof(v) * CHAR_BIT - 1); \
	unsigned long _leftover_bits = (shift) % (sizeof(v) * CHAR_BIT - 1); \
	\
	for (; _nr_partial_shifts; _nr_partial_shifts--) \
		(v) = _bt_rshift(v, sizeof(v) * CHAR_BIT - 1); \
	(v) = _bt_rshift(v, _leftover_bits); \
} while (0)

/*
 * Left shift a signed or unsigned integer by `shift` bits, where
 * `shift` may be an arbitrary number of bits. `v` is modified by this
 * macro. The shift is transformed into a sequence of
 * `_nr_partial_shifts` consecutive shift operations, each of a number
 * of bits smaller than the bitwidth of `v`, ending with a shift of the
 * number of left over bits.
 */
#define _bt_safe_lshift(v, shift) \
do { \
	unsigned long _nr_partial_shifts = (shift) / (sizeof(v) * CHAR_BIT - 1); \
	unsigned long _leftover_bits = (shift) % (sizeof(v) * CHAR_BIT - 1); \
	\
	for (; _nr_partial_shifts; _nr_partial_shifts--) \
		(v) = _bt_lshift(v, sizeof(v) * CHAR_BIT - 1); \
	(v) = _bt_lshift(v, _leftover_bits); \
} while (0)
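
/*
 * Example (not from the original header): the safe shifts accept a
 * shift count greater than or equal to the bitwidth of `v`, which the
 * plain shift operators do not.
 *
 *   uint32_t v = 0xFFFFFFFFU;
 *
 *   _bt_safe_rshift(v, 40);	// v == 0; "v >> 40" would be undefined.
 */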

/*
 * bt_bitfield_write - write integer to a bitfield in native endianness
 *
 * Save an integer to the bitfield, which starts at the "start" bit and
 * is "length" bits long.
 * The inside of a bitfield is from high bits to low bits.
 * Uses native endianness.
 * For unsigned "v", pad the MSB with 0 if the bitfield is larger than v.
 * For signed "v", sign-extend v if the bitfield is larger than v.
 *
 * On little endian, bytes are placed from the least significant to the most
 * significant. Also, consecutive bitfields are placed from lower bits to
 * higher bits.
 *
 * On big endian, bytes are placed from the most significant to the least
 * significant. Also, consecutive bitfields are placed from higher to lower
 * bits.
 */

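/*
 * Layout example (not from the original header): writing the 4-bit
 * value 0xA at bit offset 4 of a zero-initialized one-byte buffer, with
 * an access unit type of uint8_t:
 *
 *   unsigned char le_buf[1] = { 0 }, be_buf[1] = { 0 };
 *
 *   _bt_bitfield_write_le(le_buf, uint8_t, 4, 4, 0xA);	// le_buf[0] == 0xA0
 *   _bt_bitfield_write_be(be_buf, uint8_t, 4, 4, 0xA);	// be_buf[0] == 0x0A
 *
 * In little endian the field lands in the high-order bits of the byte
 * (bit offsets count from the least significant bit), while in big
 * endian it lands in the low-order bits (bit offsets count from the
 * most significant bit).
 */
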
#define _bt_bitfield_write_le(_ptr, type, _start, _length, _v) \
do { \
	__typeof__(_v) __v = (_v); \
	type *__ptr = (void *) (_ptr); \
	unsigned long __start = (_start), __length = (_length); \
	type mask, cmask; \
	unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
	unsigned long start_unit, end_unit, this_unit; \
	unsigned long end, cshift; /* cshift is "complement shift" */ \
	\
	if (!__length) \
		break; \
	\
	end = __start + __length; \
	start_unit = __start / ts; \
	end_unit = (end + (ts - 1)) / ts; \
	\
	/* Trim v high bits */ \
	if (__length < sizeof(__v) * CHAR_BIT) \
		__v &= _bt_make_mask(__typeof__(__v), __length); \
	\
	/* We can now append v with a simple "or", shift it piece-wise */ \
	this_unit = start_unit; \
	if (start_unit == end_unit - 1) { \
		mask = _bt_make_mask(type, __start % ts); \
		if (end % ts) \
			mask |= _bt_make_mask_complement(type, end % ts); \
		cmask = _bt_lshift((type) (__v), __start % ts); \
		cmask &= ~mask; \
		__ptr[this_unit] &= mask; \
		__ptr[this_unit] |= cmask; \
		break; \
	} \
	if (__start % ts) { \
		cshift = __start % ts; \
		mask = _bt_make_mask(type, cshift); \
		cmask = _bt_lshift((type) (__v), cshift); \
		cmask &= ~mask; \
		__ptr[this_unit] &= mask; \
		__ptr[this_unit] |= cmask; \
		_bt_safe_rshift(__v, ts - cshift); \
		__start += ts - cshift; \
		this_unit++; \
	} \
	for (; this_unit < end_unit - 1; this_unit++) { \
		__ptr[this_unit] = (type) __v; \
		_bt_safe_rshift(__v, ts); \
		__start += ts; \
	} \
	if (end % ts) { \
		mask = _bt_make_mask_complement(type, end % ts); \
		cmask = (type) __v; \
		cmask &= ~mask; \
		__ptr[this_unit] &= mask; \
		__ptr[this_unit] |= cmask; \
	} else \
		__ptr[this_unit] = (type) __v; \
} while (0)

#define _bt_bitfield_write_be(_ptr, type, _start, _length, _v) \
do { \
	__typeof__(_v) __v = (_v); \
	type *__ptr = (void *) (_ptr); \
	unsigned long __start = (_start), __length = (_length); \
	type mask, cmask; \
	unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
	unsigned long start_unit, end_unit, this_unit; \
	unsigned long end, cshift; /* cshift is "complement shift" */ \
	\
	if (!__length) \
		break; \
	\
	end = __start + __length; \
	start_unit = __start / ts; \
	end_unit = (end + (ts - 1)) / ts; \
	\
	/* Trim v high bits */ \
	if (__length < sizeof(__v) * CHAR_BIT) \
		__v &= _bt_make_mask(__typeof__(__v), __length); \
	\
	/* We can now append v with a simple "or", shift it piece-wise */ \
	this_unit = end_unit - 1; \
	if (start_unit == end_unit - 1) { \
		mask = _bt_make_mask(type, (ts - (end % ts)) % ts); \
		if (__start % ts) \
			mask |= _bt_make_mask_complement(type, ts - (__start % ts)); \
		cmask = _bt_lshift((type) (__v), (ts - (end % ts)) % ts); \
		cmask &= ~mask; \
		__ptr[this_unit] &= mask; \
		__ptr[this_unit] |= cmask; \
		break; \
	} \
	if (end % ts) { \
		cshift = end % ts; \
		mask = _bt_make_mask(type, ts - cshift); \
		cmask = _bt_lshift((type) (__v), ts - cshift); \
		cmask &= ~mask; \
		__ptr[this_unit] &= mask; \
		__ptr[this_unit] |= cmask; \
		_bt_safe_rshift(__v, cshift); \
		end -= cshift; \
		this_unit--; \
	} \
	for (; (long) this_unit >= (long) start_unit + 1; this_unit--) { \
		__ptr[this_unit] = (type) __v; \
		_bt_safe_rshift(__v, ts); \
		end -= ts; \
	} \
	if (__start % ts) { \
		mask = _bt_make_mask_complement(type, ts - (__start % ts)); \
		cmask = (type) __v; \
		cmask &= ~mask; \
		__ptr[this_unit] &= mask; \
		__ptr[this_unit] |= cmask; \
	} else \
		__ptr[this_unit] = (type) __v; \
} while (0)

/*
 * bt_bitfield_write - write integer to a bitfield in native endianness
 * bt_bitfield_write_le - write integer to a bitfield in little endian
 * bt_bitfield_write_be - write integer to a bitfield in big endian
 */

#if (BYTE_ORDER == LITTLE_ENDIAN)

#define bt_bitfield_write(ptr, type, _start, _length, _v) \
	_bt_bitfield_write_le(ptr, type, _start, _length, _v)

#define bt_bitfield_write_le(ptr, type, _start, _length, _v) \
	_bt_bitfield_write_le(ptr, type, _start, _length, _v)

#define bt_bitfield_write_be(ptr, type, _start, _length, _v) \
	_bt_bitfield_write_be(ptr, unsigned char, _start, _length, _v)

#elif (BYTE_ORDER == BIG_ENDIAN)

#define bt_bitfield_write(ptr, type, _start, _length, _v) \
	_bt_bitfield_write_be(ptr, type, _start, _length, _v)

#define bt_bitfield_write_le(ptr, type, _start, _length, _v) \
	_bt_bitfield_write_le(ptr, unsigned char, _start, _length, _v)

#define bt_bitfield_write_be(ptr, type, _start, _length, _v) \
	_bt_bitfield_write_be(ptr, type, _start, _length, _v)

#else /* (BYTE_ORDER == PDP_ENDIAN) */

#error "Byte order not supported"

#endif
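
/*
 * Usage sketch (illustrative, not part of the original header): pack
 * two unsigned fields, 3 bits then 13 bits, back to back into a
 * little-endian byte buffer. The `type` argument selects the access
 * unit used to touch the buffer; `unsigned char` is the most portable
 * choice.
 *
 *   unsigned char buf[4] = { 0 };
 *   uint16_t a = 0x5, b = 0x1234;
 *
 *   bt_bitfield_write_le(buf, unsigned char, 0, 3, a);
 *   bt_bitfield_write_le(buf, unsigned char, 3, 13, b);
 *   // buf[0] == 0xA5, buf[1] == 0x91; bytes 2 and 3 are untouched.
 */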

#define _bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
do { \
	__typeof__(*(_vptr)) *__vptr = (_vptr); \
	__typeof__(*__vptr) __v; \
	type *__ptr = (void *) (_ptr); \
	unsigned long __start = (_start), __length = (_length); \
	type mask, cmask; \
	unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
	unsigned long start_unit, end_unit, this_unit; \
	unsigned long end, cshift; /* cshift is "complement shift" */ \
	bool is_signed_type; \
	\
	if (!__length) { \
		*__vptr = 0; \
		break; \
	} \
	\
	end = __start + __length; \
	start_unit = __start / ts; \
	end_unit = (end + (ts - 1)) / ts; \
	\
	this_unit = end_unit - 1; \
	_BT_DIAG_PUSH \
	_BT_DIAG_IGNORE_TYPE_LIMITS \
	is_signed_type = _bt_is_signed_type(__typeof__(__v)); \
	_BT_DIAG_POP \
	if (is_signed_type \
			&& (__ptr[this_unit] & _bt_lshift((type) 1, (end % ts ? end % ts : ts) - 1))) \
		__v = ~(__typeof__(__v)) 0; \
	else \
		__v = 0; \
	if (start_unit == end_unit - 1) { \
		cmask = __ptr[this_unit]; \
		cmask = _bt_rshift(cmask, __start % ts); \
		if ((end - __start) % ts) { \
			mask = _bt_make_mask(type, end - __start); \
			cmask &= mask; \
		} \
		_bt_safe_lshift(__v, end - __start); \
		__v |= _bt_cast_value_to_unsigned_type(__typeof__(__v), cmask); \
		*__vptr = __v; \
		break; \
	} \
	if (end % ts) { \
		cshift = end % ts; \
		mask = _bt_make_mask(type, cshift); \
		cmask = __ptr[this_unit]; \
		cmask &= mask; \
		_bt_safe_lshift(__v, cshift); \
		__v |= _bt_cast_value_to_unsigned_type(__typeof__(__v), cmask); \
		end -= cshift; \
		this_unit--; \
	} \
	for (; (long) this_unit >= (long) start_unit + 1; this_unit--) { \
		_bt_safe_lshift(__v, ts); \
		__v |= _bt_cast_value_to_unsigned_type(__typeof__(__v), __ptr[this_unit]); \
		end -= ts; \
	} \
	if (__start % ts) { \
		mask = _bt_make_mask(type, ts - (__start % ts)); \
		cmask = __ptr[this_unit]; \
		cmask = _bt_rshift(cmask, __start % ts); \
		cmask &= mask; \
		_bt_safe_lshift(__v, ts - (__start % ts)); \
		__v |= _bt_cast_value_to_unsigned_type(__typeof__(__v), cmask); \
	} else { \
		_bt_safe_lshift(__v, ts); \
		__v |= _bt_cast_value_to_unsigned_type(__typeof__(__v), __ptr[this_unit]); \
	} \
	*__vptr = __v; \
} while (0)

#define _bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
do { \
	__typeof__(*(_vptr)) *__vptr = (_vptr); \
	__typeof__(*__vptr) __v; \
	type *__ptr = (void *) (_ptr); \
	unsigned long __start = (_start), __length = (_length); \
	type mask, cmask; \
	unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
	unsigned long start_unit, end_unit, this_unit; \
	unsigned long end, cshift; /* cshift is "complement shift" */ \
	bool is_signed_type; \
	\
	if (!__length) { \
		*__vptr = 0; \
		break; \
	} \
	\
	end = __start + __length; \
	start_unit = __start / ts; \
	end_unit = (end + (ts - 1)) / ts; \
	\
	this_unit = start_unit; \
	_BT_DIAG_PUSH \
	_BT_DIAG_IGNORE_TYPE_LIMITS \
	is_signed_type = _bt_is_signed_type(__typeof__(__v)); \
	_BT_DIAG_POP \
	if (is_signed_type \
			&& (__ptr[this_unit] & _bt_lshift((type) 1, ts - (__start % ts) - 1))) \
		__v = ~(__typeof__(__v)) 0; \
	else \
		__v = 0; \
	if (start_unit == end_unit - 1) { \
		cmask = __ptr[this_unit]; \
		cmask = _bt_rshift(cmask, (ts - (end % ts)) % ts); \
		if ((end - __start) % ts) { \
			mask = _bt_make_mask(type, end - __start); \
			cmask &= mask; \
		} \
		_bt_safe_lshift(__v, end - __start); \
		__v |= _bt_cast_value_to_unsigned_type(__typeof__(__v), cmask); \
		*__vptr = __v; \
		break; \
	} \
	if (__start % ts) { \
		cshift = __start % ts; \
		mask = _bt_make_mask(type, ts - cshift); \
		cmask = __ptr[this_unit]; \
		cmask &= mask; \
		_bt_safe_lshift(__v, ts - cshift); \
		__v |= _bt_cast_value_to_unsigned_type(__typeof__(__v), cmask); \
		__start += ts - cshift; \
		this_unit++; \
	} \
	for (; this_unit < end_unit - 1; this_unit++) { \
		_bt_safe_lshift(__v, ts); \
		__v |= _bt_cast_value_to_unsigned_type(__typeof__(__v), __ptr[this_unit]); \
		__start += ts; \
	} \
	if (end % ts) { \
		mask = _bt_make_mask(type, end % ts); \
		cmask = __ptr[this_unit]; \
		cmask = _bt_rshift(cmask, ts - (end % ts)); \
		cmask &= mask; \
		_bt_safe_lshift(__v, end % ts); \
		__v |= _bt_cast_value_to_unsigned_type(__typeof__(__v), cmask); \
	} else { \
		_bt_safe_lshift(__v, ts); \
		__v |= _bt_cast_value_to_unsigned_type(__typeof__(__v), __ptr[this_unit]); \
	} \
	*__vptr = __v; \
} while (0)

/*
 * bt_bitfield_read - read integer from a bitfield in native endianness
 * bt_bitfield_read_le - read integer from a bitfield in little endian
 * bt_bitfield_read_be - read integer from a bitfield in big endian
 */

#if (BYTE_ORDER == LITTLE_ENDIAN)

#define bt_bitfield_read(_ptr, type, _start, _length, _vptr) \
	_bt_bitfield_read_le(_ptr, type, _start, _length, _vptr)

#define bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
	_bt_bitfield_read_le(_ptr, type, _start, _length, _vptr)

#define bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
	_bt_bitfield_read_be(_ptr, unsigned char, _start, _length, _vptr)

#elif (BYTE_ORDER == BIG_ENDIAN)

#define bt_bitfield_read(_ptr, type, _start, _length, _vptr) \
	_bt_bitfield_read_be(_ptr, type, _start, _length, _vptr)

#define bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
	_bt_bitfield_read_le(_ptr, unsigned char, _start, _length, _vptr)

#define bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
	_bt_bitfield_read_be(_ptr, type, _start, _length, _vptr)

#else /* (BYTE_ORDER == PDP_ENDIAN) */

#error "Byte order not supported"

#endif
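
/*
 * Usage sketch (illustrative, not part of the original header): read a
 * 13-bit field starting at bit 3 from a little-endian buffer, first as
 * an unsigned value and then as a signed value. Reading into a signed
 * destination sign-extends the field.
 *
 *   unsigned char buf[2] = { 0xA5, 0x91 };	// 13-bit field at bit 3 == 0x1234
 *   uint16_t u;
 *   int16_t s;
 *
 *   bt_bitfield_read_le(buf, unsigned char, 3, 13, &u);
 *   bt_bitfield_read_le(buf, unsigned char, 3, 13, &s);
 *   // u == 0x1234; s == (int16_t) 0xF234, because bit 12, the field's
 *   // most significant bit, is set.
 */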

#endif /* _BABELTRACE_BITFIELD_H */