/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2010-2019 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _BABELTRACE_BITFIELD_H
#define _BABELTRACE_BITFIELD_H

#include <stdint.h>	/* C99 5.2.4.2 Numerical limits */
#include <stdbool.h>	/* C99 7.16 bool type */
#include "compat/limits.h"	/* C99 5.2.4.2 Numerical limits */
#include "compat/endian.h"	/* Non-standard BIG_ENDIAN, LITTLE_ENDIAN, BYTE_ORDER */

/*
 * This header strictly follows the C99 standard, except for use of the
 * compiler-specific __typeof__.
 */

/*
 * This bitfield header requires the compiler representation of signed
 * integers to be two's complement.
 */
#if (-1 != ~0)
#error "bitfield.h requires the compiler representation of signed integers to be two's complement."
#endif

/*
 * _bt_is_signed_type() willingly generates comparison of unsigned
 * expression < 0, which is always false. Silence compiler warnings.
 * GCC versions lower than 4.6.0 do not accept diagnostic pragma inside
 * functions.
 */
#if defined(__GNUC__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= 40600
# define _BT_DIAG_PUSH _Pragma("GCC diagnostic push")
# define _BT_DIAG_POP _Pragma("GCC diagnostic pop")

# define _BT_DIAG_STRINGIFY_1(x) #x
# define _BT_DIAG_STRINGIFY(x) _BT_DIAG_STRINGIFY_1(x)

# define _BT_DIAG_IGNORE(option) \
	_Pragma(_BT_DIAG_STRINGIFY(GCC diagnostic ignored option))
# define _BT_DIAG_IGNORE_TYPE_LIMITS _BT_DIAG_IGNORE("-Wtype-limits")
#else
# define _BT_DIAG_PUSH
# define _BT_DIAG_POP
/*
 * Function-like to match the GCC >= 4.6 definition above: a use such as
 * _BT_DIAG_IGNORE("-Wfoo") must expand to nothing here, not leave a
 * stray ("-Wfoo") token sequence behind.
 */
# define _BT_DIAG_IGNORE(option)
# define _BT_DIAG_IGNORE_TYPE_LIMITS
#endif

/* Evaluates to true (non-zero) when the integer type `type` is signed. */
#define _bt_is_signed_type(type)	((type) -1 < (type) 0)

/*
 * Produce a build-time error if the condition `cond` is false (zero).
 * Evaluates as a size_t expression in C; in C++ it evaluates to the
 * integer constant 0.
 */
#ifdef __cplusplus
#define _BT_BUILD_ASSERT(cond)	([]{static_assert((cond), "");}, 0)
#else
#define _BT_BUILD_ASSERT(cond)	\
	sizeof(struct { int f:(2 * !!(cond) - 1); })
#endif

/*
 * Cast value `v` to an unsigned integer of the same size as `v`.
 */
#define _bt_cast_value_to_unsigned(v)	\
	(sizeof(v) == sizeof(uint8_t) ? (uint8_t) (v) :	\
	sizeof(v) == sizeof(uint16_t) ? (uint16_t) (v) :	\
	sizeof(v) == sizeof(uint32_t) ? (uint32_t) (v) :	\
	sizeof(v) == sizeof(uint64_t) ? (uint64_t) (v) :	\
	_BT_BUILD_ASSERT(sizeof(v) <= sizeof(uint64_t)))

/*
 * Cast value `v` to an unsigned integer type of the size of type `type`
 * *without* sign-extension.
 *
 * The unsigned cast ensures that we're not shifting a negative value,
 * which is undefined in C. However, this limits the maximum type size
 * of `type` to 64-bit. Generate a compile-time error if the size of
 * `type` is larger than 64-bit.
 */
#define _bt_cast_value_to_unsigned_type(type, v)	\
	(sizeof(type) == sizeof(uint8_t) ?	\
		(uint8_t) _bt_cast_value_to_unsigned(v) :	\
	sizeof(type) == sizeof(uint16_t) ?	\
		(uint16_t) _bt_cast_value_to_unsigned(v) :	\
	sizeof(type) == sizeof(uint32_t) ?	\
		(uint32_t) _bt_cast_value_to_unsigned(v) :	\
	sizeof(type) == sizeof(uint64_t) ?	\
		(uint64_t) _bt_cast_value_to_unsigned(v) :	\
	/* Reached only when no size of `type` matched: check `type`, not `v`. */	\
	_BT_BUILD_ASSERT(sizeof(type) <= sizeof(uint64_t)))

/*
 * _bt_fill_mask evaluates to a "type" integer with all bits set.
 */
#define _bt_fill_mask(type)	((type) ~(type) 0)

/*
 * Left shift a value `v` of `shift` bits.
 *
 * The type of `v` can be signed or unsigned integer.
 * The value of `shift` must be less than the size of `v` (in bits),
 * otherwise the behavior is undefined.
 * Evaluates to the result of the shift operation.
 *
 * According to the C99 standard, left shift of a left hand-side signed
 * type is undefined if it has a negative value or if the result cannot
 * be represented in the result type. This bitfield header discards the
 * bits that are left-shifted beyond the result type representation,
 * which is the behavior of an unsigned type left shift operation.
 * Therefore, always perform left shift on an unsigned type.
 *
 * This macro should not be used if `shift` can be greater or equal than
 * the bitwidth of `v`. See `_bt_safe_lshift`.
 */
#define _bt_lshift(v, shift) \
	((__typeof__(v)) (_bt_cast_value_to_unsigned(v) << (shift)))

/*
 * Generate a mask of type `type` with the `length` least significant bits
 * cleared, and the most significant bits set.
 *
 * NOTE(review): built on _bt_lshift(), so `length` must be strictly
 * less than the bitwidth of `type`; call sites in this header pass
 * values reduced modulo the unit size, which satisfies this.
 */
#define _bt_make_mask_complement(type, length) \
	_bt_lshift(_bt_fill_mask(type), length)

/*
 * Generate a mask of type `type` with the `length` least significant bits
 * set, and the most significant bits cleared.
 */
#define _bt_make_mask(type, length) \
	((type) ~_bt_make_mask_complement(type, length))

/*
 * Right shift a value `v` of `shift` bits.
 *
 * The type of `v` can be signed or unsigned integer.
 * The value of `shift` must be less than the size of `v` (in bits),
 * otherwise the behavior is undefined.
 * Evaluates to the result of the shift operation.
 *
 * According to the C99 standard, right shift of a left hand-side signed
 * type which has a negative value is implementation defined. This
 * bitfield header relies on the right shift implementation carrying the
 * sign bit. If the compiler implementation has a different behavior,
 * emulate carrying the sign bit.
 *
 * This macro should not be used if `shift` can be greater or equal than
 * the bitwidth of `v`. See `_bt_safe_rshift`.
 */
#if ((-1 >> 1) == -1)
#define _bt_rshift(v, shift)	((v) >> (shift))
#else
/*
 * NOTE(review): when `shift` is 0 and `v` is negative, the emulation
 * below evaluates _bt_make_mask_complement() over the full bitwidth of
 * `v`, which _bt_lshift() does not support -- confirm this combination
 * is unreachable on targets taking this branch.
 */
#define _bt_rshift(v, shift) \
	((__typeof__(v)) ((_bt_cast_value_to_unsigned(v) >> (shift)) | \
		((v) < 0 ? _bt_make_mask_complement(__typeof__(v), \
			sizeof(v) * CHAR_BIT - (shift)) : 0)))
#endif

/*
 * Right shift a signed or unsigned integer with `shift` value being an
 * arbitrary number of bits. `v` is modified by this macro. The shift
 * is transformed into a sequence of `_nr_partial_shifts` consecutive
 * shift operations, each of a number of bits smaller than the bitwidth
 * of `v`, ending with a shift of the number of left over bits.
 */
#define _bt_safe_rshift(v, shift) \
do { \
	unsigned long _nr_partial_shifts = (shift) / (sizeof(v) * CHAR_BIT - 1); \
	unsigned long _leftover_bits = (shift) % (sizeof(v) * CHAR_BIT - 1); \
\
	for (; _nr_partial_shifts; _nr_partial_shifts--) \
		(v) = _bt_rshift(v, sizeof(v) * CHAR_BIT - 1); \
	(v) = _bt_rshift(v, _leftover_bits); \
} while (0)

/*
 * Left shift a signed or unsigned integer with `shift` value being an
 * arbitrary number of bits. `v` is modified by this macro. The shift
 * is transformed into a sequence of `_nr_partial_shifts` consecutive
 * shift operations, each of a number of bits smaller than the bitwidth
 * of `v`, ending with a shift of the number of left over bits.
 */
#define _bt_safe_lshift(v, shift) \
do { \
	unsigned long _nr_partial_shifts = (shift) / (sizeof(v) * CHAR_BIT - 1); \
	unsigned long _leftover_bits = (shift) % (sizeof(v) * CHAR_BIT - 1); \
\
	for (; _nr_partial_shifts; _nr_partial_shifts--) \
		(v) = _bt_lshift(v, sizeof(v) * CHAR_BIT - 1); \
	(v) = _bt_lshift(v, _leftover_bits); \
} while (0)

/*
 * bt_bitfield_write - write integer to a bitfield in native endianness
 *
 * Save integer to the bitfield, which starts at the "start" bit, has "len"
 * bits.
 * The inside of a bitfield is from high bits to low bits.
 * Uses native endianness.
 * For unsigned "v", pad MSB with 0 if bitfield is larger than v.
 * For signed "v", sign-extend v if bitfield is larger than v.
 *
 * On little endian, bytes are placed from the less significant to the most
 * significant. Also, consecutive bitfields are placed from lower bits to higher
 * bits.
 *
 * On big endian, bytes are places from most significant to less significant.
 * Also, consecutive bitfields are placed from higher to lower bits.
 *
 * Parameters:
 *   ptr:    base address of the unit array holding the bitfield.
 *   type:   type of one array unit (NOTE(review): expected to be an
 *           unsigned integer type -- confirm with callers).
 *   start:  offset of the bitfield, in bits, from `ptr`.
 *   length: size of the bitfield, in bits; a zero length is a no-op.
 *   v:      integer value to store.
 */

#define _bt_bitfield_write_le(ptr, type, start, length, v) \
do { \
	__typeof__(v) _v = (v); \
	type *_ptr = (void *) (ptr); \
	unsigned long _start = (start), _length = (length); \
	type _mask, _cmask; \
	unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \
	unsigned long _start_unit, _end_unit, _this_unit; \
	unsigned long _end, _cshift; /* _cshift is "complement shift" */ \
\
	if (!_length) \
		break; \
\
	_end = _start + _length; \
	_start_unit = _start / _ts; \
	_end_unit = (_end + (_ts - 1)) / _ts; \
\
	/* Trim v high bits */ \
	if (_length < sizeof(_v) * CHAR_BIT) \
		_v &= _bt_make_mask(__typeof__(_v), _length); \
\
	/* We can now append v with a simple "or", shift it piece-wise */ \
	_this_unit = _start_unit; \
	/* Bitfield contained within a single unit: mask around it and stop. */ \
	if (_start_unit == _end_unit - 1) { \
		_mask = _bt_make_mask(type, _start % _ts); \
		if (_end % _ts) \
			_mask |= _bt_make_mask_complement(type, _end % _ts); \
		_cmask = _bt_lshift((type) (_v), _start % _ts); \
		_cmask &= ~_mask; \
		_ptr[_this_unit] &= _mask; \
		_ptr[_this_unit] |= _cmask; \
		break; \
	} \
	/* Partial unit at the start of the bitfield. */ \
	if (_start % _ts) { \
		_cshift = _start % _ts; \
		_mask = _bt_make_mask(type, _cshift); \
		_cmask = _bt_lshift((type) (_v), _cshift); \
		_cmask &= ~_mask; \
		_ptr[_this_unit] &= _mask; \
		_ptr[_this_unit] |= _cmask; \
		_bt_safe_rshift(_v, _ts - _cshift); \
		_start += _ts - _cshift; \
		_this_unit++; \
	} \
	/* Whole middle units, lowest bits of v first. */ \
	for (; _this_unit < _end_unit - 1; _this_unit++) { \
		_ptr[_this_unit] = (type) _v; \
		_bt_safe_rshift(_v, _ts); \
		_start += _ts; \
	} \
	/* Partial (or exactly full) unit at the end of the bitfield. */ \
	if (_end % _ts) { \
		_mask = _bt_make_mask_complement(type, _end % _ts); \
		_cmask = (type) _v; \
		_cmask &= ~_mask; \
		_ptr[_this_unit] &= _mask; \
		_ptr[_this_unit] |= _cmask; \
	} else \
		_ptr[_this_unit] = (type) _v; \
} while (0)

/*
 * Write `v` to a bitfield laid out in big-endian unit order: same
 * contract as _bt_bitfield_write_le() above, but units are filled from
 * the last (least significant) unit towards the first.
 */
#define _bt_bitfield_write_be(ptr, type, start, length, v) \
do { \
	__typeof__(v) _v = (v); \
	type *_ptr = (void *) (ptr); \
	unsigned long _start = (start), _length = (length); \
	type _mask, _cmask; \
	unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \
	unsigned long _start_unit, _end_unit, _this_unit; \
	unsigned long _end, _cshift; /* _cshift is "complement shift" */ \
\
	if (!_length) \
		break; \
\
	_end = _start + _length; \
	_start_unit = _start / _ts; \
	_end_unit = (_end + (_ts - 1)) / _ts; \
\
	/* Trim v high bits */ \
	if (_length < sizeof(_v) * CHAR_BIT) \
		_v &= _bt_make_mask(__typeof__(_v), _length); \
\
	/* We can now append v with a simple "or", shift it piece-wise */ \
	_this_unit = _end_unit - 1; \
	/* Bitfield contained within a single unit: mask around it and stop. */ \
	if (_start_unit == _end_unit - 1) { \
		_mask = _bt_make_mask(type, (_ts - (_end % _ts)) % _ts); \
		if (_start % _ts) \
			_mask |= _bt_make_mask_complement(type, _ts - (_start % _ts)); \
		_cmask = _bt_lshift((type) (_v), (_ts - (_end % _ts)) % _ts); \
		_cmask &= ~_mask; \
		_ptr[_this_unit] &= _mask; \
		_ptr[_this_unit] |= _cmask; \
		break; \
	} \
	/* Partial unit at the end of the bitfield. */ \
	if (_end % _ts) { \
		_cshift = _end % _ts; \
		_mask = _bt_make_mask(type, _ts - _cshift); \
		_cmask = _bt_lshift((type) (_v), _ts - _cshift); \
		_cmask &= ~_mask; \
		_ptr[_this_unit] &= _mask; \
		_ptr[_this_unit] |= _cmask; \
		_bt_safe_rshift(_v, _cshift); \
		_end -= _cshift; \
		_this_unit--; \
	} \
	/* Whole middle units, lowest bits of v stored last. */ \
	for (; (long) _this_unit >= (long) _start_unit + 1; _this_unit--) { \
		_ptr[_this_unit] = (type) _v; \
		_bt_safe_rshift(_v, _ts); \
		_end -= _ts; \
	} \
	/* Partial (or exactly full) unit at the start of the bitfield. */ \
	if (_start % _ts) { \
		_mask = _bt_make_mask_complement(type, _ts - (_start % _ts)); \
		_cmask = (type) _v; \
		_cmask &= ~_mask; \
		_ptr[_this_unit] &= _mask; \
		_ptr[_this_unit] |= _cmask; \
	} else \
		_ptr[_this_unit] = (type) _v; \
} while (0)

/*
 * bt_bitfield_write - write integer to a bitfield in native endianness
 * bt_bitfield_write_le - write integer to a bitfield in little endian
 * bt_bitfield_write_be - write integer to a bitfield in big endian
 *
 * The `type` argument selects the access unit size. When writing in the
 * byte order opposite to the native one, the unit is forced to
 * `unsigned char` (single bytes carry no endianness), ignoring `type`.
 */

#if (BYTE_ORDER == LITTLE_ENDIAN)

#define bt_bitfield_write(ptr, type, start, length, v) \
	_bt_bitfield_write_le(ptr, type, start, length, v)

#define bt_bitfield_write_le(ptr, type, start, length, v) \
	_bt_bitfield_write_le(ptr, type, start, length, v)

#define bt_bitfield_write_be(ptr, type, start, length, v) \
	_bt_bitfield_write_be(ptr, unsigned char, start, length, v)

#elif (BYTE_ORDER == BIG_ENDIAN)

#define bt_bitfield_write(ptr, type, start, length, v) \
	_bt_bitfield_write_be(ptr, type, start, length, v)

#define bt_bitfield_write_le(ptr, type, start, length, v) \
	_bt_bitfield_write_le(ptr, unsigned char, start, length, v)

#define bt_bitfield_write_be(ptr, type, start, length, v) \
	_bt_bitfield_write_be(ptr, type, start, length, v)

#else /* (BYTE_ORDER == PDP_ENDIAN) */

#error "Byte order not supported"

#endif

/*
 * Read, in little-endian unit order, the bitfield of `length` bits
 * starting at bit offset `start` from `ptr`, into the integer pointed
 * to by `vptr`. The value is sign-extended when the type of `*vptr` is
 * signed. A zero `length` stores 0.
 */
#define _bt_bitfield_read_le(ptr, type, start, length, vptr) \
do { \
	__typeof__(*(vptr)) *_vptr = (vptr); \
	__typeof__(*_vptr) _v; \
	type *_ptr = (void *) (ptr); /* (void *) for consistency with the other three variants */ \
	unsigned long _start = (start), _length = (length); \
	type _mask, _cmask; \
	unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \
	unsigned long _start_unit, _end_unit, _this_unit; \
	unsigned long _end, _cshift; /* _cshift is "complement shift" */ \
	bool _is_signed_type; \
\
	if (!_length) { \
		*_vptr = 0; \
		break; \
	} \
\
	_end = _start + _length; \
	_start_unit = _start / _ts; \
	_end_unit = (_end + (_ts - 1)) / _ts; \
\
	_this_unit = _end_unit - 1; \
	_BT_DIAG_PUSH \
	_BT_DIAG_IGNORE_TYPE_LIMITS \
	_is_signed_type = _bt_is_signed_type(__typeof__(_v)); \
	_BT_DIAG_POP \
	/* Pre-fill with ones (sign-extend) if the bitfield's sign bit is set. */ \
	if (_is_signed_type \
	    && (_ptr[_this_unit] & _bt_lshift((type) 1, (_end % _ts ? _end % _ts : _ts) - 1))) \
		_v = ~(__typeof__(_v)) 0; \
	else \
		_v = 0; \
	/* Bitfield contained within a single unit. */ \
	if (_start_unit == _end_unit - 1) { \
		_cmask = _ptr[_this_unit]; \
		_cmask = _bt_rshift(_cmask, _start % _ts); \
		if ((_end - _start) % _ts) { \
			_mask = _bt_make_mask(type, _end - _start); \
			_cmask &= _mask; \
		} \
		_bt_safe_lshift(_v, _end - _start); \
		_v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \
		*_vptr = _v; \
		break; \
	} \
	/* Partial unit at the end of the bitfield. */ \
	if (_end % _ts) { \
		_cshift = _end % _ts; \
		_mask = _bt_make_mask(type, _cshift); \
		_cmask = _ptr[_this_unit]; \
		_cmask &= _mask; \
		_bt_safe_lshift(_v, _cshift); \
		_v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \
		_end -= _cshift; \
		_this_unit--; \
	} \
	/* Whole middle units, most significant first. */ \
	for (; (long) _this_unit >= (long) _start_unit + 1; _this_unit--) { \
		_bt_safe_lshift(_v, _ts); \
		_v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \
		_end -= _ts; \
	} \
	/* (Possibly partial) unit at the start of the bitfield. */ \
	if (_start % _ts) { \
		_mask = _bt_make_mask(type, _ts - (_start % _ts)); \
		_cmask = _ptr[_this_unit]; \
		_cmask = _bt_rshift(_cmask, _start % _ts); \
		_cmask &= _mask; \
		_bt_safe_lshift(_v, _ts - (_start % _ts)); \
		_v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \
	} else { \
		_bt_safe_lshift(_v, _ts); \
		_v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \
	} \
	*_vptr = _v; \
} while (0)

/*
 * Read, in big-endian unit order, the bitfield of `length` bits
 * starting at bit offset `start` from `ptr`, into the integer pointed
 * to by `vptr`: same contract as _bt_bitfield_read_le() above, but
 * units are consumed from the first (most significant) unit onwards.
 */
#define _bt_bitfield_read_be(ptr, type, start, length, vptr) \
do { \
	__typeof__(*(vptr)) *_vptr = (vptr); \
	__typeof__(*_vptr) _v; \
	type *_ptr = (void *) (ptr); \
	unsigned long _start = (start), _length = (length); \
	type _mask, _cmask; \
	unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \
	unsigned long _start_unit, _end_unit, _this_unit; \
	unsigned long _end, _cshift; /* _cshift is "complement shift" */ \
	bool _is_signed_type; \
\
	if (!_length) { \
		*_vptr = 0; \
		break; \
	} \
\
	_end = _start + _length; \
	_start_unit = _start / _ts; \
	_end_unit = (_end + (_ts - 1)) / _ts; \
\
	_this_unit = _start_unit; \
	_BT_DIAG_PUSH \
	_BT_DIAG_IGNORE_TYPE_LIMITS \
	_is_signed_type = _bt_is_signed_type(__typeof__(_v)); \
	_BT_DIAG_POP \
	/* Pre-fill with ones (sign-extend) if the bitfield's sign bit is set. */ \
	if (_is_signed_type \
	    && (_ptr[_this_unit] & _bt_lshift((type) 1, _ts - (_start % _ts) - 1))) \
		_v = ~(__typeof__(_v)) 0; \
	else \
		_v = 0; \
	/* Bitfield contained within a single unit. */ \
	if (_start_unit == _end_unit - 1) { \
		_cmask = _ptr[_this_unit]; \
		_cmask = _bt_rshift(_cmask, (_ts - (_end % _ts)) % _ts); \
		if ((_end - _start) % _ts) { \
			_mask = _bt_make_mask(type, _end - _start); \
			_cmask &= _mask; \
		} \
		_bt_safe_lshift(_v, _end - _start); \
		_v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \
		*_vptr = _v; \
		break; \
	} \
	/* Partial unit at the start of the bitfield. */ \
	if (_start % _ts) { \
		_cshift = _start % _ts; \
		_mask = _bt_make_mask(type, _ts - _cshift); \
		_cmask = _ptr[_this_unit]; \
		_cmask &= _mask; \
		_bt_safe_lshift(_v, _ts - _cshift); \
		_v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \
		_start += _ts - _cshift; \
		_this_unit++; \
	} \
	/* Whole middle units, most significant first. */ \
	for (; _this_unit < _end_unit - 1; _this_unit++) { \
		_bt_safe_lshift(_v, _ts); \
		_v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \
		_start += _ts; \
	} \
	/* (Possibly partial) unit at the end of the bitfield. */ \
	if (_end % _ts) { \
		_mask = _bt_make_mask(type, _end % _ts); \
		_cmask = _ptr[_this_unit]; \
		_cmask = _bt_rshift(_cmask, _ts - (_end % _ts)); \
		_cmask &= _mask; \
		_bt_safe_lshift(_v, _end % _ts); \
		_v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \
	} else { \
		_bt_safe_lshift(_v, _ts); \
		_v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \
	} \
	*_vptr = _v; \
} while (0)

/*
 * bt_bitfield_read - read integer from a bitfield in native endianness
 * bt_bitfield_read_le - read integer from a bitfield in little endian
 * bt_bitfield_read_be - read integer from a bitfield in big endian
 *
 * As on the write side, reading in the byte order opposite to the
 * native one forces the access unit to `unsigned char`, ignoring `type`.
 */

#if (BYTE_ORDER == LITTLE_ENDIAN)

#define bt_bitfield_read(ptr, type, start, length, vptr) \
	_bt_bitfield_read_le(ptr, type, start, length, vptr)

#define bt_bitfield_read_le(ptr, type, start, length, vptr) \
	_bt_bitfield_read_le(ptr, type, start, length, vptr)

#define bt_bitfield_read_be(ptr, type, start, length, vptr) \
	_bt_bitfield_read_be(ptr, unsigned char, start, length, vptr)

#elif (BYTE_ORDER == BIG_ENDIAN)

#define bt_bitfield_read(ptr, type, start, length, vptr) \
	_bt_bitfield_read_be(ptr, type, start, length, vptr)

#define bt_bitfield_read_le(ptr, type, start, length, vptr) \
	_bt_bitfield_read_le(ptr, unsigned char, start, length, vptr)

#define bt_bitfield_read_be(ptr, type, start, length, vptr) \
	_bt_bitfield_read_be(ptr, type, start, length, vptr)

#else /* (BYTE_ORDER == PDP_ENDIAN) */

#error "Byte order not supported"

#endif

#endif /* _BABELTRACE_BITFIELD_H */