diff --git a/libcounter/counter-api.h b/libcounter/counter-api.h
new file mode 100644
index 00000000..0a7c0ade
--- /dev/null
+++ b/libcounter/counter-api.h
@@ -0,0 +1,296 @@
+/*
+ * counter/counter-api.h
+ *
+ * LTTng Counters API, requiring counter/config.h
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _LTTNG_COUNTER_API_H
+#define _LTTNG_COUNTER_API_H
+
+#include <stdint.h>			/* int8_t..int64_t, UINT*_MAX */
+#include <limits.h>
+#include "counter.h"
+#include "counter-internal.h"
+#include <urcu/compiler.h>		/* caa_unlikely(), CAA_BITS_PER_LONG */
+#include <urcu/uatomic.h>		/* uatomic_cmpxchg() */
+#include <lttng/bitmap.h>		/* lttng_bitmap_{test,set}_bit() */
+#include "../libringbuffer/getcpu.h"	/* lttng_ust_get_cpu() */
+
+/*
+ * Using unsigned arithmetic because overflow is defined.
+ */
+static inline int __lttng_counter_add(const struct lib_counter_config *config,
+				      enum lib_counter_config_alloc alloc,
+				      enum lib_counter_config_sync sync,
+				      struct lib_counter *counter,
+				      const size_t *dimension_indexes, int64_t v,
+				      int64_t *remainder)
+{
+	size_t index;
+	bool overflow = false, underflow = false;
+	struct lib_counter_layout *layout;
+	int64_t move_sum = 0;
+
+	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+		return -EOVERFLOW;
+	index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+	switch (alloc) {
+	case COUNTER_ALLOC_PER_CPU:
+		layout = &counter->percpu_counters[lttng_ust_get_cpu()];
+		break;
+	case COUNTER_ALLOC_GLOBAL:
+		layout = &counter->global_counters;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (caa_unlikely(!layout->counters))
+		return -ENODEV;
+
+	switch (config->counter_size) {
+	case COUNTER_SIZE_8_BIT:
+	{
+		int8_t *int_p = (int8_t *) layout->counters + index;
+		int8_t old, n, res;
+		int8_t global_sum_step = counter->global_sum_step.s8;
+
+		res = *int_p;
+		switch (sync) {
+		case COUNTER_SYNC_PER_CPU:
+		{
+			do {
+				move_sum = 0;
+				old = res;
+				n = (int8_t) ((uint8_t) old + (uint8_t) v);
+				/*
+				 * When the per-cpu counter drifts past
+				 * global_sum_step, move half a step out of
+				 * it; the caller pushes that amount to the
+				 * global counter through *remainder.
+				 */
+				if (caa_unlikely(n > (int8_t) global_sum_step))
+					move_sum = (int8_t) global_sum_step / 2;
+				else if (caa_unlikely(n < -(int8_t) global_sum_step))
+					move_sum = -((int8_t) global_sum_step / 2);
+				n -= move_sum;
+				res = uatomic_cmpxchg(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		case COUNTER_SYNC_GLOBAL:
+		{
+			do {
+				old = res;
+				n = (int8_t) ((uint8_t) old + (uint8_t) v);
+				res = uatomic_cmpxchg(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		}
+		/* Flag wrap-around beyond the 8-bit range. */
+		if (v > 0 && (v >= UINT8_MAX || n < old))
+			overflow = true;
+		else if (v < 0 && (v <= -UINT8_MAX || n > old))
+			underflow = true;
+		break;
+	}
+	case COUNTER_SIZE_16_BIT:
+	{
+		int16_t *int_p = (int16_t *) layout->counters + index;
+		int16_t old, n, res;
+		int16_t global_sum_step = counter->global_sum_step.s16;
+
+		res = *int_p;
+		switch (sync) {
+		case COUNTER_SYNC_PER_CPU:
+		{
+			do {
+				move_sum = 0;
+				old = res;
+				n = (int16_t) ((uint16_t) old + (uint16_t) v);
+				if (caa_unlikely(n > (int16_t) global_sum_step))
+					move_sum = (int16_t) global_sum_step / 2;
+				else if (caa_unlikely(n < -(int16_t) global_sum_step))
+					move_sum = -((int16_t) global_sum_step / 2);
+				n -= move_sum;
+				res = uatomic_cmpxchg(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		case COUNTER_SYNC_GLOBAL:
+		{
+			do {
+				old = res;
+				n = (int16_t) ((uint16_t) old + (uint16_t) v);
+				res = uatomic_cmpxchg(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		}
+		if (v > 0 && (v >= UINT16_MAX || n < old))
+			overflow = true;
+		else if (v < 0 && (v <= -UINT16_MAX || n > old))
+			underflow = true;
+		break;
+	}
+	case COUNTER_SIZE_32_BIT:
+	{
+		int32_t *int_p = (int32_t *) layout->counters + index;
+		int32_t old, n, res;
+		int32_t global_sum_step = counter->global_sum_step.s32;
+
+		res = *int_p;
+		switch (sync) {
+		case COUNTER_SYNC_PER_CPU:
+		{
+			do {
+				move_sum = 0;
+				old = res;
+				n = (int32_t) ((uint32_t) old + (uint32_t) v);
+				if (caa_unlikely(n > (int32_t) global_sum_step))
+					move_sum = (int32_t) global_sum_step / 2;
+				else if (caa_unlikely(n < -(int32_t) global_sum_step))
+					move_sum = -((int32_t) global_sum_step / 2);
+				n -= move_sum;
+				res = uatomic_cmpxchg(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		case COUNTER_SYNC_GLOBAL:
+		{
+			do {
+				old = res;
+				n = (int32_t) ((uint32_t) old + (uint32_t) v);
+				res = uatomic_cmpxchg(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		}
+		if (v > 0 && (v >= UINT32_MAX || n < old))
+			overflow = true;
+		else if (v < 0 && (v <= -UINT32_MAX || n > old))
+			underflow = true;
+		break;
+	}
+#if CAA_BITS_PER_LONG == 64
+	case COUNTER_SIZE_64_BIT:
+	{
+		int64_t *int_p = (int64_t *) layout->counters + index;
+		int64_t old, n, res;
+		int64_t global_sum_step = counter->global_sum_step.s64;
+
+		res = *int_p;
+		switch (sync) {
+		case COUNTER_SYNC_PER_CPU:
+		{
+			do {
+				move_sum = 0;
+				old = res;
+				n = (int64_t) ((uint64_t) old + (uint64_t) v);
+				if (caa_unlikely(n > (int64_t) global_sum_step))
+					move_sum = (int64_t) global_sum_step / 2;
+				else if (caa_unlikely(n < -(int64_t) global_sum_step))
+					move_sum = -((int64_t) global_sum_step / 2);
+				n -= move_sum;
+				res = uatomic_cmpxchg(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		case COUNTER_SYNC_GLOBAL:
+		{
+			do {
+				old = res;
+				n = (int64_t) ((uint64_t) old + (uint64_t) v);
+				res = uatomic_cmpxchg(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		}
+		/* v cannot exceed the 64-bit range, so only wrap-around matters. */
+		if (v > 0 && n < old)
+			overflow = true;
+		else if (v < 0 && n > old)
+			underflow = true;
+		break;
+	}
+#endif
+	default:
+		return -EINVAL;
+	}
+	/* Latch sticky overflow/underflow flags for this index. */
+	if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap)))
+		lttng_bitmap_set_bit(index, layout->overflow_bitmap);
+	else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap)))
+		lttng_bitmap_set_bit(index, layout->underflow_bitmap);
+	if (remainder)
+		*remainder = move_sum;
+	return 0;
+}
+
+static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
+					     struct lib_counter *counter,
+					     const size_t *dimension_indexes, int64_t v)
+{
+	int64_t move_sum;
+	int ret;
+
+	ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
+				  counter, dimension_indexes, v, &move_sum);
+	if (caa_unlikely(ret))
+		return ret;
+	/* Flush the amount moved out of the per-cpu slot into the global counter. */
+	if (caa_unlikely(move_sum))
+		return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
+					   counter, dimension_indexes, move_sum, NULL);
+	return 0;
+}
+
+static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
+					     struct lib_counter *counter,
+					     const size_t *dimension_indexes, int64_t v)
+{
+	return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
+				   dimension_indexes, v, NULL);
+}
+
+static inline int lttng_counter_add(const struct lib_counter_config *config,
+				    struct lib_counter *counter,
+				    const size_t *dimension_indexes, int64_t v)
+{
+	switch (config->alloc) {
+	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+		return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
+	case COUNTER_ALLOC_GLOBAL:
+		return __lttng_counter_add_global(config, counter, dimension_indexes, v);
+	default:
+		return -EINVAL;
+	}
+}
+
+static inline int lttng_counter_inc(const struct lib_counter_config *config,
+				    struct lib_counter *counter,
+				    const size_t *dimension_indexes)
+{
+	return lttng_counter_add(config, counter, dimension_indexes, 1);
+}
+
+static inline int lttng_counter_dec(const struct lib_counter_config *config,
+				    struct lib_counter *counter,
+				    const size_t *dimension_indexes)
+{
+	return lttng_counter_add(config, counter, dimension_indexes, -1);
+}
+
+#endif /* _LTTNG_COUNTER_API_H */
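
The COUNTER_SYNC_PER_CPU path above implements a split counter: each CPU
accumulates into its own small slot and, once that slot drifts beyond
global_sum_step, half a step is moved into the global counter so the
per-slot value stays bounded. The following is a minimal, self-contained
sketch of that scheme, not part of lttng-ust: it substitutes GCC __atomic
builtins for liburcu's uatomic_cmpxchg(), and the demo_* names and the
DEMO_SUM_STEP value are hypothetical.

/* split_counter_demo.c: standalone sketch, not part of lttng-ust. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SUM_STEP 128	/* hypothetical drift bound per slot */

static int64_t demo_global;	/* shared global counter */

/*
 * Add v to one per-cpu slot with a cmpxchg retry loop, moving half a
 * sum step to the global counter whenever the slot would drift beyond
 * +/- DEMO_SUM_STEP, mirroring the COUNTER_SYNC_PER_CPU case above.
 */
static void demo_add(int32_t *slot, int32_t v)
{
	int32_t old, n, move_sum;

	old = __atomic_load_n(slot, __ATOMIC_RELAXED);
	do {
		move_sum = 0;
		n = (int32_t) ((uint32_t) old + (uint32_t) v);
		if (n > DEMO_SUM_STEP)
			move_sum = DEMO_SUM_STEP / 2;
		else if (n < -DEMO_SUM_STEP)
			move_sum = -(DEMO_SUM_STEP / 2);
		n -= move_sum;
		/* On failure, 'old' is reloaded with the current value. */
	} while (!__atomic_compare_exchange_n(slot, &old, n, 0,
					      __ATOMIC_RELAXED, __ATOMIC_RELAXED));
	if (move_sum)	/* flush the moved amount, as *remainder does above */
		__atomic_add_fetch(&demo_global, move_sum, __ATOMIC_RELAXED);
}

int main(void)
{
	int32_t slot = 0;

	for (int i = 0; i < 1000; i++)
		demo_add(&slot, 1);
	printf("slot=%d global=%lld total=%lld\n", slot,
	       (long long) demo_global, (long long) (slot + demo_global));
	return 0;
}

The invariant to note is that every update conserves the total: the slot
changes by v - move_sum while the global counter absorbs move_sum, so
slot + global always equals the true count, and only the small per-slot
residue is subject to the wrap-around detection handled by the
overflow/underflow bitmaps.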