/*
 * SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
#include "counter-internal.h"

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>

#include <lttng/align.h>
#include <lttng/bitmap.h>

#include <urcu/compiler.h>
#include <urcu/system.h>
21 static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension
*dimension
)
23 return dimension
->max_nr_elem
;
26 static int lttng_counter_init_stride(const struct lib_counter_config
*config
,
27 struct lib_counter
*counter
)
29 size_t nr_dimensions
= counter
->nr_dimensions
;
33 for (i
= nr_dimensions
- 1; i
>= 0; i
--) {
34 struct lib_counter_dimension
*dimension
= &counter
->dimensions
[i
];
37 nr_elem
= lttng_counter_get_dimension_nr_elements(dimension
);
38 dimension
->stride
= stride
;
39 /* nr_elem should be minimum 1 for each dimension. */
43 if (stride
> SIZE_MAX
/ nr_elem
)
49 static int lttng_counter_layout_init(struct lib_counter
*counter
, int cpu
, int shm_fd
)
51 struct lib_counter_layout
*layout
;
53 size_t nr_elem
= counter
->allocated_elem
;
54 size_t shm_length
= 0, counters_offset
, overflow_offset
, underflow_offset
;
55 struct lttng_counter_shm_object
*shm_object
;
58 return 0; /* Skip, will be populated later. */
61 layout
= &counter
->global_counters
;
63 layout
= &counter
->percpu_counters
[cpu
];
64 switch (counter
->config
.counter_size
) {
65 case COUNTER_SIZE_8_BIT
:
66 case COUNTER_SIZE_16_BIT
:
67 case COUNTER_SIZE_32_BIT
:
68 case COUNTER_SIZE_64_BIT
:
69 counter_size
= (size_t) counter
->config
.counter_size
;
74 layout
->shm_fd
= shm_fd
;
75 counters_offset
= shm_length
;
76 shm_length
+= counter_size
* nr_elem
;
77 overflow_offset
= shm_length
;
78 shm_length
+= LTTNG_UST_ALIGN(nr_elem
, 8) / 8;
79 underflow_offset
= shm_length
;
80 shm_length
+= LTTNG_UST_ALIGN(nr_elem
, 8) / 8;
81 layout
->shm_len
= shm_length
;
82 if (counter
->is_daemon
) {
83 /* Allocate and clear shared memory. */
84 shm_object
= lttng_counter_shm_object_table_alloc(counter
->object_table
,
85 shm_length
, LTTNG_COUNTER_SHM_OBJECT_SHM
, shm_fd
, cpu
);
89 /* Map pre-existing shared memory. */
90 shm_object
= lttng_counter_shm_object_table_append_shm(counter
->object_table
,
95 layout
->counters
= shm_object
->memory_map
+ counters_offset
;
96 layout
->overflow_bitmap
= (unsigned long *)(shm_object
->memory_map
+ overflow_offset
);
97 layout
->underflow_bitmap
= (unsigned long *)(shm_object
->memory_map
+ underflow_offset
);
101 int lttng_counter_set_global_shm(struct lib_counter
*counter
, int fd
)
103 struct lib_counter_config
*config
= &counter
->config
;
104 struct lib_counter_layout
*layout
;
107 if (!(config
->alloc
& COUNTER_ALLOC_GLOBAL
))
109 layout
= &counter
->global_counters
;
110 if (layout
->shm_fd
>= 0)
112 ret
= lttng_counter_layout_init(counter
, -1, fd
);
114 counter
->received_shm
++;
118 int lttng_counter_set_cpu_shm(struct lib_counter
*counter
, int cpu
, int fd
)
120 struct lib_counter_config
*config
= &counter
->config
;
121 struct lib_counter_layout
*layout
;
124 if (cpu
< 0 || cpu
>= lttng_counter_num_possible_cpus())
127 if (!(config
->alloc
& COUNTER_ALLOC_PER_CPU
))
129 layout
= &counter
->percpu_counters
[cpu
];
130 if (layout
->shm_fd
>= 0)
132 ret
= lttng_counter_layout_init(counter
, cpu
, fd
);
134 counter
->received_shm
++;
139 int lttng_counter_set_global_sum_step(struct lib_counter
*counter
,
140 int64_t global_sum_step
)
142 if (global_sum_step
< 0)
145 switch (counter
->config
.counter_size
) {
146 case COUNTER_SIZE_8_BIT
:
147 if (global_sum_step
> INT8_MAX
)
149 counter
->global_sum_step
.s8
= (int8_t) global_sum_step
;
151 case COUNTER_SIZE_16_BIT
:
152 if (global_sum_step
> INT16_MAX
)
154 counter
->global_sum_step
.s16
= (int16_t) global_sum_step
;
156 case COUNTER_SIZE_32_BIT
:
157 if (global_sum_step
> INT32_MAX
)
159 counter
->global_sum_step
.s32
= (int32_t) global_sum_step
;
161 case COUNTER_SIZE_64_BIT
:
162 counter
->global_sum_step
.s64
= global_sum_step
;
172 int validate_args(const struct lib_counter_config
*config
,
173 size_t nr_dimensions
,
174 const size_t *max_nr_elem
,
175 int64_t global_sum_step
,
176 int global_counter_fd
,
177 int nr_counter_cpu_fds
,
178 const int *counter_cpu_fds
)
180 int nr_cpus
= lttng_counter_num_possible_cpus();
182 if (CAA_BITS_PER_LONG
!= 64 && config
->counter_size
== COUNTER_SIZE_64_BIT
) {
189 * global sum step is only useful with allocating both per-cpu
190 * and global counters.
192 if (global_sum_step
&& (!(config
->alloc
& COUNTER_ALLOC_GLOBAL
) ||
193 !(config
->alloc
& COUNTER_ALLOC_PER_CPU
)))
195 if (!(config
->alloc
& COUNTER_ALLOC_GLOBAL
) && global_counter_fd
>= 0)
197 if (!(config
->alloc
& COUNTER_ALLOC_PER_CPU
) && counter_cpu_fds
)
199 if (!(config
->alloc
& COUNTER_ALLOC_PER_CPU
) && nr_counter_cpu_fds
>= 0)
201 if (counter_cpu_fds
&& nr_cpus
!= nr_counter_cpu_fds
)
206 struct lib_counter
*lttng_counter_create(const struct lib_counter_config
*config
,
207 size_t nr_dimensions
,
208 const size_t *max_nr_elem
,
209 int64_t global_sum_step
,
210 int global_counter_fd
,
211 int nr_counter_cpu_fds
,
212 const int *counter_cpu_fds
,
215 struct lib_counter
*counter
;
216 size_t dimension
, nr_elem
= 1;
219 int nr_cpus
= lttng_counter_num_possible_cpus();
221 if (validate_args(config
, nr_dimensions
, max_nr_elem
,
222 global_sum_step
, global_counter_fd
, nr_counter_cpu_fds
,
225 counter
= zmalloc(sizeof(struct lib_counter
));
228 counter
->global_counters
.shm_fd
= -1;
229 counter
->config
= *config
;
230 counter
->is_daemon
= is_daemon
;
231 if (lttng_counter_set_global_sum_step(counter
, global_sum_step
))
233 counter
->nr_dimensions
= nr_dimensions
;
234 counter
->dimensions
= zmalloc(nr_dimensions
* sizeof(*counter
->dimensions
));
235 if (!counter
->dimensions
)
236 goto error_dimensions
;
237 for (dimension
= 0; dimension
< nr_dimensions
; dimension
++)
238 counter
->dimensions
[dimension
].max_nr_elem
= max_nr_elem
[dimension
];
239 if (config
->alloc
& COUNTER_ALLOC_PER_CPU
) {
240 counter
->percpu_counters
= zmalloc(sizeof(struct lib_counter_layout
) * nr_cpus
);
241 if (!counter
->percpu_counters
)
242 goto error_alloc_percpu
;
243 lttng_counter_for_each_possible_cpu(cpu
)
244 counter
->percpu_counters
[cpu
].shm_fd
= -1;
247 if (lttng_counter_init_stride(config
, counter
))
248 goto error_init_stride
;
249 //TODO saturation values.
250 for (dimension
= 0; dimension
< counter
->nr_dimensions
; dimension
++)
251 nr_elem
*= lttng_counter_get_dimension_nr_elements(&counter
->dimensions
[dimension
]);
252 counter
->allocated_elem
= nr_elem
;
254 if (config
->alloc
& COUNTER_ALLOC_GLOBAL
)
256 if (config
->alloc
& COUNTER_ALLOC_PER_CPU
)
257 nr_handles
+= nr_cpus
;
258 counter
->expected_shm
= nr_handles
;
260 /* Allocate table for global and per-cpu counters. */
261 counter
->object_table
= lttng_counter_shm_object_table_create(nr_handles
);
262 if (!counter
->object_table
)
263 goto error_alloc_object_table
;
265 if (config
->alloc
& COUNTER_ALLOC_GLOBAL
) {
266 ret
= lttng_counter_layout_init(counter
, -1, global_counter_fd
); /* global */
268 goto layout_init_error
;
270 if ((config
->alloc
& COUNTER_ALLOC_PER_CPU
) && counter_cpu_fds
) {
271 lttng_counter_for_each_possible_cpu(cpu
) {
272 ret
= lttng_counter_layout_init(counter
, cpu
, counter_cpu_fds
[cpu
]);
274 goto layout_init_error
;
280 lttng_counter_shm_object_table_destroy(counter
->object_table
, is_daemon
);
281 error_alloc_object_table
:
283 free(counter
->percpu_counters
);
285 free(counter
->dimensions
);
292 void lttng_counter_destroy(struct lib_counter
*counter
)
294 struct lib_counter_config
*config
= &counter
->config
;
296 if (config
->alloc
& COUNTER_ALLOC_PER_CPU
)
297 free(counter
->percpu_counters
);
298 lttng_counter_shm_object_table_destroy(counter
->object_table
, counter
->is_daemon
);
299 free(counter
->dimensions
);
303 int lttng_counter_get_global_shm(struct lib_counter
*counter
, int *fd
, size_t *len
)
307 shm_fd
= counter
->global_counters
.shm_fd
;
311 *len
= counter
->global_counters
.shm_len
;
315 int lttng_counter_get_cpu_shm(struct lib_counter
*counter
, int cpu
, int *fd
, size_t *len
)
317 struct lib_counter_layout
*layout
;
320 if (cpu
>= lttng_counter_num_possible_cpus())
322 layout
= &counter
->percpu_counters
[cpu
];
323 shm_fd
= layout
->shm_fd
;
327 *len
= layout
->shm_len
;
331 bool lttng_counter_ready(struct lib_counter
*counter
)
333 if (counter
->received_shm
== counter
->expected_shm
)
338 int lttng_counter_read(const struct lib_counter_config
*config
,
339 struct lib_counter
*counter
,
340 const size_t *dimension_indexes
,
341 int cpu
, int64_t *value
, bool *overflow
,
345 struct lib_counter_layout
*layout
;
347 if (caa_unlikely(lttng_counter_validate_indexes(config
, counter
, dimension_indexes
)))
349 index
= lttng_counter_get_index(config
, counter
, dimension_indexes
);
351 switch (config
->alloc
) {
352 case COUNTER_ALLOC_PER_CPU
:
353 if (cpu
< 0 || cpu
>= lttng_counter_num_possible_cpus())
355 layout
= &counter
->percpu_counters
[cpu
];
357 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
359 if (cpu
>= lttng_counter_num_possible_cpus())
361 layout
= &counter
->percpu_counters
[cpu
];
363 layout
= &counter
->global_counters
;
366 case COUNTER_ALLOC_GLOBAL
:
369 layout
= &counter
->global_counters
;
374 if (caa_unlikely(!layout
->counters
))
377 switch (config
->counter_size
) {
378 case COUNTER_SIZE_8_BIT
:
380 int8_t *int_p
= (int8_t *) layout
->counters
+ index
;
381 *value
= (int64_t) CMM_LOAD_SHARED(*int_p
);
384 case COUNTER_SIZE_16_BIT
:
386 int16_t *int_p
= (int16_t *) layout
->counters
+ index
;
387 *value
= (int64_t) CMM_LOAD_SHARED(*int_p
);
390 case COUNTER_SIZE_32_BIT
:
392 int32_t *int_p
= (int32_t *) layout
->counters
+ index
;
393 *value
= (int64_t) CMM_LOAD_SHARED(*int_p
);
396 #if CAA_BITS_PER_LONG == 64
397 case COUNTER_SIZE_64_BIT
:
399 int64_t *int_p
= (int64_t *) layout
->counters
+ index
;
400 *value
= CMM_LOAD_SHARED(*int_p
);
407 *overflow
= lttng_bitmap_test_bit(index
, layout
->overflow_bitmap
);
408 *underflow
= lttng_bitmap_test_bit(index
, layout
->underflow_bitmap
);
412 int lttng_counter_aggregate(const struct lib_counter_config
*config
,
413 struct lib_counter
*counter
,
414 const size_t *dimension_indexes
,
415 int64_t *value
, bool *overflow
,
425 switch (config
->alloc
) {
426 case COUNTER_ALLOC_GLOBAL
: /* Fallthrough */
427 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
428 /* Read global counter. */
429 ret
= lttng_counter_read(config
, counter
, dimension_indexes
,
437 case COUNTER_ALLOC_PER_CPU
:
443 switch (config
->alloc
) {
444 case COUNTER_ALLOC_GLOBAL
:
446 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
: /* Fallthrough */
447 case COUNTER_ALLOC_PER_CPU
:
448 lttng_counter_for_each_possible_cpu(cpu
) {
451 ret
= lttng_counter_read(config
, counter
, dimension_indexes
,
457 /* Overflow is defined on unsigned types. */
458 sum
= (int64_t) ((uint64_t) old
+ (uint64_t) v
);
459 if (v
> 0 && sum
< old
)
461 else if (v
< 0 && sum
> old
)
473 int lttng_counter_clear_cpu(const struct lib_counter_config
*config
,
474 struct lib_counter
*counter
,
475 const size_t *dimension_indexes
,
479 struct lib_counter_layout
*layout
;
481 if (caa_unlikely(lttng_counter_validate_indexes(config
, counter
, dimension_indexes
)))
483 index
= lttng_counter_get_index(config
, counter
, dimension_indexes
);
485 switch (config
->alloc
) {
486 case COUNTER_ALLOC_PER_CPU
:
487 if (cpu
< 0 || cpu
>= lttng_counter_num_possible_cpus())
489 layout
= &counter
->percpu_counters
[cpu
];
491 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
493 if (cpu
>= lttng_counter_num_possible_cpus())
495 layout
= &counter
->percpu_counters
[cpu
];
497 layout
= &counter
->global_counters
;
500 case COUNTER_ALLOC_GLOBAL
:
503 layout
= &counter
->global_counters
;
508 if (caa_unlikely(!layout
->counters
))
511 switch (config
->counter_size
) {
512 case COUNTER_SIZE_8_BIT
:
514 int8_t *int_p
= (int8_t *) layout
->counters
+ index
;
515 CMM_STORE_SHARED(*int_p
, 0);
518 case COUNTER_SIZE_16_BIT
:
520 int16_t *int_p
= (int16_t *) layout
->counters
+ index
;
521 CMM_STORE_SHARED(*int_p
, 0);
524 case COUNTER_SIZE_32_BIT
:
526 int32_t *int_p
= (int32_t *) layout
->counters
+ index
;
527 CMM_STORE_SHARED(*int_p
, 0);
530 #if CAA_BITS_PER_LONG == 64
531 case COUNTER_SIZE_64_BIT
:
533 int64_t *int_p
= (int64_t *) layout
->counters
+ index
;
534 CMM_STORE_SHARED(*int_p
, 0);
541 lttng_bitmap_clear_bit(index
, layout
->overflow_bitmap
);
542 lttng_bitmap_clear_bit(index
, layout
->underflow_bitmap
);
546 int lttng_counter_clear(const struct lib_counter_config
*config
,
547 struct lib_counter
*counter
,
548 const size_t *dimension_indexes
)
552 switch (config
->alloc
) {
553 case COUNTER_ALLOC_PER_CPU
:
555 case COUNTER_ALLOC_GLOBAL
: /* Fallthrough */
556 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
557 /* Clear global counter. */
558 ret
= lttng_counter_clear_cpu(config
, counter
, dimension_indexes
, -1);
566 switch (config
->alloc
) {
567 case COUNTER_ALLOC_PER_CPU
: /* Fallthrough */
568 case COUNTER_ALLOC_PER_CPU
| COUNTER_ALLOC_GLOBAL
:
569 lttng_counter_for_each_possible_cpu(cpu
) {
570 ret
= lttng_counter_clear_cpu(config
, counter
, dimension_indexes
, cpu
);
575 case COUNTER_ALLOC_GLOBAL
: