/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <lttng/tracer.h>
#include <linux/cpumask.h>
#include <counter/counter.h>
#include <counter/counter-internal.h>
#include <wrapper/vmalloc.h>
static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
{
	return dimension->max_nr_elem;
}
static int lttng_counter_init_stride(const struct lib_counter_config *config,
				     struct lib_counter *counter)
{
	size_t nr_dimensions = counter->nr_dimensions;
	size_t stride = 1;
	ssize_t i;

	for (i = nr_dimensions - 1; i >= 0; i--) {
		struct lib_counter_dimension *dimension = &counter->dimensions[i];
		size_t nr_elem;

		nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
		dimension->stride = stride;
		/* nr_elem should be minimum 1 for each dimension. */
		if (!nr_elem)
			return -EINVAL;
		stride *= nr_elem;
		if (stride > SIZE_MAX / nr_elem)
			return -EINVAL;
	}
	return 0;
}
static int lttng_counter_layout_init(struct lib_counter *counter, int cpu)
{
	struct lib_counter_layout *layout;
	size_t counter_size;
	size_t nr_elem = counter->allocated_elem;

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
	case COUNTER_SIZE_16_BIT:
	case COUNTER_SIZE_32_BIT:
	case COUNTER_SIZE_64_BIT:
		counter_size = (size_t) counter->config.counter_size;
		break;
	default:
		return -EINVAL;
	}
	layout->counters = lttng_kvzalloc_node(ALIGN(counter_size * nr_elem,
						     1 << INTERNODE_CACHE_SHIFT),
					       GFP_KERNEL | __GFP_NOWARN,
					       cpu_to_node(max(cpu, 0)));
	if (!layout->counters)
		return -ENOMEM;
	layout->overflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
						      1 << INTERNODE_CACHE_SHIFT),
						GFP_KERNEL | __GFP_NOWARN,
						cpu_to_node(max(cpu, 0)));
	if (!layout->overflow_bitmap)
		return -ENOMEM;
	layout->underflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
						       1 << INTERNODE_CACHE_SHIFT),
						 GFP_KERNEL | __GFP_NOWARN,
						 cpu_to_node(max(cpu, 0)));
	if (!layout->underflow_bitmap)
		return -ENOMEM;
	return 0;
}
static void lttng_counter_layout_fini(struct lib_counter *counter, int cpu)
{
	struct lib_counter_layout *layout;

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
	lttng_kvfree(layout->counters);
	lttng_kvfree(layout->overflow_bitmap);
	lttng_kvfree(layout->underflow_bitmap);
}
int lttng_counter_set_global_sum_step(struct lib_counter *counter,
				      int64_t global_sum_step)
{
	if (global_sum_step < 0)
		return -EINVAL;

	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
		if (global_sum_step > S8_MAX)
			return -EINVAL;
		counter->global_sum_step.s8 = (int8_t) global_sum_step;
		break;
	case COUNTER_SIZE_16_BIT:
		if (global_sum_step > S16_MAX)
			return -EINVAL;
		counter->global_sum_step.s16 = (int16_t) global_sum_step;
		break;
	case COUNTER_SIZE_32_BIT:
		if (global_sum_step > S32_MAX)
			return -EINVAL;
		counter->global_sum_step.s32 = (int32_t) global_sum_step;
		break;
	case COUNTER_SIZE_64_BIT:
		counter->global_sum_step.s64 = global_sum_step;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static
int validate_args(const struct lib_counter_config *config,
		size_t nr_dimensions,
		const size_t *max_nr_elem,
		int64_t global_sum_step)
{
	if (BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
		WARN_ON_ONCE(1);
		return -1;
	}
	/*
	 * global sum step is only useful with allocating both per-cpu
	 * and global counters.
	 */
	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
			!(config->alloc & COUNTER_ALLOC_PER_CPU)))
		return -1;
	return 0;
}
struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
					 size_t nr_dimensions,
					 const size_t *max_nr_elem,
					 int64_t global_sum_step)
{
	struct lib_counter *counter;
	size_t dimension, nr_elem = 1;
	int cpu, ret;

	if (validate_args(config, nr_dimensions, max_nr_elem, global_sum_step))
		return NULL;
	counter = kzalloc(sizeof(struct lib_counter), GFP_KERNEL);
	if (!counter)
		return NULL;
	counter->config = *config;
	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
		goto error_sum_step;
	counter->nr_dimensions = nr_dimensions;
	counter->dimensions = kzalloc(nr_dimensions * sizeof(*counter->dimensions), GFP_KERNEL);
	if (!counter->dimensions)
		goto error_dimensions;
	for (dimension = 0; dimension < nr_dimensions; dimension++)
		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		counter->percpu_counters = alloc_percpu(struct lib_counter_layout);
		if (!counter->percpu_counters)
			goto error_alloc_percpu;
	}
	if (lttng_counter_init_stride(config, counter))
		goto error_init_stride;
	//TODO saturation values.
	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	counter->allocated_elem = nr_elem;
	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
		ret = lttng_counter_layout_init(counter, -1);	/* global */
		if (ret)
			goto layout_init_error;
	}
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			ret = lttng_counter_layout_init(counter, cpu);
			if (ret)
				goto layout_init_error;
		}
	}
	return counter;

layout_init_error:
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		for (cpu = 0; cpu < num_possible_cpus(); cpu++)
			lttng_counter_layout_fini(counter, cpu);
	}
	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		lttng_counter_layout_fini(counter, -1);
error_init_stride:
	free_percpu(counter->percpu_counters);
error_alloc_percpu:
	kfree(counter->dimensions);
error_dimensions:
error_sum_step:
	kfree(counter);
	return NULL;
}
EXPORT_SYMBOL_GPL(lttng_counter_create);
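
/*
 * Usage sketch (illustrative only, not part of this module): create a
 * one-dimensional counter backed by both per-cpu and global buckets, then
 * tear it down. Only the config fields used in this file (alloc,
 * counter_size) are shown; the element count and error handling style are
 * assumptions.
 *
 *	struct lib_counter_config config = {
 *		.alloc = COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL,
 *		.counter_size = COUNTER_SIZE_32_BIT,
 *	};
 *	const size_t max_nr_elem[] = { 128 };	(128 slots in dimension 0)
 *	struct lib_counter *counter;
 *
 *	counter = lttng_counter_create(&config, 1, max_nr_elem, 0);
 *	if (!counter)
 *		return -ENOMEM;
 *	...
 *	lttng_counter_destroy(counter);
 */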
void lttng_counter_destroy(struct lib_counter *counter)
{
	struct lib_counter_config *config = &counter->config;
	int cpu;

	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		for (cpu = 0; cpu < num_possible_cpus(); cpu++)
			lttng_counter_layout_fini(counter, cpu);
		free_percpu(counter->percpu_counters);
	}
	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		lttng_counter_layout_fini(counter, -1);
	kfree(counter->dimensions);
	kfree(counter);
}
EXPORT_SYMBOL_GPL(lttng_counter_destroy);
int lttng_counter_read(const struct lib_counter_config *config,
		       struct lib_counter *counter,
		       const size_t *dimension_indexes,
		       int cpu, int64_t *value, bool *overflow,
		       bool *underflow)
{
	struct lib_counter_layout *layout;
	size_t index;

	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= num_possible_cpus())
			return -EINVAL;
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= num_possible_cpus())
				return -EINVAL;
			layout = per_cpu_ptr(counter->percpu_counters, cpu);
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
#if BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = READ_ONCE(*int_p);
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
	}
	*overflow = test_bit(index, layout->overflow_bitmap);
	*underflow = test_bit(index, layout->underflow_bitmap);
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_read);
int lttng_counter_aggregate(const struct lib_counter_config *config,
			    struct lib_counter *counter,
			    const size_t *dimension_indexes,
			    int64_t *value, bool *overflow,
			    bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
					 -1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
						 cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/* Overflow is defined on unsigned types. */
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}

	*value = sum;
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_aggregate);
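
/*
 * Usage sketch (illustrative only): aggregate a single index across the
 * global bucket and every possible CPU, as done above through repeated
 * lttng_counter_read() calls. The index value and error handling style
 * are assumptions.
 *
 *	const size_t dimension_indexes[] = { 42 };
 *	int64_t value;
 *	bool overflow, underflow;
 *	int ret;
 *
 *	ret = lttng_counter_aggregate(&counter->config, counter,
 *			dimension_indexes, &value, &overflow, &underflow);
 *	if (ret)
 *		return ret;
 *
 * On success, value holds the sum of the global and per-cpu buckets for
 * that index; overflow/underflow report whether any sampled bucket had its
 * overflow/underflow bit set, or whether the 64-bit sum itself wrapped.
 */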
static
int lttng_counter_clear_cpu(const struct lib_counter_config *config,
			    struct lib_counter *counter,
			    const size_t *dimension_indexes,
			    int cpu)
{
	struct lib_counter_layout *layout;
	size_t index;

	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= num_possible_cpus())
			return -EINVAL;
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= num_possible_cpus())
				return -EINVAL;
			layout = per_cpu_ptr(counter->percpu_counters, cpu);
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
#if BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
	}
	clear_bit(index, layout->overflow_bitmap);
	clear_bit(index, layout->underflow_bitmap);
	return 0;
}
int lttng_counter_clear(const struct lib_counter_config *config,
			struct lib_counter *counter,
			const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_clear);
int lttng_counter_get_nr_dimensions(const struct lib_counter_config *config,
				    struct lib_counter *counter,
				    size_t *nr_dimensions)
{
	*nr_dimensions = counter->nr_dimensions;
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_get_nr_dimensions);
int lttng_counter_get_max_nr_elem(const struct lib_counter_config *config,
				  struct lib_counter *counter,
				  size_t *max_nr_elem)	/* array of size nr_dimensions */
{
	size_t dimension;

	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		max_nr_elem[dimension] = lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_get_max_nr_elem);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng counter library");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);