SoW-2020-0002: Trace Hit Counters
deliverable/lttng-ust.git: libcounter/counter.c
/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * counter.c
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

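/*
 * libcounter implements multi-dimensional trace hit counters backed by
 * shared memory. Counters can be allocated globally, per possible CPU,
 * or both. Each shm mapping holds the counter array followed by an
 * overflow bitmap and an underflow bitmap (one bit per element each).
 */
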
#define _GNU_SOURCE
#include <errno.h>
#include <stdint.h>	/* SIZE_MAX, INT8_MAX, ... */
#include <stdlib.h>	/* free() */
#include "counter.h"
#include "counter-internal.h"
#include <lttng/bitmap.h>
#include <urcu/system.h>
#include <urcu/compiler.h>
#include <stdbool.h>
#include <helper.h>
#include <lttng/align.h>
#include "smp.h"
#include "shm.h"

static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
{
        return dimension->max_nr_elem;
}

static int lttng_counter_init_stride(const struct lib_counter_config *config,
                                     struct lib_counter *counter)
{
        size_t nr_dimensions = counter->nr_dimensions;
        size_t stride = 1;
        ssize_t i;

        for (i = nr_dimensions - 1; i >= 0; i--) {
                struct lib_counter_dimension *dimension = &counter->dimensions[i];
                size_t nr_elem;

                nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
                dimension->stride = stride;
                /* nr_elem should be minimum 1 for each dimension. */
                if (!nr_elem)
                        return -EINVAL;
                /* Check for multiplication overflow before it happens. */
                if (stride > SIZE_MAX / nr_elem)
                        return -EINVAL;
                stride *= nr_elem;
        }
        return 0;
}

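/*
 * Worked example: for dimensions { 4, 3, 2 }, the loop above assigns
 * strides { 6, 2, 1 }, so element { i, j, k } lives at linear index
 * i * 6 + j * 2 + k: row-major order, last dimension contiguous.
 */
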
static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
{
        struct lib_counter_layout *layout;
        size_t counter_size;
        size_t nr_elem = counter->allocated_elem;
        size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
        struct lttng_counter_shm_object *shm_object;

        if (shm_fd < 0)
                return 0;	/* Skip, will be populated later. */

        if (cpu == -1)
                layout = &counter->global_counters;
        else
                layout = &counter->percpu_counters[cpu];
        switch (counter->config.counter_size) {
        case COUNTER_SIZE_8_BIT:
        case COUNTER_SIZE_16_BIT:
        case COUNTER_SIZE_32_BIT:
        case COUNTER_SIZE_64_BIT:
                counter_size = (size_t) counter->config.counter_size;
                break;
        default:
                return -EINVAL;
        }
        layout->shm_fd = shm_fd;
        counters_offset = shm_length;
        shm_length += counter_size * nr_elem;
        overflow_offset = shm_length;
        shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
        underflow_offset = shm_length;
        shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
        layout->shm_len = shm_length;
        if (counter->is_daemon) {
                /* Allocate and clear shared memory. */
                shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
                        shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
                if (!shm_object)
                        return -ENOMEM;
        } else {
                /* Map pre-existing shared memory. */
                shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
                        shm_fd, shm_length);
                if (!shm_object)
                        return -ENOMEM;
        }
        layout->counters = shm_object->memory_map + counters_offset;
        layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
        layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
        return 0;
}

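/*
 * Layout of each shared memory mapping, as computed above (offsets
 * grow from 0):
 *
 *   counters_offset:   counter_size * nr_elem bytes of counter slots
 *   overflow_offset:   LTTNG_UST_ALIGN(nr_elem, 8) / 8 bytes of
 *                      overflow bitmap (one bit per element)
 *   underflow_offset:  same size again for the underflow bitmap
 */
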
int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
{
        struct lib_counter_config *config = &counter->config;
        struct lib_counter_layout *layout;
        int ret;

        if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
                return -EINVAL;
        layout = &counter->global_counters;
        if (layout->shm_fd >= 0)
                return -EBUSY;
        ret = lttng_counter_layout_init(counter, -1, fd);
        if (!ret)
                counter->received_shm++;
        return ret;
}

int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
{
        struct lib_counter_config *config = &counter->config;
        struct lib_counter_layout *layout;
        int ret;

        if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
                return -EINVAL;

        if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
                return -EINVAL;
        layout = &counter->percpu_counters[cpu];
        if (layout->shm_fd >= 0)
                return -EBUSY;
        ret = lttng_counter_layout_init(counter, cpu, fd);
        if (!ret)
                counter->received_shm++;
        return ret;
}

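/*
 * Sketch of the shm handover implied by the two setters above: the
 * daemon side creates the counter with valid fds (is_daemon = true)
 * and allocates the mappings; the application side creates it with
 * fd -1 everywhere, then receives one fd per expected layout through
 * lttng_counter_set_global_shm() / lttng_counter_set_cpu_shm().
 * lttng_counter_ready() below reports whether every expected mapping
 * has arrived.
 */
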
static
int lttng_counter_set_global_sum_step(struct lib_counter *counter,
                                      int64_t global_sum_step)
{
        if (global_sum_step < 0)
                return -EINVAL;

        switch (counter->config.counter_size) {
        case COUNTER_SIZE_8_BIT:
                if (global_sum_step > INT8_MAX)
                        return -EINVAL;
                counter->global_sum_step.s8 = (int8_t) global_sum_step;
                break;
        case COUNTER_SIZE_16_BIT:
                if (global_sum_step > INT16_MAX)
                        return -EINVAL;
                counter->global_sum_step.s16 = (int16_t) global_sum_step;
                break;
        case COUNTER_SIZE_32_BIT:
                if (global_sum_step > INT32_MAX)
                        return -EINVAL;
                counter->global_sum_step.s32 = (int32_t) global_sum_step;
                break;
        case COUNTER_SIZE_64_BIT:
                counter->global_sum_step.s64 = global_sum_step;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

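/*
 * The sum step is stored at the configured counter width (s8/s16/s32/
 * s64) so that the counting fast path, which lives outside this file,
 * can compare it against a counter value of the same width without
 * widening. How the step is consumed is not visible here.
 */
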
static
int validate_args(const struct lib_counter_config *config,
        size_t nr_dimensions,
        const size_t *max_nr_elem,
        int64_t global_sum_step,
        int global_counter_fd,
        int nr_counter_cpu_fds,
        const int *counter_cpu_fds)
{
        int nr_cpus = lttng_counter_num_possible_cpus();

        if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
                WARN_ON_ONCE(1);
                return -1;
        }
        if (!max_nr_elem)
                return -1;
        /*
         * The global sum step is only useful when allocating both
         * per-cpu and global counters.
         */
        if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
                        !(config->alloc & COUNTER_ALLOC_PER_CPU)))
                return -1;
        if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
                return -1;
        if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
                return -1;
        if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
                return -1;
        if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
                return -1;
        return 0;
}

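/*
 * Summary of the fd invariants enforced above:
 *   - global_counter_fd >= 0 requires COUNTER_ALLOC_GLOBAL;
 *   - counter_cpu_fds != NULL requires COUNTER_ALLOC_PER_CPU, with
 *     nr_counter_cpu_fds equal to the number of possible CPUs;
 *   - without COUNTER_ALLOC_PER_CPU, nr_counter_cpu_fds must be -1.
 */
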
struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
                                         size_t nr_dimensions,
                                         const size_t *max_nr_elem,
                                         int64_t global_sum_step,
                                         int global_counter_fd,
                                         int nr_counter_cpu_fds,
                                         const int *counter_cpu_fds,
                                         bool is_daemon)
{
        struct lib_counter *counter;
        size_t dimension, nr_elem = 1;
        int cpu, ret;
        int nr_handles = 0;
        int nr_cpus = lttng_counter_num_possible_cpus();

        if (validate_args(config, nr_dimensions, max_nr_elem,
                        global_sum_step, global_counter_fd, nr_counter_cpu_fds,
                        counter_cpu_fds))
                return NULL;
        counter = zmalloc(sizeof(struct lib_counter));
        if (!counter)
                return NULL;
        counter->global_counters.shm_fd = -1;
        counter->config = *config;
        counter->is_daemon = is_daemon;
        if (lttng_counter_set_global_sum_step(counter, global_sum_step))
                goto error_sum_step;
        counter->nr_dimensions = nr_dimensions;
        counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
        if (!counter->dimensions)
                goto error_dimensions;
        for (dimension = 0; dimension < nr_dimensions; dimension++)
                counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
        if (config->alloc & COUNTER_ALLOC_PER_CPU) {
                counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
                if (!counter->percpu_counters)
                        goto error_alloc_percpu;
                lttng_counter_for_each_possible_cpu(cpu)
                        counter->percpu_counters[cpu].shm_fd = -1;
        }

        if (lttng_counter_init_stride(config, counter))
                goto error_init_stride;
        /* TODO: saturation values. */
        for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
                nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
        counter->allocated_elem = nr_elem;

        if (config->alloc & COUNTER_ALLOC_GLOBAL)
                nr_handles++;
        if (config->alloc & COUNTER_ALLOC_PER_CPU)
                nr_handles += nr_cpus;
        counter->expected_shm = nr_handles;

        /* Allocate table for global and per-cpu counters. */
        counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
        if (!counter->object_table)
                goto error_alloc_object_table;

        if (config->alloc & COUNTER_ALLOC_GLOBAL) {
                ret = lttng_counter_layout_init(counter, -1, global_counter_fd);	/* global */
                if (ret)
                        goto layout_init_error;
        }
        if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
                lttng_counter_for_each_possible_cpu(cpu) {
                        ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
                        if (ret)
                                goto layout_init_error;
                }
        }
        return counter;

layout_init_error:
        lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
error_alloc_object_table:
error_init_stride:
        free(counter->percpu_counters);
error_alloc_percpu:
        free(counter->dimensions);
error_dimensions:
error_sum_step:
        free(counter);
        return NULL;
}

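/*
 * Example instantiation from a daemon-side caller (a sketch: only
 * .alloc and .counter_size are config fields this file relies on, and
 * global_fd / cpu_fds are hypothetical shm fds, one per possible CPU,
 * created by the caller):
 *
 *	struct lib_counter_config config = {
 *		.alloc = COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL,
 *		.counter_size = COUNTER_SIZE_32_BIT,
 *	};
 *	size_t max_nr_elem[] = { 128 };		// single dimension
 *	struct lib_counter *c;
 *
 *	c = lttng_counter_create(&config, 1, max_nr_elem, 0,
 *			global_fd, lttng_counter_num_possible_cpus(),
 *			cpu_fds, true);
 */
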
void lttng_counter_destroy(struct lib_counter *counter)
{
        struct lib_counter_config *config = &counter->config;

        if (config->alloc & COUNTER_ALLOC_PER_CPU)
                free(counter->percpu_counters);
        lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
        free(counter->dimensions);
        free(counter);
}

int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
{
        int shm_fd;

        shm_fd = counter->global_counters.shm_fd;
        if (shm_fd < 0)
                return -1;
        *fd = shm_fd;
        *len = counter->global_counters.shm_len;
        return 0;
}

int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
{
        struct lib_counter_layout *layout;
        int shm_fd;

        /* Reject negative cpu numbers as well as out-of-range ones. */
        if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
                return -1;
        layout = &counter->percpu_counters[cpu];
        shm_fd = layout->shm_fd;
        if (shm_fd < 0)
                return -1;
        *fd = shm_fd;
        *len = layout->shm_len;
        return 0;
}

bool lttng_counter_ready(struct lib_counter *counter)
{
        return counter->received_shm == counter->expected_shm;
}

int lttng_counter_read(const struct lib_counter_config *config,
                       struct lib_counter *counter,
                       const size_t *dimension_indexes,
                       int cpu, int64_t *value, bool *overflow,
                       bool *underflow)
{
        size_t index;
        struct lib_counter_layout *layout;

        if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
                return -EOVERFLOW;
        index = lttng_counter_get_index(config, counter, dimension_indexes);

        switch (config->alloc) {
        case COUNTER_ALLOC_PER_CPU:
                if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
                        return -EINVAL;
                layout = &counter->percpu_counters[cpu];
                break;
        case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
                if (cpu >= 0) {
                        if (cpu >= lttng_counter_num_possible_cpus())
                                return -EINVAL;
                        layout = &counter->percpu_counters[cpu];
                } else {
                        layout = &counter->global_counters;
                }
                break;
        case COUNTER_ALLOC_GLOBAL:
                if (cpu >= 0)
                        return -EINVAL;
                layout = &counter->global_counters;
                break;
        default:
                return -EINVAL;
        }
        if (caa_unlikely(!layout->counters))
                return -ENODEV;

        switch (config->counter_size) {
        case COUNTER_SIZE_8_BIT:
        {
                int8_t *int_p = (int8_t *) layout->counters + index;
                *value = (int64_t) CMM_LOAD_SHARED(*int_p);
                break;
        }
        case COUNTER_SIZE_16_BIT:
        {
                int16_t *int_p = (int16_t *) layout->counters + index;
                *value = (int64_t) CMM_LOAD_SHARED(*int_p);
                break;
        }
        case COUNTER_SIZE_32_BIT:
        {
                int32_t *int_p = (int32_t *) layout->counters + index;
                *value = (int64_t) CMM_LOAD_SHARED(*int_p);
                break;
        }
#if CAA_BITS_PER_LONG == 64
        case COUNTER_SIZE_64_BIT:
        {
                int64_t *int_p = (int64_t *) layout->counters + index;
                *value = CMM_LOAD_SHARED(*int_p);
                break;
        }
#endif
        default:
                return -EINVAL;
        }
        *overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
        *underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
        return 0;
}

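/*
 * CMM_LOAD_SHARED() and CMM_STORE_SHARED() are liburcu primitives for
 * accessing memory that other threads update concurrently: they keep
 * the compiler from tearing or fusing the access, so a reader can
 * sample a counter while tracers are still incrementing it.
 */
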
int lttng_counter_aggregate(const struct lib_counter_config *config,
                            struct lib_counter *counter,
                            const size_t *dimension_indexes,
                            int64_t *value, bool *overflow,
                            bool *underflow)
{
        int cpu, ret;
        int64_t v, sum = 0;
        bool of, uf;

        *overflow = false;
        *underflow = false;

        switch (config->alloc) {
        case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
        case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
                /* Read global counter. */
                ret = lttng_counter_read(config, counter, dimension_indexes,
                                -1, &v, &of, &uf);
                if (ret < 0)
                        return ret;
                sum += v;
                *overflow |= of;
                *underflow |= uf;
                break;
        case COUNTER_ALLOC_PER_CPU:
                break;
        default:
                return -EINVAL;
        }

        switch (config->alloc) {
        case COUNTER_ALLOC_GLOBAL:
                break;
        case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
        case COUNTER_ALLOC_PER_CPU:
                lttng_counter_for_each_possible_cpu(cpu) {
                        int64_t old = sum;

                        ret = lttng_counter_read(config, counter, dimension_indexes,
                                        cpu, &v, &of, &uf);
                        if (ret < 0)
                                return ret;
                        *overflow |= of;
                        *underflow |= uf;
                        /*
                         * Signed overflow is undefined behavior in C, so
                         * perform the addition on unsigned types and detect
                         * wraparound by comparing against the old sum.
                         */
                        sum = (int64_t) ((uint64_t) old + (uint64_t) v);
                        if (v > 0 && sum < old)
                                *overflow = true;
                        else if (v < 0 && sum > old)
                                *underflow = true;
                }
                break;
        default:
                return -EINVAL;
        }
        *value = sum;
        return 0;
}

static
int lttng_counter_clear_cpu(const struct lib_counter_config *config,
                            struct lib_counter *counter,
                            const size_t *dimension_indexes,
                            int cpu)
{
        size_t index;
        struct lib_counter_layout *layout;

        if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
                return -EOVERFLOW;
        index = lttng_counter_get_index(config, counter, dimension_indexes);

        switch (config->alloc) {
        case COUNTER_ALLOC_PER_CPU:
                if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
                        return -EINVAL;
                layout = &counter->percpu_counters[cpu];
                break;
        case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
                if (cpu >= 0) {
                        if (cpu >= lttng_counter_num_possible_cpus())
                                return -EINVAL;
                        layout = &counter->percpu_counters[cpu];
                } else {
                        layout = &counter->global_counters;
                }
                break;
        case COUNTER_ALLOC_GLOBAL:
                if (cpu >= 0)
                        return -EINVAL;
                layout = &counter->global_counters;
                break;
        default:
                return -EINVAL;
        }
        if (caa_unlikely(!layout->counters))
                return -ENODEV;

        switch (config->counter_size) {
        case COUNTER_SIZE_8_BIT:
        {
                int8_t *int_p = (int8_t *) layout->counters + index;
                CMM_STORE_SHARED(*int_p, 0);
                break;
        }
        case COUNTER_SIZE_16_BIT:
        {
                int16_t *int_p = (int16_t *) layout->counters + index;
                CMM_STORE_SHARED(*int_p, 0);
                break;
        }
        case COUNTER_SIZE_32_BIT:
        {
                int32_t *int_p = (int32_t *) layout->counters + index;
                CMM_STORE_SHARED(*int_p, 0);
                break;
        }
#if CAA_BITS_PER_LONG == 64
        case COUNTER_SIZE_64_BIT:
        {
                int64_t *int_p = (int64_t *) layout->counters + index;
                CMM_STORE_SHARED(*int_p, 0);
                break;
        }
#endif
        default:
                return -EINVAL;
        }
        lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
        lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
        return 0;
}

int lttng_counter_clear(const struct lib_counter_config *config,
                        struct lib_counter *counter,
                        const size_t *dimension_indexes)
{
        int cpu, ret;

        switch (config->alloc) {
        case COUNTER_ALLOC_PER_CPU:
                break;
        case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
        case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
                /* Clear global counter. */
                ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
                if (ret < 0)
                        return ret;
                break;
        default:
                return -EINVAL;
        }

        switch (config->alloc) {
        case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
        case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
                lttng_counter_for_each_possible_cpu(cpu) {
                        ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
                        if (ret < 0)
                                return ret;
                }
                break;
        case COUNTER_ALLOC_GLOBAL:
                break;
        default:
                return -EINVAL;
        }
        return 0;
}