// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>

#include <rseq/mempool.h>
#include <sys/mman.h>
#include <assert.h>
#include <string.h>
#include <pthread.h>
#include <unistd.h>
#include <stdlib.h>
#include <rseq/compiler.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#ifdef HAVE_LIBNUMA
# include <numa.h>
# include <numaif.h>
#endif

#include "rseq-utils.h"

/*
 * rseq-mempool.c: rseq CPU-Local Storage (CLS) memory allocator.
 *
 * The rseq per-CPU memory allocator allows the application to request
 * memory pools of CPU-Local memory, each containing objects of a
 * given size (rounded up to the next power of 2), reserving a given
 * virtual address size per CPU, for a given maximum number of CPUs.
 *
 * The per-CPU memory allocator is analogous to TLS (Thread-Local
 * Storage) memory: whereas TLS provides Thread-Local Storage, the
 * per-CPU memory allocator provides CPU-Local Storage.
 */
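
/*
 * Typical usage sketch (illustrative only, not compiled as part of this
 * file). It assumes the public accessors declared in <rseq/mempool.h>,
 * notably rseq_percpu_ptr(), which is assumed to be the header's typed
 * wrapper around __rseq_percpu_ptr() defined below. The sched_getcpu()
 * call is only a placeholder for whatever CPU-id source the caller
 * uses (e.g. an rseq critical section), and the pool name and sizes
 * are arbitrary:
 *
 *	struct my_counter { uint64_t count; };
 *
 *	struct rseq_percpu_pool *pool;
 *	struct my_counter __rseq_percpu *c;
 *	struct my_counter *cpu_c;
 *	int cpu;
 *
 *	// One 4 kB range per CPU, for up to 128 CPUs.
 *	pool = rseq_percpu_pool_create("my-counters",
 *			sizeof(struct my_counter), 4096, 128, NULL);
 *	if (!pool)
 *		abort();
 *	c = (struct my_counter __rseq_percpu *) rseq_percpu_zmalloc(pool);
 *	if (!c)
 *		abort();
 *	cpu = sched_getcpu();			// placeholder CPU-id source
 *	cpu_c = rseq_percpu_ptr(c, cpu);	// this CPU's copy of the item
 *	cpu_c->count++;
 *	rseq_percpu_free(c);
 *	rseq_percpu_pool_destroy(pool);
 */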

/*
 * Use the high bits of per-CPU addresses to index the pool.
 * This leaves the low bits available to the application for pointer
 * tagging (based on the next power of 2 alignment of the allocations).
 */
#if RSEQ_BITS_PER_LONG == 64
# define POOL_INDEX_BITS	16
#else
# define POOL_INDEX_BITS	8
#endif
#define MAX_NR_POOLS		(1UL << POOL_INDEX_BITS)
#define POOL_INDEX_SHIFT	(RSEQ_BITS_PER_LONG - POOL_INDEX_BITS)
#define MAX_POOL_LEN		(1UL << POOL_INDEX_SHIFT)
#define MAX_POOL_LEN_MASK	(MAX_POOL_LEN - 1)
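
/*
 * Worked example of the resulting handle layout (a sketch; the exact
 * constants follow from the macros above): on a 64-bit build,
 * POOL_INDEX_BITS is 16 and POOL_INDEX_SHIFT is 48, so an allocated
 * handle is encoded as
 *
 *	handle = ((uintptr_t) pool->index << 48) | item_offset;
 *
 * and decoded (as __rseq_percpu_ptr() does below) with
 *
 *	pool_index  = handle >> POOL_INDEX_SHIFT;
 *	item_offset = handle & MAX_POOL_LEN_MASK;
 *
 * Since item_offset is always a multiple of the item's power-of-2 size,
 * the low log2(item_len) bits of a handle are zero and remain usable
 * for application-level pointer tagging.
 */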

#define POOL_SET_NR_ENTRIES	POOL_INDEX_SHIFT

/*
 * Smallest allocation should hold enough space for a free list pointer.
 */
#if RSEQ_BITS_PER_LONG == 64
# define POOL_SET_MIN_ENTRY	3	/* Smallest item_len=8 */
#else
# define POOL_SET_MIN_ENTRY	2	/* Smallest item_len=4 */
#endif

/*
 * Skip pool index 0 so that an item at offset 0 within a pool never
 * encodes to a NULL pointer.
 */
#define FIRST_POOL		1

#define BIT_PER_ULONG		(8 * sizeof(unsigned long))

#define MOVE_PAGES_BATCH_SIZE	4096

struct free_list_node;

struct free_list_node {
	struct free_list_node *next;
};

/* This lock protects pool create/destroy. */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

struct rseq_pool_attr {
	bool mmap_set;
	void *(*mmap_func)(void *priv, size_t len);
	int (*munmap_func)(void *priv, void *ptr, size_t len);
	void *mmap_priv;

	bool robust_set;
};

struct rseq_percpu_pool_range;

struct rseq_percpu_pool_range {
	struct rseq_percpu_pool_range *next;
	struct rseq_percpu_pool *pool;	/* Backward ref. to container pool. */
	void *base;
	size_t next_unused;
	/* Track alloc/free. */
	unsigned long *alloc_bitmap;
};

struct rseq_percpu_pool {
	/* Linked-list of ranges. */
	struct rseq_percpu_pool_range *ranges;

	unsigned int index;
	size_t item_len;
	size_t percpu_len;
	int item_order;
	int max_nr_cpus;

	/*
	 * The free list chains freed items on the CPU 0 address range.
	 * We should rethink this decision if false sharing between
	 * malloc/free from other CPUs and data accesses from CPU 0
	 * becomes an issue. This is a NULL-terminated singly-linked
	 * list.
	 */
	struct free_list_node *free_list_head;

	/* This lock protects allocation/free within the pool. */
	pthread_mutex_t lock;

	struct rseq_pool_attr attr;
	char *name;
};

//TODO: the array of pools should grow dynamically on create.
static struct rseq_percpu_pool rseq_percpu_pool[MAX_NR_POOLS];

/*
 * Pool set entries are indexed by item_len rounded to the next power of
 * 2. A pool set can contain NULL pool entries, in which case the next
 * present entry with a large enough item size will be used for the
 * allocation.
 */
struct rseq_percpu_pool_set {
	/* This lock protects add vs malloc/zmalloc within the pool set. */
	pthread_mutex_t lock;
	struct rseq_percpu_pool *entries[POOL_SET_NR_ENTRIES];
};
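
/*
 * Pool set usage sketch (illustrative only, not compiled as part of
 * this file): a set groups pools of different item sizes so callers
 * can allocate by length. The pool names and sizes below are arbitrary
 * placeholders, and error checking is omitted for brevity:
 *
 *	struct rseq_percpu_pool_set *set;
 *	struct rseq_percpu_pool *pool32, *pool128;
 *	void __rseq_percpu *p;
 *
 *	set = rseq_percpu_pool_set_create();
 *	pool32 = rseq_percpu_pool_create("set-32", 32, 4096, 128, NULL);
 *	pool128 = rseq_percpu_pool_create("set-128", 128, 4096, 128, NULL);
 *	rseq_percpu_pool_set_add_pool(set, pool32);	// set now owns pool32
 *	rseq_percpu_pool_set_add_pool(set, pool128);
 *
 *	// Served from the 32-byte pool: smallest item_len >= 20.
 *	p = rseq_percpu_pool_set_zmalloc(set, 20);
 *	...
 *	rseq_percpu_free(p);
 *	rseq_percpu_pool_set_destroy(set);	// destroys the added pools
 */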

static
void *__rseq_pool_percpu_ptr(struct rseq_percpu_pool *pool, int cpu, uintptr_t item_offset)
{
	/* TODO: Implement multi-ranges support. */
	return pool->ranges->base + (pool->percpu_len * cpu) + item_offset;
}

void *__rseq_percpu_ptr(void __rseq_percpu *_ptr, int cpu)
{
	uintptr_t ptr = (uintptr_t) _ptr;
	uintptr_t item_offset = ptr & MAX_POOL_LEN_MASK;
	uintptr_t pool_index = ptr >> POOL_INDEX_SHIFT;
	struct rseq_percpu_pool *pool = &rseq_percpu_pool[pool_index];

	assert(cpu >= 0);
	return __rseq_pool_percpu_ptr(pool, cpu, item_offset);
}

static
void rseq_percpu_zero_item(struct rseq_percpu_pool *pool, uintptr_t item_offset)
{
	int i;

	for (i = 0; i < pool->max_nr_cpus; i++) {
		char *p = __rseq_pool_percpu_ptr(pool, i, item_offset);
		memset(p, 0, pool->item_len);
	}
}

//TODO: this will need to be reimplemented for ranges,
//which cannot use __rseq_pool_percpu_ptr.
#if 0 //#ifdef HAVE_LIBNUMA
static
int rseq_percpu_pool_range_init_numa(struct rseq_percpu_pool *pool, struct rseq_percpu_pool_range *range, int numa_flags)
{
	unsigned long nr_pages;
	long ret, page_len;
	int cpu;

	if (!numa_flags)
		return 0;
	page_len = rseq_get_page_len();
	nr_pages = pool->percpu_len >> rseq_get_count_order_ulong(page_len);
	for (cpu = 0; cpu < pool->max_nr_cpus; cpu++) {

		int status[MOVE_PAGES_BATCH_SIZE];
		int nodes[MOVE_PAGES_BATCH_SIZE];
		void *pages[MOVE_PAGES_BATCH_SIZE];

		nodes[0] = numa_node_of_cpu(cpu);
		for (size_t k = 1; k < RSEQ_ARRAY_SIZE(nodes); ++k) {
			nodes[k] = nodes[0];
		}

		for (unsigned long page = 0; page < nr_pages;) {

			size_t max_k = RSEQ_ARRAY_SIZE(pages);
			size_t left = nr_pages - page;

			if (left < max_k) {
				max_k = left;
			}

			for (size_t k = 0; k < max_k; ++k, ++page) {
				pages[k] = __rseq_pool_percpu_ptr(pool, cpu, page * page_len);
				status[k] = -EPERM;
			}

			ret = move_pages(0, max_k, pages, nodes, status, numa_flags);

			if (ret < 0)
				return ret;

			if (ret > 0) {
				fprintf(stderr, "%ld pages were not migrated\n", ret);
				for (size_t k = 0; k < max_k; ++k) {
					if (status[k] < 0)
						fprintf(stderr,
							"Error while moving page %p to numa node %d: %d\n",
							pages[k], nodes[k], -status[k]);
				}
			}
		}
	}
	return 0;
}

int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool, int numa_flags)
{
	struct rseq_percpu_pool_range *range;
	int ret;

	if (!numa_flags)
		return 0;
	for (range = pool->ranges; range; range = range->next) {
		ret = rseq_percpu_pool_range_init_numa(pool, range, numa_flags);
		if (ret)
			return ret;
	}
	return 0;
}
#else
int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool __attribute__((unused)),
		int numa_flags __attribute__((unused)))
{
	return 0;
}
#endif
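
/*
 * Intended usage sketch for the NUMA helper (illustrative only; with
 * the move_pages()-based implementation compiled out above, the call
 * currently resolves to the no-op stub). MPOL_MF_MOVE comes from
 * <numaif.h> and is used here only as one example of a flag accepted
 * by move_pages(); building with HAVE_LIBNUMA is an assumption of this
 * sketch:
 *
 *	struct rseq_percpu_pool *pool;
 *
 *	pool = rseq_percpu_pool_create("numa-pool", 64, 4096, 128, NULL);
 *	if (!pool)
 *		abort();
 *	// Ask that each CPU's range be migrated to that CPU's NUMA node.
 *	if (rseq_percpu_pool_init_numa(pool, MPOL_MF_MOVE))
 *		abort();
 */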

static
void *default_mmap_func(void *priv __attribute__((unused)), size_t len)
{
	void *base;

	base = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (base == MAP_FAILED)
		return NULL;
	return base;
}

static
int default_munmap_func(void *priv __attribute__((unused)), void *ptr, size_t len)
{
	return munmap(ptr, len);
}

static
int create_alloc_bitmap(struct rseq_percpu_pool *pool, struct rseq_percpu_pool_range *range)
{
	size_t count;

	count = ((pool->percpu_len >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;

	/*
	 * Not being able to create the validation bitmap is an error
	 * that needs to be reported.
	 */
	range->alloc_bitmap = calloc(count, sizeof(unsigned long));
	if (!range->alloc_bitmap)
		return -1;
	return 0;
}

static
const char *get_pool_name(const struct rseq_percpu_pool *pool)
{
	return pool->name ? : "<anonymous>";
}

static
bool addr_in_pool(const struct rseq_percpu_pool *pool, void *addr)
{
	struct rseq_percpu_pool_range *range;

	for (range = pool->ranges; range; range = range->next) {
		if (addr >= range->base && addr < range->base + range->next_unused)
			return true;
	}
	return false;
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void check_free_list(const struct rseq_percpu_pool *pool)
{
	size_t total_item = 0, total_never_allocated = 0, total_freed = 0,
		max_list_traversal = 0, traversal_iteration = 0;
	struct rseq_percpu_pool_range *range;

	if (!pool->attr.robust_set)
		return;

	for (range = pool->ranges; range; range = range->next) {
		total_item += pool->percpu_len >> pool->item_order;
		total_never_allocated += (pool->percpu_len - range->next_unused) >> pool->item_order;
	}
	max_list_traversal = total_item - total_never_allocated;

	for (struct free_list_node *node = pool->free_list_head, *prev = NULL;
			node;
			prev = node,
			node = node->next) {

		void *node_addr = node;

		if (traversal_iteration >= max_list_traversal) {
			fprintf(stderr, "%s: Corrupted free-list; Possibly infinite loop in pool \"%s\" (%p), caller %p.\n",
				__func__, get_pool_name(pool), pool, __builtin_return_address(0));
			abort();
		}

		/* Node is out of range. */
		if (!addr_in_pool(pool, node_addr)) {
			if (prev)
				fprintf(stderr, "%s: Corrupted free-list node %p -> [out-of-range %p] in pool \"%s\" (%p), caller %p.\n",
					__func__, prev, node, get_pool_name(pool), pool, __builtin_return_address(0));
			else
				fprintf(stderr, "%s: Corrupted free-list node [out-of-range %p] in pool \"%s\" (%p), caller %p.\n",
					__func__, node, get_pool_name(pool), pool, __builtin_return_address(0));
			abort();
		}

		traversal_iteration++;
		total_freed++;
	}

	if (total_never_allocated + total_freed != total_item) {
		fprintf(stderr, "%s: Corrupted free-list in pool \"%s\" (%p); total-item: %zu total-never-used: %zu total-freed: %zu, caller %p.\n",
			__func__, get_pool_name(pool), pool, total_item, total_never_allocated, total_freed, __builtin_return_address(0));
		abort();
	}
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void destroy_alloc_bitmap(struct rseq_percpu_pool *pool, struct rseq_percpu_pool_range *range)
{
	unsigned long *bitmap = range->alloc_bitmap;
	size_t count, total_leaks = 0;

	if (!bitmap)
		return;

	count = ((pool->percpu_len >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;

	/* Assert that all items in the pool were freed. */
	for (size_t k = 0; k < count; ++k)
		total_leaks += rseq_hweight_ulong(bitmap[k]);
	if (total_leaks) {
		fprintf(stderr, "%s: Pool \"%s\" (%p) has %zu leaked items on destroy, caller: %p.\n",
			__func__, get_pool_name(pool), pool, total_leaks, (void *) __builtin_return_address(0));
		abort();
	}

	free(bitmap);
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
int rseq_percpu_pool_range_destroy(struct rseq_percpu_pool *pool,
		struct rseq_percpu_pool_range *range)
{
	destroy_alloc_bitmap(pool, range);
	return pool->attr.munmap_func(pool->attr.mmap_priv, range->base,
			pool->percpu_len * pool->max_nr_cpus);
}

static
struct rseq_percpu_pool_range *rseq_percpu_pool_range_create(struct rseq_percpu_pool *pool)
{
	struct rseq_percpu_pool_range *range;
	void *base;

	range = calloc(1, sizeof(struct rseq_percpu_pool_range));
	if (!range)
		return NULL;
	range->pool = pool;

	base = pool->attr.mmap_func(pool->attr.mmap_priv, pool->percpu_len * pool->max_nr_cpus);
	if (!base)
		goto error_alloc;
	range->base = base;
	if (pool->attr.robust_set) {
		if (create_alloc_bitmap(pool, range))
			goto error_alloc;
	}
	return range;

error_alloc:
	(void) rseq_percpu_pool_range_destroy(pool, range);
	return NULL;
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
int __rseq_percpu_pool_destroy(struct rseq_percpu_pool *pool)
{
	struct rseq_percpu_pool_range *range, *next_range;
	int ret = 0;

	if (!pool->ranges) {
		errno = ENOENT;
		ret = -1;
		goto end;
	}
	check_free_list(pool);
	/* Iteration safe against removal. */
	for (range = pool->ranges; range && (next_range = range->next, 1); range = next_range) {
		if (rseq_percpu_pool_range_destroy(pool, range))
			goto end;
		/* Update list head to keep list coherent in case of partial failure. */
		pool->ranges = next_range;
	}
	pthread_mutex_destroy(&pool->lock);
	free(pool->name);
	memset(pool, 0, sizeof(*pool));
end:
	return ret;
}

int rseq_percpu_pool_destroy(struct rseq_percpu_pool *pool)
{
	int ret;

	pthread_mutex_lock(&pool_lock);
	ret = __rseq_percpu_pool_destroy(pool);
	pthread_mutex_unlock(&pool_lock);
	return ret;
}

struct rseq_percpu_pool *rseq_percpu_pool_create(const char *pool_name,
		size_t item_len, size_t percpu_len, int max_nr_cpus,
		const struct rseq_pool_attr *_attr)
{
	struct rseq_percpu_pool *pool;
	struct rseq_pool_attr attr = {};
	unsigned int i;
	int order;

	/* Make sure each item is large enough to contain free list pointers. */
	if (item_len < sizeof(void *))
		item_len = sizeof(void *);

	/* Align item_len on next power of two. */
	order = rseq_get_count_order_ulong(item_len);
	if (order < 0) {
		errno = EINVAL;
		return NULL;
	}
	item_len = 1UL << order;

	/* Align percpu_len on page size. */
	percpu_len = rseq_align(percpu_len, rseq_get_page_len());

	if (max_nr_cpus < 0 || item_len > percpu_len ||
			percpu_len > (UINTPTR_MAX >> POOL_INDEX_BITS)) {
		errno = EINVAL;
		return NULL;
	}

	if (_attr)
		memcpy(&attr, _attr, sizeof(attr));
	if (!attr.mmap_set) {
		attr.mmap_func = default_mmap_func;
		attr.munmap_func = default_munmap_func;
		attr.mmap_priv = NULL;
	}

	pthread_mutex_lock(&pool_lock);
	/* Linear scan in array of pools to find empty spot. */
	for (i = FIRST_POOL; i < MAX_NR_POOLS; i++) {
		pool = &rseq_percpu_pool[i];
		if (!pool->ranges)
			goto found_empty;
	}
	errno = ENOMEM;
	pool = NULL;
	goto end;

found_empty:
	memcpy(&pool->attr, &attr, sizeof(attr));
	pthread_mutex_init(&pool->lock, NULL);
	pool->percpu_len = percpu_len;
	pool->max_nr_cpus = max_nr_cpus;
	pool->index = i;
	pool->item_len = item_len;
	pool->item_order = order;

	//TODO: implement multi-range support.
	pool->ranges = rseq_percpu_pool_range_create(pool);
	if (!pool->ranges)
		goto error_alloc;

	if (pool_name) {
		pool->name = strdup(pool_name);
		if (!pool->name)
			goto error_alloc;
	}
end:
	pthread_mutex_unlock(&pool_lock);
	return pool;

error_alloc:
	__rseq_percpu_pool_destroy(pool);
	pthread_mutex_unlock(&pool_lock);
	errno = ENOMEM;
	return NULL;
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void set_alloc_slot(struct rseq_percpu_pool *pool, size_t item_offset)
{
	unsigned long *bitmap = pool->ranges->alloc_bitmap;
	size_t item_index = item_offset >> pool->item_order;
	unsigned long mask;
	size_t k;

	if (!bitmap)
		return;

	k = item_index / BIT_PER_ULONG;
	mask = 1ULL << (item_index % BIT_PER_ULONG);

	/* Report allocator corruption and abort if the bit is already set. */
	if (bitmap[k] & mask) {
		fprintf(stderr, "%s: Allocator corruption detected for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
			__func__, get_pool_name(pool), pool, item_offset, (void *) __builtin_return_address(0));
		abort();
	}
	bitmap[k] |= mask;
}

static
void __rseq_percpu *__rseq_percpu_malloc(struct rseq_percpu_pool *pool, bool zeroed)
{
	struct free_list_node *node;
	uintptr_t item_offset;
	void __rseq_percpu *addr;

	pthread_mutex_lock(&pool->lock);
	/* Get first entry from free list. */
	node = pool->free_list_head;
	if (node != NULL) {
		/* Remove node from free list (update head). */
		pool->free_list_head = node->next;
		item_offset = (uintptr_t) ((void *) node - pool->ranges->base);
		addr = (void *) (((uintptr_t) pool->index << POOL_INDEX_SHIFT) | item_offset);
		goto end;
	}
	if (pool->ranges->next_unused + pool->item_len > pool->percpu_len) {
		errno = ENOMEM;
		addr = NULL;
		goto end;
	}
	item_offset = pool->ranges->next_unused;
	addr = (void *) (((uintptr_t) pool->index << POOL_INDEX_SHIFT) | item_offset);
	pool->ranges->next_unused += pool->item_len;
end:
	if (addr)
		set_alloc_slot(pool, item_offset);
	pthread_mutex_unlock(&pool->lock);
	if (zeroed && addr)
		rseq_percpu_zero_item(pool, item_offset);
	return addr;
}

void __rseq_percpu *rseq_percpu_malloc(struct rseq_percpu_pool *pool)
{
	return __rseq_percpu_malloc(pool, false);
}

void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_percpu_pool *pool)
{
	return __rseq_percpu_malloc(pool, true);
}

/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void clear_alloc_slot(struct rseq_percpu_pool *pool, size_t item_offset)
{
	unsigned long *bitmap = pool->ranges->alloc_bitmap;
	size_t item_index = item_offset >> pool->item_order;
	unsigned long mask;
	size_t k;

	if (!bitmap)
		return;

	k = item_index / BIT_PER_ULONG;
	mask = 1ULL << (item_index % BIT_PER_ULONG);

	/* Report a double-free and abort if the bit is not set. */
	if (!(bitmap[k] & mask)) {
		fprintf(stderr, "%s: Double-free detected for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
			__func__, get_pool_name(pool), pool, item_offset,
			(void *) __builtin_return_address(0));
		abort();
	}
	bitmap[k] &= ~mask;
}

void rseq_percpu_free(void __rseq_percpu *_ptr)
{
	uintptr_t ptr = (uintptr_t) _ptr;
	uintptr_t item_offset = ptr & MAX_POOL_LEN_MASK;
	uintptr_t pool_index = ptr >> POOL_INDEX_SHIFT;
	struct rseq_percpu_pool *pool = &rseq_percpu_pool[pool_index];
	struct free_list_node *head, *item;

	pthread_mutex_lock(&pool->lock);
	clear_alloc_slot(pool, item_offset);
	/* Add ptr to head of free list */
	head = pool->free_list_head;
	/* Free-list is in CPU 0 range. */
	item = (struct free_list_node *) __rseq_pool_percpu_ptr(pool, 0, item_offset);
	item->next = head;
	pool->free_list_head = item;
	pthread_mutex_unlock(&pool->lock);
}

struct rseq_percpu_pool_set *rseq_percpu_pool_set_create(void)
{
	struct rseq_percpu_pool_set *pool_set;

	pool_set = calloc(1, sizeof(struct rseq_percpu_pool_set));
	if (!pool_set)
		return NULL;
	pthread_mutex_init(&pool_set->lock, NULL);
	return pool_set;
}

int rseq_percpu_pool_set_destroy(struct rseq_percpu_pool_set *pool_set)
{
	int order, ret;

	for (order = POOL_SET_MIN_ENTRY; order < POOL_SET_NR_ENTRIES; order++) {
		struct rseq_percpu_pool *pool = pool_set->entries[order];

		if (!pool)
			continue;
		ret = rseq_percpu_pool_destroy(pool);
		if (ret)
			return ret;
		pool_set->entries[order] = NULL;
	}
	pthread_mutex_destroy(&pool_set->lock);
	free(pool_set);
	return 0;
}

/* Ownership of pool is handed over to pool set on success. */
int rseq_percpu_pool_set_add_pool(struct rseq_percpu_pool_set *pool_set, struct rseq_percpu_pool *pool)
{
	size_t item_order = pool->item_order;
	int ret = 0;

	pthread_mutex_lock(&pool_set->lock);
	if (pool_set->entries[item_order]) {
		errno = EBUSY;
		ret = -1;
		goto end;
	}
	pool_set->entries[pool->item_order] = pool;
end:
	pthread_mutex_unlock(&pool_set->lock);
	return ret;
}

static
void __rseq_percpu *__rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len, bool zeroed)
{
	int order, min_order = POOL_SET_MIN_ENTRY;
	struct rseq_percpu_pool *pool;
	void __rseq_percpu *addr;

	order = rseq_get_count_order_ulong(len);
	if (order > POOL_SET_MIN_ENTRY)
		min_order = order;
again:
	pthread_mutex_lock(&pool_set->lock);
	/* Find the first (smallest) present pool where @len fits. */
	for (order = min_order; order < POOL_SET_NR_ENTRIES; order++) {
		pool = pool_set->entries[order];

		if (!pool)
			continue;
		if (pool->item_len >= len)
			goto found;
	}
	pool = NULL;
found:
	pthread_mutex_unlock(&pool_set->lock);
	if (pool) {
		addr = __rseq_percpu_malloc(pool, zeroed);
		if (addr == NULL && errno == ENOMEM) {
			/*
			 * If the allocation failed, try again with a
			 * larger pool.
			 */
			min_order = order + 1;
			goto again;
		}
	} else {
		/* Not found. */
		errno = ENOMEM;
		addr = NULL;
	}
	return addr;
}

void __rseq_percpu *rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len)
{
	return __rseq_percpu_pool_set_malloc(pool_set, len, false);
}

void __rseq_percpu *rseq_percpu_pool_set_zmalloc(struct rseq_percpu_pool_set *pool_set, size_t len)
{
	return __rseq_percpu_pool_set_malloc(pool_set, len, true);
}

struct rseq_pool_attr *rseq_pool_attr_create(void)
{
	return calloc(1, sizeof(struct rseq_pool_attr));
}

void rseq_pool_attr_destroy(struct rseq_pool_attr *attr)
{
	free(attr);
}

int rseq_pool_attr_set_mmap(struct rseq_pool_attr *attr,
		void *(*mmap_func)(void *priv, size_t len),
		int (*munmap_func)(void *priv, void *ptr, size_t len),
		void *mmap_priv)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->mmap_set = true;
	attr->mmap_func = mmap_func;
	attr->munmap_func = munmap_func;
	attr->mmap_priv = mmap_priv;
	return 0;
}
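
/*
 * Custom mapping sketch (illustrative only, not compiled as part of
 * this file): the callbacks must match the signatures above, and the
 * attribute object must then be passed to rseq_percpu_pool_create().
 * The hugetlb_mmap/hugetlb_munmap names are hypothetical, and
 * MAP_HUGETLB is a Linux-specific mmap flag shown only as one possible
 * backing-store choice:
 *
 *	static void *hugetlb_mmap(void *priv __attribute__((unused)), size_t len)
 *	{
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0);
 *		return p == MAP_FAILED ? NULL : p;
 *	}
 *
 *	static int hugetlb_munmap(void *priv __attribute__((unused)), void *ptr, size_t len)
 *	{
 *		return munmap(ptr, len);
 *	}
 *
 *	struct rseq_pool_attr *attr = rseq_pool_attr_create();
 *	struct rseq_percpu_pool *pool;
 *
 *	rseq_pool_attr_set_mmap(attr, hugetlb_mmap, hugetlb_munmap, NULL);
 *	pool = rseq_percpu_pool_create("hugetlb-pool", 64, 2 * 1024 * 1024, 128, attr);
 *	rseq_pool_attr_destroy(attr);	// the pool keeps its own copy of the attributes
 */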

int rseq_pool_attr_set_robust(struct rseq_pool_attr *attr)
{
	if (!attr) {
		errno = EINVAL;
		return -1;
	}
	attr->robust_set = true;
	return 0;
}
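
/*
 * Robust-mode sketch (illustrative only, not compiled as part of this
 * file): enabling the robust attribute makes the pool keep per-range
 * allocation bitmaps and validate the free list, so double-frees,
 * free-list corruption and leaked items on destroy are reported and
 * the process aborts. The pool name and sizes are arbitrary
 * placeholders:
 *
 *	struct rseq_pool_attr *attr = rseq_pool_attr_create();
 *	struct rseq_percpu_pool *pool;
 *	void __rseq_percpu *p;
 *
 *	rseq_pool_attr_set_robust(attr);
 *	pool = rseq_percpu_pool_create("robust-pool", 64, 4096, 128, attr);
 *	rseq_pool_attr_destroy(attr);
 *
 *	p = rseq_percpu_zmalloc(pool);
 *	rseq_percpu_free(p);
 *	rseq_percpu_free(p);	// detected: prints a double-free report and aborts
 */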