// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>

#include <rseq/percpu-alloc.h>
#include <sys/mman.h>
#include <assert.h>
#include <string.h>
#include <pthread.h>
#include <unistd.h>
#include <stdlib.h>
#include <rseq/compiler.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#ifdef HAVE_LIBNUMA
# include <numa.h>
# include <numaif.h>
#endif

#include "rseq-alloc-utils.h"

/*
 * rseq-percpu-alloc.c: rseq CPU-Local Storage (CLS) memory allocator.
 *
 * The rseq per-CPU memory allocator allows the application to request
 * memory pools of CPU-Local memory, each containing objects of a
 * given size (rounded up to the next power of 2), reserving a given
 * virtual address size per CPU, for a given maximum number of CPUs.
 *
 * The per-CPU memory allocator is analogous to TLS (Thread-Local
 * Storage) memory: TLS is Thread-Local Storage, whereas the per-CPU
 * memory allocator provides CPU-Local Storage.
 */
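
/*
 * A minimal usage sketch (illustrative, not part of the allocator):
 * create a pool, allocate a zeroed per-CPU item, update the copy
 * belonging to one CPU through __rseq_percpu_ptr(), then release
 * everything. The counter structure and the pool geometry below are
 * hypothetical; this assumes the public API declarations from
 * <rseq/percpu-alloc.h> included above.
 */
struct example_counter {
	long count;
};

static __attribute__((unused))
int example_percpu_alloc_usage(void)
{
	struct example_counter __rseq_percpu *counter;
	struct example_counter *cpu0_counter;
	struct rseq_percpu_pool *pool;

	/* 1 MB of virtual address space per CPU, for up to 64 CPUs. */
	pool = rseq_percpu_pool_create(sizeof(struct example_counter),
			1UL << 20, 64, NULL, 0);
	if (!pool)
		return -1;
	counter = (struct example_counter __rseq_percpu *)
			rseq_percpu_zmalloc(pool);
	if (!counter) {
		(void) rseq_percpu_pool_destroy(pool);
		return -1;
	}
	/* Resolve and update the CPU 0 copy of the item. */
	cpu0_counter = (struct example_counter *) __rseq_percpu_ptr(counter, 0);
	cpu0_counter->count++;
	rseq_percpu_free(counter);
	return rseq_percpu_pool_destroy(pool);
}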
36 | ||
72b100a1 | 37 | /* |
8ab16a24 | 38 | * Use high bits of per-CPU addresses to index the pool. |
72b100a1 MD |
39 | * This leaves the low bits of available to the application for pointer |
40 | * tagging (based on next power of 2 alignment of the allocations). | |
41 | */ | |
ef6695f1 | 42 | #if RSEQ_BITS_PER_LONG == 64 |
72b100a1 | 43 | # define POOL_INDEX_BITS 16 |
ef6695f1 | 44 | #else |
72b100a1 | 45 | # define POOL_INDEX_BITS 8 |
ef6695f1 | 46 | #endif |
72b100a1 MD |
47 | #define MAX_NR_POOLS (1UL << POOL_INDEX_BITS) |
48 | #define POOL_INDEX_SHIFT (RSEQ_BITS_PER_LONG - POOL_INDEX_BITS) | |
49 | #define MAX_POOL_LEN (1UL << POOL_INDEX_SHIFT) | |
50 | #define MAX_POOL_LEN_MASK (MAX_POOL_LEN - 1) | |
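
/*
 * Worked example (64-bit): POOL_INDEX_BITS = 16, so POOL_INDEX_SHIFT =
 * 48. An encoded per-CPU pointer such as 0x0002000000000040 decomposes
 * into pool index 0x2 (bits 63..48) and item offset 0x40 within that
 * pool's per-CPU range (bits 47..0, extracted with MAX_POOL_LEN_MASK).
 */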

#define POOL_SET_NR_ENTRIES	POOL_INDEX_SHIFT

/*
 * Smallest allocation should hold enough space for a free list pointer.
 */
#if RSEQ_BITS_PER_LONG == 64
# define POOL_SET_MIN_ENTRY	3	/* Smallest item_len=8 */
#else
# define POOL_SET_MIN_ENTRY	2	/* Smallest item_len=4 */
#endif

/*
 * Skip pool index 0 to ensure allocated entries at index 0 do not match
 * a NULL pointer.
 */
#define FIRST_POOL		1

struct free_list_node;

struct free_list_node {
	struct free_list_node *next;
};

/* This lock protects pool create/destroy. */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

struct rseq_mmap_attr {
	void *(*mmap_func)(void *priv, size_t len);
	int (*munmap_func)(void *priv, void *ptr, size_t len);
	void *mmap_priv;
};

struct rseq_percpu_pool {
	void *base;
	unsigned int index;
	size_t item_len;
	size_t percpu_len;
	int item_order;
	int max_nr_cpus;

	/*
	 * The free list chains freed items on the CPU 0 address range.
	 * We should rethink this decision if false sharing between
	 * malloc/free from other CPUs and data accesses from CPU 0
	 * becomes an issue. This is a NULL-terminated singly-linked
	 * list.
	 */
	struct free_list_node *free_list_head;
	size_t next_unused;
	/* This lock protects allocation/free within the pool. */
	pthread_mutex_t lock;

	struct rseq_mmap_attr mmap_attr;
};

//TODO: the array of pools should grow dynamically on create.
static struct rseq_percpu_pool rseq_percpu_pool[MAX_NR_POOLS];

/*
 * Pool set entries are indexed by item_len rounded to the next power of
 * 2. A pool set can contain NULL pool entries, in which case the next
 * large enough entry will be used for allocation.
 */
struct rseq_percpu_pool_set {
	/* This lock protects add vs malloc/zmalloc within the pool set. */
	pthread_mutex_t lock;
	struct rseq_percpu_pool *entries[POOL_SET_NR_ENTRIES];
};

static
void *__rseq_pool_percpu_ptr(struct rseq_percpu_pool *pool, int cpu, uintptr_t item_offset)
{
	return pool->base + (pool->percpu_len * cpu) + item_offset;
}

void *__rseq_percpu_ptr(void __rseq_percpu *_ptr, int cpu)
{
	uintptr_t ptr = (uintptr_t) _ptr;
	uintptr_t item_offset = ptr & MAX_POOL_LEN_MASK;
	uintptr_t pool_index = ptr >> POOL_INDEX_SHIFT;
	struct rseq_percpu_pool *pool = &rseq_percpu_pool[pool_index];

	assert(cpu >= 0);
	return __rseq_pool_percpu_ptr(pool, cpu, item_offset);
}
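
/*
 * Example (sketch): dereference one CPU's copy of an allocated item.
 * The helper name and the long-typed payload are hypothetical; callers
 * would typically obtain @cpu from an rseq-based CPU id read.
 */
static __attribute__((unused))
long example_read_cpu_copy(long __rseq_percpu *p, int cpu)
{
	return *(long *) __rseq_percpu_ptr(p, cpu);
}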

static
void rseq_percpu_zero_item(struct rseq_percpu_pool *pool, uintptr_t item_offset)
{
	int i;

	for (i = 0; i < pool->max_nr_cpus; i++) {
		char *p = __rseq_pool_percpu_ptr(pool, i, item_offset);
		memset(p, 0, pool->item_len);
	}
}

#ifdef HAVE_LIBNUMA
int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool, int numa_flags)
{
	unsigned long nr_pages, page;
	long ret, page_len;
	int cpu;

	if (!numa_flags)
		return 0;
	page_len = rseq_get_page_len();
	nr_pages = pool->percpu_len >> rseq_get_count_order_ulong(page_len);
	for (cpu = 0; cpu < pool->max_nr_cpus; cpu++) {
		int node = numa_node_of_cpu(cpu);

		/* TODO: batch move_pages() call with an array of pages. */
		for (page = 0; page < nr_pages; page++) {
			void *pageptr = __rseq_pool_percpu_ptr(pool, cpu, page * page_len);
			int status = -EPERM;

			ret = move_pages(0, 1, &pageptr, &node, &status, numa_flags);
			if (ret)
				return ret;
		}
	}
	return 0;
}
#else
int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool __attribute__((unused)),
		int numa_flags __attribute__((unused)))
{
	return 0;
}
#endif
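
/*
 * Example (sketch): move a freshly created pool's pages to each CPU's
 * NUMA node. MPOL_MF_MOVE (from <numaif.h>) migrates pages exclusively
 * owned by the caller; MPOL_MF_MOVE_ALL also moves shared pages but
 * requires CAP_SYS_NICE. Only meaningful with libnuma support.
 */
#ifdef HAVE_LIBNUMA
static __attribute__((unused))
int example_pool_numa_placement(struct rseq_percpu_pool *pool)
{
	return rseq_percpu_pool_init_numa(pool, MPOL_MF_MOVE);
}
#endif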
182 | ||
9bd07c29 MD |
183 | static |
184 | void *default_mmap_func(void *priv __attribute__((unused)), size_t len) | |
185 | { | |
186 | void *base; | |
187 | ||
188 | base = mmap(NULL, len, PROT_READ | PROT_WRITE, | |
189 | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); | |
190 | if (base == MAP_FAILED) | |
191 | return NULL; | |
192 | return base; | |
193 | } | |
194 | ||
195 | static | |
196 | int default_munmap_func(void *priv __attribute__((unused)), void *ptr, size_t len) | |
197 | { | |
198 | return munmap(ptr, len); | |
199 | } | |
200 | ||
struct rseq_percpu_pool *rseq_percpu_pool_create(size_t item_len,
		size_t percpu_len, int max_nr_cpus,
		const struct rseq_mmap_attr *mmap_attr,
		int flags)
{
	void *(*mmap_func)(void *priv, size_t len);
	int (*munmap_func)(void *priv, void *ptr, size_t len);
	void *mmap_priv;
	struct rseq_percpu_pool *pool;
	void *base;
	unsigned int i;
	int order;

	if (flags) {
		errno = EINVAL;
		return NULL;
	}

	/* Make sure each item is large enough to contain free list pointers. */
	if (item_len < sizeof(void *))
		item_len = sizeof(void *);

	/* Align item_len on next power of two. */
	order = rseq_get_count_order_ulong(item_len);
	if (order < 0) {
		errno = EINVAL;
		return NULL;
	}
	item_len = 1UL << order;

	/* Align percpu_len on page size. */
	percpu_len = rseq_align(percpu_len, rseq_get_page_len());

	if (max_nr_cpus < 0 || item_len > percpu_len ||
			percpu_len > (UINTPTR_MAX >> POOL_INDEX_BITS)) {
		errno = EINVAL;
		return NULL;
	}

	if (mmap_attr) {
		mmap_func = mmap_attr->mmap_func;
		munmap_func = mmap_attr->munmap_func;
		mmap_priv = mmap_attr->mmap_priv;
	} else {
		mmap_func = default_mmap_func;
		munmap_func = default_munmap_func;
		mmap_priv = NULL;
	}
	pthread_mutex_lock(&pool_lock);
	/* Linear scan in array of pools to find empty spot. */
	for (i = FIRST_POOL; i < MAX_NR_POOLS; i++) {
		pool = &rseq_percpu_pool[i];
		if (!pool->base)
			goto found_empty;
	}
	errno = ENOMEM;
	pool = NULL;
	goto end;

found_empty:
	base = mmap_func(mmap_priv, percpu_len * max_nr_cpus);
	if (!base) {
		pool = NULL;
		goto end;
	}
	pthread_mutex_init(&pool->lock, NULL);
	pool->base = base;
	pool->percpu_len = percpu_len;
	pool->max_nr_cpus = max_nr_cpus;
	pool->index = i;
	pool->item_len = item_len;
	pool->item_order = order;
	pool->mmap_attr.mmap_func = mmap_func;
	pool->mmap_attr.munmap_func = munmap_func;
	pool->mmap_attr.mmap_priv = mmap_priv;
end:
	pthread_mutex_unlock(&pool_lock);
	return pool;
}

int rseq_percpu_pool_destroy(struct rseq_percpu_pool *pool)
{
	int ret;

	pthread_mutex_lock(&pool_lock);
	if (!pool->base) {
		errno = ENOENT;
		ret = -1;
		goto end;
	}
	ret = pool->mmap_attr.munmap_func(pool->mmap_attr.mmap_priv, pool->base,
			pool->percpu_len * pool->max_nr_cpus);
	if (ret)
		goto end;
	pthread_mutex_destroy(&pool->lock);
	memset(pool, 0, sizeof(*pool));
end:
	pthread_mutex_unlock(&pool_lock);
	return ret;
}

static
void __rseq_percpu *__rseq_percpu_malloc(struct rseq_percpu_pool *pool, bool zeroed)
{
	struct free_list_node *node;
	uintptr_t item_offset;
	void __rseq_percpu *addr;

	pthread_mutex_lock(&pool->lock);
	/* Get first entry from free list. */
	node = pool->free_list_head;
	if (node != NULL) {
		/* Remove node from free list (update head). */
		pool->free_list_head = node->next;
		item_offset = (uintptr_t) ((void *) node - pool->base);
		addr = (void *) (((uintptr_t) pool->index << POOL_INDEX_SHIFT) | item_offset);
		goto end;
	}
	if (pool->next_unused + pool->item_len > pool->percpu_len) {
		errno = ENOMEM;
		addr = NULL;
		goto end;
	}
	item_offset = pool->next_unused;
	addr = (void *) (((uintptr_t) pool->index << POOL_INDEX_SHIFT) | item_offset);
	pool->next_unused += pool->item_len;
end:
	pthread_mutex_unlock(&pool->lock);
	if (zeroed && addr)
		rseq_percpu_zero_item(pool, item_offset);
	return addr;
}

void __rseq_percpu *rseq_percpu_malloc(struct rseq_percpu_pool *pool)
{
	return __rseq_percpu_malloc(pool, false);
}

void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_percpu_pool *pool)
{
	return __rseq_percpu_malloc(pool, true);
}

void rseq_percpu_free(void __rseq_percpu *_ptr)
{
	uintptr_t ptr = (uintptr_t) _ptr;
	uintptr_t item_offset = ptr & MAX_POOL_LEN_MASK;
	uintptr_t pool_index = ptr >> POOL_INDEX_SHIFT;
	struct rseq_percpu_pool *pool = &rseq_percpu_pool[pool_index];
	struct free_list_node *head, *item;

	pthread_mutex_lock(&pool->lock);
	/* Add ptr to head of free list */
	head = pool->free_list_head;
	/* Free-list is in CPU 0 range. */
	item = (struct free_list_node *) __rseq_pool_percpu_ptr(pool, 0, item_offset);
	item->next = head;
	pool->free_list_head = item;
	pthread_mutex_unlock(&pool->lock);
}
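
/*
 * Example (sketch): a freed item is pushed on the free list head and
 * handed back by the next allocation from the same pool, so the two
 * pointers below are expected to compare equal. Illustrative only;
 * assumes no concurrent allocations from @pool.
 */
static __attribute__((unused))
void example_free_list_reuse(struct rseq_percpu_pool *pool)
{
	void __rseq_percpu *first, *second;

	first = rseq_percpu_malloc(pool);
	if (!first)
		return;
	rseq_percpu_free(first);
	second = rseq_percpu_malloc(pool);
	assert(first == second);
	rseq_percpu_free(second);
}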

struct rseq_percpu_pool_set *rseq_percpu_pool_set_create(void)
{
	struct rseq_percpu_pool_set *pool_set;

	pool_set = calloc(1, sizeof(struct rseq_percpu_pool_set));
	if (!pool_set)
		return NULL;
	pthread_mutex_init(&pool_set->lock, NULL);
	return pool_set;
}

int rseq_percpu_pool_set_destroy(struct rseq_percpu_pool_set *pool_set)
{
	int order, ret;

	for (order = POOL_SET_MIN_ENTRY; order < POOL_SET_NR_ENTRIES; order++) {
		struct rseq_percpu_pool *pool = pool_set->entries[order];

		if (!pool)
			continue;
		ret = rseq_percpu_pool_destroy(pool);
		if (ret)
			return ret;
		pool_set->entries[order] = NULL;
	}
	pthread_mutex_destroy(&pool_set->lock);
	free(pool_set);
	return 0;
}

/* Ownership of pool is handed over to pool set on success. */
int rseq_percpu_pool_set_add_pool(struct rseq_percpu_pool_set *pool_set, struct rseq_percpu_pool *pool)
{
	size_t item_order = pool->item_order;
	int ret = 0;

	pthread_mutex_lock(&pool_set->lock);
	if (pool_set->entries[item_order]) {
		errno = EBUSY;
		ret = -1;
		goto end;
	}
	pool_set->entries[item_order] = pool;
end:
	pthread_mutex_unlock(&pool_set->lock);
	return ret;
}

static
void __rseq_percpu *__rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len, bool zeroed)
{
	int order, min_order = POOL_SET_MIN_ENTRY;
	struct rseq_percpu_pool *pool;
	void __rseq_percpu *addr;

	order = rseq_get_count_order_ulong(len);
	if (order > POOL_SET_MIN_ENTRY)
		min_order = order;
again:
	pthread_mutex_lock(&pool_set->lock);
	/* First smallest present pool where @len fits. */
	for (order = min_order; order < POOL_SET_NR_ENTRIES; order++) {
		pool = pool_set->entries[order];

		if (!pool)
			continue;
		if (pool->item_len >= len)
			goto found;
	}
	pool = NULL;
found:
	pthread_mutex_unlock(&pool_set->lock);
	if (pool) {
		addr = __rseq_percpu_malloc(pool, zeroed);
		if (addr == NULL && errno == ENOMEM) {
			/*
			 * If the allocation failed, try again with a
			 * larger pool.
			 */
			min_order = order + 1;
			goto again;
		}
	} else {
		/* Not found. */
		errno = ENOMEM;
		addr = NULL;
	}
	return addr;
}

void __rseq_percpu *rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len)
{
	return __rseq_percpu_pool_set_malloc(pool_set, len, false);
}

void __rseq_percpu *rseq_percpu_pool_set_zmalloc(struct rseq_percpu_pool_set *pool_set, size_t len)
{
	return __rseq_percpu_pool_set_malloc(pool_set, len, true);
}
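
/*
 * Example (sketch): a pool set dispatching two allocation sizes. The
 * pool geometries are hypothetical; pools already handed over to the
 * set are reclaimed by rseq_percpu_pool_set_destroy() on failure.
 */
static __attribute__((unused))
struct rseq_percpu_pool_set *example_pool_set_setup(int max_nr_cpus)
{
	struct rseq_percpu_pool_set *pool_set;
	struct rseq_percpu_pool *pool = NULL;
	size_t sizes[] = { 32, 1024 };
	size_t i;

	pool_set = rseq_percpu_pool_set_create();
	if (!pool_set)
		return NULL;
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		pool = rseq_percpu_pool_create(sizes[i], 1UL << 20,
				max_nr_cpus, NULL, 0);
		if (!pool || rseq_percpu_pool_set_add_pool(pool_set, pool))
			goto error;
	}
	return pool_set;

error:
	if (pool)
		(void) rseq_percpu_pool_destroy(pool);
	(void) rseq_percpu_pool_set_destroy(pool_set);
	return NULL;
}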

struct rseq_mmap_attr *rseq_mmap_attr_create(void *(*mmap_func)(void *priv, size_t len),
		int (*munmap_func)(void *priv, void *ptr, size_t len),
		void *mmap_priv)
{
	struct rseq_mmap_attr *attr = calloc(1, sizeof(struct rseq_mmap_attr));

	if (!attr)
		return NULL;
	attr->mmap_func = mmap_func;
	attr->munmap_func = munmap_func;
	attr->mmap_priv = mmap_priv;
	return attr;
}

void rseq_mmap_attr_destroy(struct rseq_mmap_attr *attr)
{
	free(attr);
}
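
/*
 * Example (sketch): passing custom mapping hooks to a pool. Here the
 * default hooks defined above are simply reused; an application would
 * supply its own, e.g. carving ranges out of a preallocated hugepage
 * region handed in as @mmap_priv.
 */
static __attribute__((unused))
struct rseq_percpu_pool *example_create_pool_with_hooks(void)
{
	struct rseq_mmap_attr *attr;
	struct rseq_percpu_pool *pool;

	attr = rseq_mmap_attr_create(default_mmap_func, default_munmap_func, NULL);
	if (!attr)
		return NULL;
	pool = rseq_percpu_pool_create(64, 1UL << 16, 16, attr, 0);
	/* The attr contents were copied into the pool on success. */
	rseq_mmap_attr_destroy(attr);
	return pool;
}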