mempool: introduce poison attribute
[librseq.git] / include / rseq / mempool.h
CommitLineData
ef6695f1
MD
1/* SPDX-License-Identifier: MIT */
2/* SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */
3
34337fec
MD
4#ifndef _RSEQ_MEMPOOL_H
5#define _RSEQ_MEMPOOL_H
ef6695f1 6
f2981623 7#include <rseq/compiler.h>
ef6695f1
MD
8#include <stddef.h>
9#include <sys/types.h>
e229a2dd 10#include <sys/mman.h>
455e090e 11#include <stdint.h>
ef6695f1
MD
12
13/*
89b7e681
MD
14 * rseq/mempool.h: rseq memory pool allocator.
15 *
16 * The rseq memory pool allocator can be configured as either a global
17 * allocator (default) or a per-CPU memory allocator.
18 *
19 * The rseq global memory allocator allows the application to request
 * memory pools of global memory, each containing objects of a
21 * given size (rounded to next power of 2), reserving a given virtual
22 * address size of the requested stride.
8aa1462d
MD
23 *
 * The rseq per-CPU memory allocator allows the application to request
 * memory pools of CPU-Local memory, each containing objects of a
26 * given size (rounded to next power of 2), reserving a given virtual
27 * address size per CPU, for a given maximum number of CPUs.
28 *
29 * The per-CPU memory allocator is analogous to TLS (Thread-Local
30 * Storage) memory: TLS is Thread-Local Storage, whereas the per-CPU
31 * memory allocator provides CPU-Local Storage.
89b7e681
MD
32 *
33 * Memory pool sets can be created by adding one or more pools into
34 * them. They can be used to perform allocation of variable length
35 * objects.
ef6695f1
MD
36 */
37
c7ec94e0
MD
38#ifdef __cplusplus
39extern "C" {
40#endif
41
bef24483
MD
42/*
43 * The percpu offset stride can be overridden by the user code.
44 * The stride *must* match for all objects belonging to a given pool
45 * between arguments to:
46 *
06e0b1c0
MD
47 * - rseq_mempool_create(),
48 * - rseq_percpu_ptr().
e30d5eb8 49 * - rseq_mempool_percpu_free(),
bef24483 50 */
cb475906 51#define RSEQ_MEMPOOL_STRIDE (1U << 16) /* stride: 64kB */
f2981623 52
d24ee051
MD
53/*
54 * Tag pointers returned by:
e30d5eb8
MD
55 * - rseq_mempool_percpu_malloc(),
56 * - rseq_mempool_percpu_zmalloc(),
57 * - rseq_mempool_set_percpu_malloc(),
58 * - rseq_mempool_set_percpu_zmalloc().
d24ee051 59 *
8aa1462d
MD
60 * and passed as parameter to:
61 * - rseq_percpu_ptr(),
e30d5eb8 62 * - rseq_mempool_percpu_free().
8aa1462d 63 *
d24ee051
MD
64 * with __rseq_percpu for use by static analyzers.
65 */
66#define __rseq_percpu
67
0ba2a93e
MD
68struct rseq_mempool_attr;
69struct rseq_mempool;
ef6695f1 70
8aa1462d 71/*
e30d5eb8 72 * rseq_mempool_create: Create a memory pool.
8aa1462d 73 *
cb475906
MD
74 * Create a memory pool for items of size @item_len (rounded to
75 * next power of two).
8aa1462d 76 *
d6acc8aa
MD
 * The @attr pointer is used to specify the pool attributes. If NULL,
 * default attribute values are used. The @attr can be destroyed immediately
e30d5eb8 79 * after rseq_mempool_create() returns. The caller keeps ownership
89b7e681 80 * of @attr. Default attributes select a global mempool type.
8aa1462d 81 *
ca452fee
MD
 * The argument @pool_name can be used to give a name to the pool for
83 * debugging purposes. It can be NULL if no name is given.
84 *
8aa1462d
MD
85 * Returns a pointer to the created percpu pool. Return NULL on error,
86 * with errno set accordingly:
cb475906 87 *
8aa1462d
MD
88 * EINVAL: Invalid argument.
89 * ENOMEM: Not enough resources (memory or pool indexes) available to
90 * allocate pool.
91 *
a82006d0
MD
92 * In addition, if the attr mmap callback fails, NULL is returned and
93 * errno is propagated from the callback. The default callback can
9bd07c29 94 * return errno=ENOMEM.
8aa1462d
MD
95 *
96 * This API is MT-safe.
97 */
0ba2a93e 98struct rseq_mempool *rseq_mempool_create(const char *pool_name,
cb475906 99 size_t item_len, const struct rseq_mempool_attr *attr);
8aa1462d
MD
100
101/*
0ba2a93e 102 * rseq_mempool_destroy: Destroy a per-cpu memory pool.
8aa1462d
MD
103 *
104 * Destroy a per-cpu memory pool, unmapping its memory and removing the
105 * pool entry from the global index. No pointers allocated from the
106 * pool should be used when it is destroyed. This includes rseq_percpu_ptr().
107 *
108 * Argument @pool is a pointer to the per-cpu pool to destroy.
109 *
110 * Return values: 0 on success, -1 on error, with errno set accordingly:
89b7e681 111 *
8aa1462d
MD
112 * ENOENT: Trying to free a pool which was not allocated.
113 *
9bd07c29
MD
114 * If the munmap_func callback fails, -1 is returned and errno is
115 * propagated from the callback. The default callback can return
116 * errno=EINVAL.
8aa1462d
MD
117 *
118 * This API is MT-safe.
119 */
0ba2a93e 120int rseq_mempool_destroy(struct rseq_mempool *pool);
ef6695f1 121
8aa1462d 122/*
15da5c27 123 * rseq_mempool_percpu_malloc: Allocate memory from a per-cpu pool.
8aa1462d
MD
124 *
125 * Allocate an item from a per-cpu @pool. The allocation will reserve
126 * an item of the size specified by @item_len (rounded to next power of
127 * two) at pool creation. This effectively reserves space for this item
128 * on all CPUs.
129 *
130 * On success, return a "__rseq_percpu" encoded pointer to the pool
131 * item. This encoded pointer is meant to be passed to rseq_percpu_ptr()
132 * to be decoded to a valid address before being accessed.
133 *
134 * Return NULL (errno=ENOMEM) if there is not enough space left in the
135 * pool to allocate an item.
136 *
137 * This API is MT-safe.
138 */
15da5c27 139void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool);
8aa1462d
MD
140
141/*
 * rseq_mempool_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool.
8aa1462d
MD
143 *
144 * Allocate memory for an item within the pool, and zero-initialize its
15da5c27 145 * memory on all CPUs. See rseq_mempool_percpu_malloc for details.
8aa1462d
MD
146 *
147 * This API is MT-safe.
148 */
15da5c27 149void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool);
8aa1462d
MD
150
151/*
15da5c27
MD
152 * rseq_mempool_malloc: Allocate memory from a global pool.
153 *
154 * Wrapper to allocate memory from a global pool, which can be
155 * used directly without per-cpu indexing. Would normally be used
156 * with pools created with max_nr_cpus=1.
157 */
158static inline
159void *rseq_mempool_malloc(struct rseq_mempool *pool)
160{
161 return (void *) rseq_mempool_percpu_malloc(pool);
162}
163
164/*
165 * rseq_mempool_zmalloc: Allocate zero-initialized memory from a global pool.
166 *
167 * Wrapper to allocate memory from a global pool, which can be
168 * used directly without per-cpu indexing. Would normally be used
169 * with pools created with max_nr_cpus=1.
170 */
171static inline
172void *rseq_mempool_zmalloc(struct rseq_mempool *pool)
173{
174 return (void *) rseq_mempool_percpu_zmalloc(pool);
175}
176
177/*
178 * rseq_mempool_percpu_free: Free memory from a per-cpu pool.
8aa1462d
MD
179 *
180 * Free an item pointed to by @ptr from its per-cpu pool.
181 *
182 * The @ptr argument is a __rseq_percpu encoded pointer returned by
183 * either:
184 *
15da5c27
MD
185 * - rseq_mempool_percpu_malloc(),
186 * - rseq_mempool_percpu_zmalloc(),
187 * - rseq_mempool_set_percpu_malloc(),
188 * - rseq_mempool_set_percpu_zmalloc().
8aa1462d 189 *
 * The @stride optional argument to rseq_mempool_percpu_free() is a configurable
4aa3220c 191 * stride, which must match the stride received by pool creation.
cb475906 192 * If the argument is not present, use the default RSEQ_MEMPOOL_STRIDE.
4aa3220c 193 *
8aa1462d
MD
194 * This API is MT-safe.
195 */
cb475906 196void librseq_mempool_percpu_free(void __rseq_percpu *ptr, size_t stride);
15da5c27
MD
197
/*
 * Convenience wrapper: if the optional @_stride argument is omitted,
 * RSEQ_PARAM_SELECT_ARG1 substitutes the default RSEQ_MEMPOOL_STRIDE
 * before forwarding to librseq_mempool_percpu_free().
 */
#define rseq_mempool_percpu_free(_ptr, _stride...)	\
	librseq_mempool_percpu_free(_ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE))
f2981623 200
15da5c27
MD
201/*
 * rseq_mempool_free: Free memory from a global pool.
203 *
204 * Free an item pointed to by @ptr from its global pool. Would normally
205 * be used with pools created with max_nr_cpus=1.
206 *
207 * The @ptr argument is a pointer returned by either:
208 *
209 * - rseq_mempool_malloc(),
210 * - rseq_mempool_zmalloc(),
211 * - rseq_mempool_set_malloc(),
212 * - rseq_mempool_set_zmalloc().
213 *
 * The @stride optional argument to rseq_mempool_free() is a configurable
215 * stride, which must match the stride received by pool creation. If
cb475906 216 * the argument is not present, use the default RSEQ_MEMPOOL_STRIDE.
15da5c27
MD
217 * The stride is needed even for a global pool to know the mapping
218 * address range.
219 *
220 * This API is MT-safe.
221 */
/*
 * Convenience wrapper: cast the plain pointer back to its __rseq_percpu
 * encoded form and forward to librseq_mempool_percpu_free(), defaulting
 * the optional @_stride argument to RSEQ_MEMPOOL_STRIDE.
 *
 * Fix: the previous expansion called librseq_percpu_free(), which is not
 * declared anywhere in this header; the declared entry point (see the
 * prototype above rseq_mempool_percpu_free()) is
 * librseq_mempool_percpu_free().
 */
#define rseq_mempool_free(_ptr, _stride...)		\
	librseq_mempool_percpu_free((void __rseq_percpu *) (_ptr), RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE))
ef6695f1 224
8aa1462d 225/*
4aa3220c 226 * rseq_percpu_ptr: Offset a per-cpu pointer for a given CPU.
8aa1462d 227 *
4aa3220c
MD
228 * Offset a per-cpu pointer @ptr to get the associated pointer for the
229 * given @cpu. The @ptr argument is a __rseq_percpu pointer returned by
230 * either:
8aa1462d 231 *
15da5c27
MD
232 * - rseq_mempool_percpu_malloc(),
233 * - rseq_mempool_percpu_zmalloc(),
234 * - rseq_mempool_set_percpu_malloc(),
235 * - rseq_mempool_set_percpu_zmalloc().
8aa1462d 236 *
06e0b1c0
MD
237 * The macro rseq_percpu_ptr() preserves the type of the @ptr parameter
238 * for the returned pointer, but removes the __rseq_percpu annotation.
8aa1462d 239 *
06e0b1c0 240 * The macro rseq_percpu_ptr() takes an optional @stride argument. If
cb475906 241 * the argument is not present, use the default RSEQ_MEMPOOL_STRIDE.
4aa3220c 242 * This must match the stride used for pool creation.
8aa1462d
MD
243 *
244 * This API is MT-safe.
245 */
06e0b1c0
MD
/*
 * Decode a __rseq_percpu encoded pointer into the address valid for a
 * given cpu by adding (cpu * stride) to the encoded base. The outer
 * __typeof__ cast preserves the pointee type of @_ptr while dropping the
 * __rseq_percpu annotation. The optional @_stride defaults to
 * RSEQ_MEMPOOL_STRIDE via RSEQ_PARAM_SELECT_ARG1.
 */
#define rseq_percpu_ptr(_ptr, _cpu, _stride...)		\
	((__typeof__(*(_ptr)) *) ((uintptr_t) (_ptr) +	\
		((unsigned int) (_cpu) *		\
			(uintptr_t) RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE))))
ef6695f1 250
8aa1462d 251/*
0ba2a93e 252 * rseq_mempool_set_create: Create a pool set.
8aa1462d
MD
253 *
254 * Create a set of pools. Its purpose is to offer a memory allocator API
255 * for variable-length items (e.g. variable length strings). When
256 * created, the pool set has no pool. Pools can be created and added to
257 * the set. One common approach would be to create pools for each
258 * relevant power of two allocation size useful for the application.
259 * Only one pool can be added to the pool set for each power of two
260 * allocation size.
261 *
262 * Returns a pool set pointer on success, else returns NULL with
263 * errno=ENOMEM (out of memory).
264 *
265 * This API is MT-safe.
266 */
0ba2a93e 267struct rseq_mempool_set *rseq_mempool_set_create(void);
8aa1462d
MD
268
269/*
0ba2a93e 270 * rseq_mempool_set_destroy: Destroy a pool set.
8aa1462d
MD
271 *
272 * Destroy a pool set and its associated resources. The pools that were
273 * added to the pool set are destroyed as well.
274 *
275 * Returns 0 on success, -1 on failure (or partial failure), with errno
 * set by rseq_mempool_destroy(). Using a pool set after destroy
277 * failure is undefined.
278 *
279 * This API is MT-safe.
280 */
0ba2a93e 281int rseq_mempool_set_destroy(struct rseq_mempool_set *pool_set);
8aa1462d
MD
282
283/*
0ba2a93e 284 * rseq_mempool_set_add_pool: Add a pool to a pool set.
8aa1462d
MD
285 *
286 * Add a @pool to the @pool_set. On success, its ownership is handed
287 * over to the pool set, so the caller should not destroy it explicitly.
288 * Only one pool can be added to the pool set for each power of two
289 * allocation size.
290 *
291 * Returns 0 on success, -1 on error with the following errno:
292 * - EBUSY: A pool already exists in the pool set for this power of two
293 * allocation size.
294 *
295 * This API is MT-safe.
296 */
0ba2a93e
MD
297int rseq_mempool_set_add_pool(struct rseq_mempool_set *pool_set,
298 struct rseq_mempool *pool);
ef6695f1 299
8aa1462d 300/*
e30d5eb8 301 * rseq_mempool_set_percpu_malloc: Allocate memory from a per-cpu pool set.
8aa1462d
MD
302 *
303 * Allocate an item from a per-cpu @pool. The allocation will reserve
304 * an item of the size specified by @len (rounded to next power of
305 * two). This effectively reserves space for this item on all CPUs.
306 *
307 * The space reservation will search for the smallest pool within
308 * @pool_set which respects the following conditions:
309 *
310 * - it has an item size large enough to fit @len,
311 * - it has space available.
312 *
313 * On success, return a "__rseq_percpu" encoded pointer to the pool
314 * item. This encoded pointer is meant to be passed to rseq_percpu_ptr()
315 * to be decoded to a valid address before being accessed.
316 *
317 * Return NULL (errno=ENOMEM) if there is not enough space left in the
318 * pool to allocate an item.
319 *
320 * This API is MT-safe.
321 */
15da5c27 322void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len);
8aa1462d
MD
323
324/*
 * rseq_mempool_set_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool set.
8aa1462d
MD
326 *
327 * Allocate memory for an item within the pool, and zero-initialize its
e30d5eb8 328 * memory on all CPUs. See rseq_mempool_set_percpu_malloc for details.
8aa1462d
MD
329 *
330 * This API is MT-safe.
331 */
15da5c27
MD
332void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len);
333
334/*
335 * rseq_mempool_set_malloc: Allocate memory from a global pool set.
336 *
337 * Wrapper to allocate memory from a global pool, which can be
338 * used directly without per-cpu indexing. Would normally be used
339 * with pools created with max_nr_cpus=1.
340 */
341static inline
342void *rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len)
343{
344 return (void *) rseq_mempool_set_percpu_malloc(pool_set, len);
345}
346
347/*
348 * rseq_mempool_set_zmalloc: Allocate zero-initialized memory from a global pool set.
349 *
350 * Wrapper to allocate memory from a global pool, which can be
351 * used directly without per-cpu indexing. Would normally be used
352 * with pools created with max_nr_cpus=1.
353 */
354static inline
355void *rseq_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
356{
357 return (void *) rseq_mempool_set_percpu_zmalloc(pool_set, len);
358}
ef6695f1 359
9bd07c29 360/*
0ba2a93e 361 * rseq_mempool_init_numa: Move pages to the NUMA node associated to their CPU topology.
9bd07c29
MD
362 *
363 * For pages allocated within @pool, invoke move_pages(2) with the given
364 * @numa_flags to move the pages to the NUMA node associated to their
365 * CPU topology.
366 *
367 * Argument @numa_flags are passed to move_pages(2). The expected flags are:
368 * MPOL_MF_MOVE: move process-private pages to cpu-specific numa nodes.
369 * MPOL_MF_MOVE_ALL: move shared pages to cpu-specific numa nodes
370 * (requires CAP_SYS_NICE).
371 *
372 * Returns 0 on success, else return -1 with errno set by move_pages(2).
373 */
0ba2a93e 374int rseq_mempool_init_numa(struct rseq_mempool *pool, int numa_flags);
9bd07c29
MD
375
376/*
0ba2a93e 377 * rseq_mempool_attr_create: Create a pool attribute structure.
a82006d0 378 */
0ba2a93e 379struct rseq_mempool_attr *rseq_mempool_attr_create(void);
a82006d0
MD
380
381/*
0ba2a93e 382 * rseq_mempool_attr_destroy: Destroy a pool attribute structure.
a82006d0 383 */
0ba2a93e 384void rseq_mempool_attr_destroy(struct rseq_mempool_attr *attr);
a82006d0
MD
385
386/*
0ba2a93e 387 * rseq_mempool_attr_set_mmap: Set pool attribute structure mmap functions.
9bd07c29
MD
388 *
389 * The @mmap_func callback used to map the memory for the pool.
390 *
391 * The @munmap_func callback used to unmap the memory when the pool
392 * is destroyed.
393 *
394 * The @mmap_priv argument is a private data pointer passed to both
395 * @mmap_func and @munmap_func callbacks.
8118247e
MD
396 *
397 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
9bd07c29 398 */
0ba2a93e 399int rseq_mempool_attr_set_mmap(struct rseq_mempool_attr *attr,
a82006d0 400 void *(*mmap_func)(void *priv, size_t len),
9bd07c29
MD
401 int (*munmap_func)(void *priv, void *ptr, size_t len),
402 void *mmap_priv);
403
135811f2
MD
404/*
405 * rseq_mempool_attr_set_init: Set pool attribute structure memory init functions.
406 *
407 * The @init_func callback used to initialized memory after allocation
374c2773
MD
408 * for the pool. The @cpu argument of @init_func, if >= 0, is the cpu to
409 * which belongs the range starting at @addr of length @len. If cpu is
410 * -1, it means the range belongs to a global pool. The @init_func
411 * callback must return 0 on success, -1 on error with errno set. If
412 * @init_func returns failure, the allocation of the pool memory fails,
413 * which either causes the pool creation to fail or memory allocation to
414 * fail (for extensible memory pools).
135811f2
MD
415 *
416 * The @init_priv argument is a private data pointer passed to the
417 * @init_func callback.
418 *
419 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
420 */
421int rseq_mempool_attr_set_init(struct rseq_mempool_attr *attr,
6e329183 422 int (*init_func)(void *priv, void *addr, size_t len, int cpu),
135811f2
MD
423 void *init_priv);
424
d6acc8aa 425/*
0ba2a93e 426 * rseq_mempool_attr_set_robust: Set pool robust attribute.
d6acc8aa
MD
427 *
428 * The robust pool attribute enables runtime validation of the pool:
429 *
430 * - Check for double-free of pointers.
431 *
432 * - Detect memory leaks on pool destruction.
433 *
434 * - Detect free-list corruption on pool destruction.
435 *
436 * There is a marginal runtime overhead on malloc/free operations.
437 *
438 * The memory overhead is (pool->percpu_len / pool->item_len) / CHAR_BIT
439 * bytes, over the lifetime of the pool.
440 *
441 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
442 */
0ba2a93e 443int rseq_mempool_attr_set_robust(struct rseq_mempool_attr *attr);
d6acc8aa 444
cb475906
MD
445/*
446 * rseq_mempool_attr_set_percpu: Set pool type as percpu.
447 *
89b7e681 448 * A pool created with this type is a per-cpu memory pool. The reserved
cb475906
MD
449 * allocation size is @stride, and the maximum CPU value expected
450 * is (@max_nr_cpus - 1). A @stride of 0 uses the default
451 * RSEQ_MEMPOOL_STRIDE.
452 *
453 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
454 */
455int rseq_mempool_attr_set_percpu(struct rseq_mempool_attr *attr,
456 size_t stride, int max_nr_cpus);
457
458/*
459 * rseq_mempool_attr_set_global: Set pool type as global.
460 *
89b7e681 461 * A pool created with this type is a global memory pool. The reserved
cb475906
MD
462 * allocation size is @stride. A @stride of 0 uses the default
463 * RSEQ_MEMPOOL_STRIDE.
464 *
465 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
466 */
467int rseq_mempool_attr_set_global(struct rseq_mempool_attr *attr, size_t stride);
468
e11a02d7
MD
469/*
470 * rseq_mempool_attr_set_max_nr_ranges: Set upper-limit to range allocation.
471 *
472 * Set an upper-limit to range allocation. A @max_nr_ranges value of
473 * 0 means no limit (default).
474 *
475 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
476 */
477int rseq_mempool_attr_set_max_nr_ranges(struct rseq_mempool_attr *attr,
478 unsigned long max_nr_ranges);
479
455e090e
MD
480/*
481 * rseq_mempool_attr_set_poison: Set pool poison value.
482 *
483 * Set a poison value to be set over freed pool entries. This can be
484 * used to anonymize freed memory, and for memory corruption checks
485 * with the robust attribute.
486 *
487 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
488 */
489int rseq_mempool_attr_set_poison(struct rseq_mempool_attr *attr,
490 uintptr_t poison);
491
c6fd3981
MD
492/*
493 * rseq_mempool_range_init_numa: NUMA initialization helper for memory range.
494 *
495 * Helper which can be used from mempool_attr @init_func to move a CPU
496 * memory range to the NUMA node associated to its topology.
497 *
498 * Returns 0 on success, -1 with errno set by move_pages(2) on error.
499 * Returns -1, errno=ENOSYS if NUMA support is not present.
500 */
501int rseq_mempool_range_init_numa(void *addr, size_t len, int cpu, int numa_flags);
502
6037d364
MD
503/*
504 * rseq_mempool_get_max_nr_cpus: Get the max_nr_cpus value configured for a pool.
505 *
506 * Returns a value >= 0 for a per-cpu pool.
507 * Returns -1, errno=EINVAL if the mempool is NULL or if the pool has a
508 * global pool type.
509 */
510int rseq_mempool_get_max_nr_cpus(struct rseq_mempool *mempool);
511
c7ec94e0
MD
512#ifdef __cplusplus
513}
514#endif
515
34337fec 516#endif /* _RSEQ_MEMPOOL_H */
This page took 0.047002 seconds and 4 git commands to generate.