/* SPDX-License-Identifier: MIT */
/* SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */

#ifndef _RSEQ_MEMPOOL_H
#define _RSEQ_MEMPOOL_H

#include <rseq/compiler.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdint.h>

/*
 * rseq/mempool.h: rseq memory pool allocator.
 *
 * The rseq memory pool allocator can be configured as either a global
 * allocator (default) or a per-CPU memory allocator.
 *
 * The rseq global memory allocator allows the application to request
 * memory pools of global memory, each containing objects of a given
 * size (rounded to the next power of 2), reserving a given virtual
 * address size of the requested stride.
 *
 * The rseq per-CPU memory allocator allows the application to request
 * memory pools of CPU-local memory, each containing objects of a given
 * size (rounded to the next power of 2), reserving a given virtual
 * address size per CPU, for a given maximum number of CPUs.
 *
 * The per-CPU memory allocator is analogous to TLS (Thread-Local
 * Storage) memory: where TLS provides thread-local storage, the
 * per-CPU memory allocator provides CPU-local storage.
 *
 * Memory pool sets can be created by adding one or more pools into
 * them. They can be used to perform allocation of variable length
 * objects.
 */

#ifdef __cplusplus
extern "C" {
#endif

/*
 * The percpu offset stride can be overridden by the user code.
 * The stride *must* match for all objects belonging to a given pool
 * between arguments to:
 *
 * - rseq_mempool_create(),
 * - rseq_percpu_ptr(),
 * - rseq_mempool_percpu_free().
 */
#define RSEQ_MEMPOOL_STRIDE	(1U << 16)	/* stride: 64kB */

/*
 * Pointers returned by:
 * - rseq_mempool_percpu_malloc(),
 * - rseq_mempool_percpu_zmalloc(),
 * - rseq_mempool_set_percpu_malloc(),
 * - rseq_mempool_set_percpu_zmalloc(),
 *
 * and passed as parameters to:
 * - rseq_percpu_ptr(),
 * - rseq_mempool_percpu_free(),
 *
 * are tagged with __rseq_percpu for use by static analyzers.
 */
#define __rseq_percpu

struct rseq_mempool_attr;
struct rseq_mempool;

/*
 * rseq_mempool_create: Create a memory pool.
 *
 * Create a memory pool for items of size @item_len (rounded to the
 * next power of two).
 *
 * The @attr pointer is used to specify the pool attributes. If NULL,
 * default attribute values are used. The @attr can be destroyed
 * immediately after rseq_mempool_create() returns. The caller keeps
 * ownership of @attr. Default attributes select a global mempool type.
 *
 * The argument @pool_name can be used to give a name to the pool for
 * debugging purposes. It can be NULL if no name is given.
 *
 * Returns a pointer to the created pool. Returns NULL on error, with
 * errno set accordingly:
 *
 *   EINVAL: Invalid argument.
 *   ENOMEM: Not enough resources (memory or pool indexes) available to
 *           allocate pool.
 *
 * In addition, if the attr mmap callback fails, NULL is returned and
 * errno is propagated from the callback. The default callback can
 * return errno=ENOMEM.
 *
 * This API is MT-safe.
 */
struct rseq_mempool *rseq_mempool_create(const char *pool_name,
		size_t item_len, const struct rseq_mempool_attr *attr);
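
/*
 * Example (illustrative sketch, not part of the API): create a pool of
 * 64-byte items with default (global) attributes, then destroy it.
 * Error handling is reduced to abort() for brevity.
 *
 *	struct rseq_mempool *pool;
 *
 *	pool = rseq_mempool_create("example-pool", 64, NULL);
 *	if (!pool)
 *		abort();
 *	...
 *	if (rseq_mempool_destroy(pool))
 *		abort();
 */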

/*
 * rseq_mempool_destroy: Destroy a memory pool.
 *
 * Destroy a memory pool, unmapping its memory and removing the pool
 * entry from the global index. No pointer allocated from the pool
 * should be used after it is destroyed. This includes rseq_percpu_ptr().
 *
 * Argument @pool is a pointer to the pool to destroy.
 *
 * Return values: 0 on success, -1 on error, with errno set accordingly:
 *
 *   ENOENT: Trying to free a pool which was not allocated.
 *
 * If the munmap_func callback fails, -1 is returned and errno is
 * propagated from the callback. The default callback can return
 * errno=EINVAL.
 *
 * This API is MT-safe.
 */
int rseq_mempool_destroy(struct rseq_mempool *pool);

/*
 * rseq_mempool_percpu_malloc: Allocate memory from a per-cpu pool.
 *
 * Allocate an item from a per-cpu @pool. The allocation will reserve
 * an item of the size specified by @item_len (rounded to the next
 * power of two) at pool creation. This effectively reserves space for
 * this item on all CPUs.
 *
 * On success, return a "__rseq_percpu" encoded pointer to the pool
 * item. This encoded pointer is meant to be passed to rseq_percpu_ptr()
 * to be decoded to a valid address before being accessed.
 *
 * Return NULL (errno=ENOMEM) if there is not enough space left in the
 * pool to allocate an item.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool);
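
/*
 * Example (illustrative sketch): allocate a per-cpu counter from a
 * per-cpu @pool and update the instance belonging to CPU 0. The pool
 * is assumed to have been created with item_len >= sizeof(intptr_t)
 * and the per-cpu type.
 *
 *	intptr_t __rseq_percpu *counter;
 *
 *	counter = (intptr_t __rseq_percpu *) rseq_mempool_percpu_malloc(pool);
 *	if (!counter)
 *		abort();
 *	*rseq_percpu_ptr(counter, 0) = 42;
 */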

/*
 * rseq_mempool_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool.
 *
 * Allocate memory for an item within the pool, and zero-initialize its
 * memory on all CPUs. See rseq_mempool_percpu_malloc for details.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool);

/*
 * rseq_mempool_malloc: Allocate memory from a global pool.
 *
 * Wrapper to allocate memory from a global pool, which can be used
 * directly without per-cpu indexing. Would normally be used with
 * pools created with the global type.
 */
static inline
void *rseq_mempool_malloc(struct rseq_mempool *pool)
{
	return (void *) rseq_mempool_percpu_malloc(pool);
}

/*
 * rseq_mempool_zmalloc: Allocate zero-initialized memory from a global pool.
 *
 * Wrapper to allocate memory from a global pool, which can be used
 * directly without per-cpu indexing. Would normally be used with
 * pools created with the global type.
 */
static inline
void *rseq_mempool_zmalloc(struct rseq_mempool *pool)
{
	return (void *) rseq_mempool_percpu_zmalloc(pool);
}

/*
 * rseq_mempool_percpu_free: Free memory from a per-cpu pool.
 *
 * Free an item pointed to by @ptr from its per-cpu pool.
 *
 * The @ptr argument is a __rseq_percpu encoded pointer returned by
 * either:
 *
 * - rseq_mempool_percpu_malloc(),
 * - rseq_mempool_percpu_zmalloc(),
 * - rseq_mempool_set_percpu_malloc(),
 * - rseq_mempool_set_percpu_zmalloc().
 *
 * The optional @stride argument to rseq_mempool_percpu_free() is a
 * configurable stride, which must match the stride used at pool
 * creation. If the argument is not present, the default
 * RSEQ_MEMPOOL_STRIDE is used.
 *
 * This API is MT-safe.
 */
void librseq_mempool_percpu_free(void __rseq_percpu *ptr, size_t stride);

#define rseq_mempool_percpu_free(_ptr, _stride...)	\
	librseq_mempool_percpu_free(_ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE))
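
/*
 * Example (illustrative sketch): the default stride applies when the
 * optional argument is omitted; an explicit stride must be given for
 * pools created with a custom stride (here a hypothetical 1MB stride,
 * with @counter and @other_ptr as hypothetical allocations).
 *
 *	rseq_mempool_percpu_free(counter);
 *	rseq_mempool_percpu_free(other_ptr, 1UL << 20);
 */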

/*
 * rseq_mempool_free: Free memory from a global pool.
 *
 * Free an item pointed to by @ptr from its global pool. Would normally
 * be used with pools created with the global type.
 *
 * The @ptr argument is a pointer returned by either:
 *
 * - rseq_mempool_malloc(),
 * - rseq_mempool_zmalloc(),
 * - rseq_mempool_set_malloc(),
 * - rseq_mempool_set_zmalloc().
 *
 * The optional @stride argument to rseq_mempool_free() is a
 * configurable stride, which must match the stride used at pool
 * creation. If the argument is not present, the default
 * RSEQ_MEMPOOL_STRIDE is used. The stride is needed even for a global
 * pool to know the mapping address range.
 *
 * This API is MT-safe.
 */
#define rseq_mempool_free(_ptr, _stride...)	\
	librseq_mempool_percpu_free((void __rseq_percpu *) _ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE))
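
/*
 * Example (illustrative sketch): global pool round-trip through the
 * wrappers above, assuming @pool was created with the global type and
 * an item_len of at least sizeof(uint64_t).
 *
 *	uint64_t *value;
 *
 *	value = (uint64_t *) rseq_mempool_zmalloc(pool);
 *	if (!value)
 *		abort();
 *	*value = 1;
 *	rseq_mempool_free(value);
 */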

/*
 * rseq_percpu_ptr: Offset a per-cpu pointer for a given CPU.
 *
 * Offset a per-cpu pointer @ptr to get the associated pointer for the
 * given @cpu. The @ptr argument is a __rseq_percpu pointer returned by
 * either:
 *
 * - rseq_mempool_percpu_malloc(),
 * - rseq_mempool_percpu_zmalloc(),
 * - rseq_mempool_set_percpu_malloc(),
 * - rseq_mempool_set_percpu_zmalloc().
 *
 * The macro rseq_percpu_ptr() preserves the type of the @ptr parameter
 * for the returned pointer, but removes the __rseq_percpu annotation.
 *
 * The macro rseq_percpu_ptr() takes an optional @stride argument. If
 * the argument is not present, use the default RSEQ_MEMPOOL_STRIDE.
 * This must match the stride used for pool creation.
 *
 * This API is MT-safe.
 */
#define rseq_percpu_ptr(_ptr, _cpu, _stride...)	\
	((__typeof__(*(_ptr)) *) ((uintptr_t) (_ptr) +	\
		((unsigned int) (_cpu) *	\
			(uintptr_t) RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE))))
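
/*
 * Example (illustrative sketch): sum the per-cpu counter allocated
 * above across all CPUs. @nr_cpus is assumed to match the max_nr_cpus
 * used at pool creation (see rseq_mempool_get_max_nr_cpus() below).
 *
 *	intptr_t sum = 0;
 *	int cpu;
 *
 *	for (cpu = 0; cpu < nr_cpus; cpu++)
 *		sum += *rseq_percpu_ptr(counter, cpu);
 */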

/*
 * rseq_mempool_set_create: Create a pool set.
 *
 * Create a set of pools. Its purpose is to offer a memory allocator API
 * for variable-length items (e.g. variable length strings). When
 * created, the pool set has no pool. Pools can be created and added to
 * the set. One common approach would be to create pools for each
 * relevant power of two allocation size useful for the application.
 * Only one pool can be added to the pool set for each power of two
 * allocation size.
 *
 * Returns a pool set pointer on success, else returns NULL with
 * errno=ENOMEM (out of memory).
 *
 * This API is MT-safe.
 */
struct rseq_mempool_set *rseq_mempool_set_create(void);

/*
 * rseq_mempool_set_destroy: Destroy a pool set.
 *
 * Destroy a pool set and its associated resources. The pools that were
 * added to the pool set are destroyed as well.
 *
 * Returns 0 on success, -1 on failure (or partial failure), with errno
 * set by rseq_mempool_destroy(). Using a pool set after destroy
 * failure is undefined.
 *
 * This API is MT-safe.
 */
int rseq_mempool_set_destroy(struct rseq_mempool_set *pool_set);

/*
 * rseq_mempool_set_add_pool: Add a pool to a pool set.
 *
 * Add a @pool to the @pool_set. On success, its ownership is handed
 * over to the pool set, so the caller should not destroy it explicitly.
 * Only one pool can be added to the pool set for each power of two
 * allocation size.
 *
 * Returns 0 on success, -1 on error with the following errno:
 * - EBUSY: A pool already exists in the pool set for this power of two
 *   allocation size.
 *
 * This API is MT-safe.
 */
int rseq_mempool_set_add_pool(struct rseq_mempool_set *pool_set,
		struct rseq_mempool *pool);
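
/*
 * Example (illustrative sketch, hypothetical names): build a pool set
 * covering allocation sizes from 32 to 256 bytes, one per-cpu pool per
 * power of two. @attr is assumed to have been configured beforehand
 * with rseq_mempool_attr_set_percpu().
 *
 *	struct rseq_mempool_set *set;
 *	size_t len;
 *
 *	set = rseq_mempool_set_create();
 *	if (!set)
 *		abort();
 *	for (len = 32; len <= 256; len <<= 1) {
 *		struct rseq_mempool *pool;
 *
 *		pool = rseq_mempool_create("set-pool", len, attr);
 *		if (!pool)
 *			abort();
 *		if (rseq_mempool_set_add_pool(set, pool))
 *			abort();
 *	}
 */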

/*
 * rseq_mempool_set_percpu_malloc: Allocate memory from a per-cpu pool set.
 *
 * Allocate an item from a per-cpu @pool_set. The allocation will
 * reserve an item of the size specified by @len (rounded to the next
 * power of two). This effectively reserves space for this item on all
 * CPUs.
 *
 * The space reservation will search for the smallest pool within
 * @pool_set which respects the following conditions:
 *
 * - it has an item size large enough to fit @len,
 * - it has space available.
 *
 * On success, return a "__rseq_percpu" encoded pointer to the pool
 * item. This encoded pointer is meant to be passed to rseq_percpu_ptr()
 * to be decoded to a valid address before being accessed.
 *
 * Return NULL (errno=ENOMEM) if there is not enough space left in the
 * pool to allocate an item.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len);

/*
 * rseq_mempool_set_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool set.
 *
 * Allocate memory for an item within the pool set, and zero-initialize
 * its memory on all CPUs. See rseq_mempool_set_percpu_malloc for
 * details.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len);
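
/*
 * Example (illustrative sketch): variable-length allocation from the
 * pool set built above. The set selects the smallest pool whose item
 * size fits the request, so a 100-byte request is served by the
 * 128-byte pool.
 *
 *	char __rseq_percpu *buf;
 *
 *	buf = (char __rseq_percpu *) rseq_mempool_set_percpu_zmalloc(set, 100);
 *	if (!buf)
 *		abort();
 */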

/*
 * rseq_mempool_set_malloc: Allocate memory from a global pool set.
 *
 * Wrapper to allocate memory from a global pool set, which can be used
 * directly without per-cpu indexing. Would normally be used with pools
 * created with the global type.
 */
static inline
void *rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len)
{
	return (void *) rseq_mempool_set_percpu_malloc(pool_set, len);
}

/*
 * rseq_mempool_set_zmalloc: Allocate zero-initialized memory from a global pool set.
 *
 * Wrapper to allocate memory from a global pool set, which can be used
 * directly without per-cpu indexing. Would normally be used with pools
 * created with the global type.
 */
static inline
void *rseq_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
{
	return (void *) rseq_mempool_set_percpu_zmalloc(pool_set, len);
}

/*
 * rseq_mempool_init_numa: Move pages to the NUMA node associated to their CPU topology.
 *
 * For pages allocated within @pool, invoke move_pages(2) with the given
 * @numa_flags to move the pages to the NUMA node associated to their
 * CPU topology.
 *
 * The @numa_flags argument is passed to move_pages(2). The expected flags are:
 *   MPOL_MF_MOVE:     move process-private pages to cpu-specific numa nodes.
 *   MPOL_MF_MOVE_ALL: move shared pages to cpu-specific numa nodes
 *                     (requires CAP_SYS_NICE).
 *
 * Returns 0 on success, else returns -1 with errno set by move_pages(2).
 */
int rseq_mempool_init_numa(struct rseq_mempool *pool, int numa_flags);
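
/*
 * Example (illustrative sketch): move the pages of an existing pool to
 * the NUMA nodes matching their CPU topology. MPOL_MF_MOVE is assumed
 * to come from <numaif.h> (or <linux/mempolicy.h>).
 *
 *	if (rseq_mempool_init_numa(pool, MPOL_MF_MOVE))
 *		perror("rseq_mempool_init_numa");
 */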

/*
 * rseq_mempool_attr_create: Create a pool attribute structure.
 */
struct rseq_mempool_attr *rseq_mempool_attr_create(void);

/*
 * rseq_mempool_attr_destroy: Destroy a pool attribute structure.
 */
void rseq_mempool_attr_destroy(struct rseq_mempool_attr *attr);

/*
 * rseq_mempool_attr_set_mmap: Set pool attribute structure mmap functions.
 *
 * The @mmap_func callback is used to map the memory for the pool.
 *
 * The @munmap_func callback is used to unmap the memory when the pool
 * is destroyed.
 *
 * The @mmap_priv argument is a private data pointer passed to both
 * @mmap_func and @munmap_func callbacks.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_mmap(struct rseq_mempool_attr *attr,
		void *(*mmap_func)(void *priv, size_t len),
		int (*munmap_func)(void *priv, void *ptr, size_t len),
		void *mmap_priv);
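
/*
 * Example (illustrative sketch, hypothetical callback names): custom
 * mapping callbacks backed by anonymous mmap(2)/munmap(2), ignoring
 * the private data pointer.
 *
 *	static void *my_mmap(void *priv, size_t len)
 *	{
 *		void *p;
 *
 *		p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		return p == MAP_FAILED ? NULL : p;
 *	}
 *
 *	static int my_munmap(void *priv, void *ptr, size_t len)
 *	{
 *		return munmap(ptr, len);
 *	}
 *
 *	rseq_mempool_attr_set_mmap(attr, my_mmap, my_munmap, NULL);
 */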

/*
 * rseq_mempool_attr_set_init: Set pool attribute structure memory init functions.
 *
 * The @init_func callback is used to initialize memory after allocation
 * for the pool. The @cpu argument of @init_func, if >= 0, is the CPU to
 * which the range starting at @addr, of length @len, belongs. If @cpu
 * is -1, the range belongs to a global pool. The @init_func callback
 * must return 0 on success, -1 on error with errno set. If @init_func
 * returns failure, the allocation of the pool memory fails, which
 * either causes the pool creation to fail or memory allocation to fail
 * (for extensible memory pools).
 *
 * The @init_priv argument is a private data pointer passed to the
 * @init_func callback.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_init(struct rseq_mempool_attr *attr,
		int (*init_func)(void *priv, void *addr, size_t len, int cpu),
		void *init_priv);
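
/*
 * Example (illustrative sketch, hypothetical callback name): an
 * @init_func which moves each CPU-local range to its NUMA node with
 * the rseq_mempool_range_init_numa() helper declared below. Ranges of
 * a global pool (cpu == -1) are left in place. MPOL_MF_MOVE is assumed
 * to come from <numaif.h>.
 *
 *	static int my_init(void *priv, void *addr, size_t len, int cpu)
 *	{
 *		if (cpu < 0)
 *			return 0;
 *		return rseq_mempool_range_init_numa(addr, len, cpu, MPOL_MF_MOVE);
 *	}
 *
 *	rseq_mempool_attr_set_init(attr, my_init, NULL);
 */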

/*
 * rseq_mempool_attr_set_robust: Set pool robust attribute.
 *
 * The robust pool attribute enables runtime validation of the pool:
 *
 * - Check for double-free of pointers.
 *
 * - Detect memory leaks on pool destruction.
 *
 * - Detect free-list corruption on pool destruction.
 *
 * - Detect poison value corruption on allocation and pool destruction.
 *
 * There is a marginal runtime overhead on malloc/free operations.
 *
 * The memory overhead is (pool->percpu_len / pool->item_len) / CHAR_BIT
 * bytes, over the lifetime of the pool.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_robust(struct rseq_mempool_attr *attr);

/*
 * rseq_mempool_attr_set_percpu: Set pool type as percpu.
 *
 * A pool created with this type is a per-cpu memory pool. The reserved
 * allocation size is @stride, and the maximum CPU value expected
 * is (@max_nr_cpus - 1). A @stride of 0 uses the default
 * RSEQ_MEMPOOL_STRIDE.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_percpu(struct rseq_mempool_attr *attr,
		size_t stride, int max_nr_cpus);
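
/*
 * Example (illustrative sketch): create a per-cpu pool with a
 * hypothetical 1MB stride, sized from the number of configured
 * processors reported by sysconf(3) (requires <unistd.h>). The same
 * stride must then be passed to rseq_percpu_ptr() and
 * rseq_mempool_percpu_free() for items of this pool.
 *
 *	struct rseq_mempool_attr *attr;
 *	struct rseq_mempool *pool;
 *
 *	attr = rseq_mempool_attr_create();
 *	if (!attr)
 *		abort();
 *	if (rseq_mempool_attr_set_percpu(attr, 1UL << 20,
 *			(int) sysconf(_SC_NPROCESSORS_CONF)))
 *		abort();
 *	pool = rseq_mempool_create("percpu-pool", 64, attr);
 *	rseq_mempool_attr_destroy(attr);
 *	if (!pool)
 *		abort();
 */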

/*
 * rseq_mempool_attr_set_global: Set pool type as global.
 *
 * A pool created with this type is a global memory pool. The reserved
 * allocation size is @stride. A @stride of 0 uses the default
 * RSEQ_MEMPOOL_STRIDE.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_global(struct rseq_mempool_attr *attr, size_t stride);

/*
 * rseq_mempool_attr_set_max_nr_ranges: Set upper limit to range allocation.
 *
 * Set an upper limit to range allocation. A @max_nr_ranges value of
 * 0 means no limit (default).
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_max_nr_ranges(struct rseq_mempool_attr *attr,
		unsigned long max_nr_ranges);

/*
 * rseq_mempool_attr_set_poison: Set pool poison value.
 *
 * Set a poison value to be written over freed pool entries. This can
 * be used to anonymize freed memory, and for memory corruption checks
 * with the robust attribute.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_poison(struct rseq_mempool_attr *attr,
		uintptr_t poison);
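
/*
 * Example (illustrative sketch, arbitrary poison value): enable robust
 * validation together with a recognizable poison value, so freed items
 * are overwritten with the poison and corruption of freed memory is
 * reported on allocation and pool destruction.
 *
 *	rseq_mempool_attr_set_robust(attr);
 *	rseq_mempool_attr_set_poison(attr, (uintptr_t) 0xdeadbeefUL);
 */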

/*
 * rseq_mempool_range_init_numa: NUMA initialization helper for memory range.
 *
 * Helper which can be used from mempool_attr @init_func to move a CPU
 * memory range to the NUMA node associated to its topology.
 *
 * Returns 0 on success, -1 with errno set by move_pages(2) on error.
 * Returns -1, errno=ENOSYS if NUMA support is not present.
 */
int rseq_mempool_range_init_numa(void *addr, size_t len, int cpu, int numa_flags);

/*
 * rseq_mempool_get_max_nr_cpus: Get the max_nr_cpus value configured for a pool.
 *
 * Returns a value >= 0 for a per-cpu pool.
 * Returns -1, errno=EINVAL if the mempool is NULL or if the pool has a
 * global pool type.
 */
int rseq_mempool_get_max_nr_cpus(struct rseq_mempool *mempool);

#ifdef __cplusplus
}
#endif

#endif /* _RSEQ_MEMPOOL_H */