/* SPDX-License-Identifier: MIT */
/* SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */

#ifndef _RSEQ_MEMPOOL_H
#define _RSEQ_MEMPOOL_H

#include <rseq/compiler.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdint.h>

/*
 * rseq/mempool.h: rseq memory pool allocator.
 *
 * The rseq memory pool allocator can be configured as either a global
 * allocator (default) or a per-CPU memory allocator.
 *
 * The rseq global memory allocator allows the application to request
 * memory pools of global memory, each containing objects of a given
 * size (rounded up to the next power of 2), reserving a virtual
 * address range of the requested stride.
 *
 * The rseq per-CPU memory allocator allows the application to request
 * memory pools of CPU-local memory, each containing objects of a given
 * size (rounded up to the next power of 2), reserving a virtual
 * address range of the requested stride per CPU, for a given maximum
 * number of CPUs.
 *
 * The per-CPU memory allocator is analogous to TLS (Thread-Local
 * Storage) memory, except that it provides CPU-local rather than
 * thread-local storage.
 *
 * Memory pool sets can be created by adding one or more pools into
 * them. They can be used to perform allocation of variable length
 * objects.
 */

#ifdef __cplusplus
extern "C" {
#endif

/*
 * The percpu offset stride can be overridden by the user code.
 * The stride *must* match for all objects belonging to a given pool
 * between arguments to:
 *
 * - rseq_mempool_create(),
 * - rseq_percpu_ptr(),
 * - rseq_mempool_percpu_free().
 */
#define RSEQ_MEMPOOL_STRIDE	(1U << 16)	/* stride: 64kB */

/*
 * Tag pointers returned by:
 * - rseq_mempool_percpu_malloc(),
 * - rseq_mempool_percpu_zmalloc(),
 * - rseq_mempool_percpu_malloc_init(),
 * - rseq_mempool_set_percpu_malloc(),
 * - rseq_mempool_set_percpu_zmalloc(),
 * - rseq_mempool_set_percpu_malloc_init(),
 *
 * and passed as parameter to:
 * - rseq_percpu_ptr(),
 * - rseq_mempool_percpu_free(),
 *
 * with __rseq_percpu for use by static analyzers.
 */
#define __rseq_percpu

struct rseq_mempool_attr;
struct rseq_mempool;

/*
 * rseq_mempool_create: Create a memory pool.
 *
 * Create a memory pool for items of size @item_len (rounded up to the
 * next power of two).
 *
 * The @attr pointer is used to specify the pool attributes. If NULL,
 * default attribute values are used. The @attr can be destroyed
 * immediately after rseq_mempool_create() returns. The caller keeps
 * ownership of @attr. Default attributes select a global mempool type.
 *
 * The argument @pool_name can be used to give a name to the pool for
 * debugging purposes. It can be NULL if no name is given.
 *
 * Returns a pointer to the created pool. Returns NULL on error, with
 * errno set accordingly:
 *
 * EINVAL: Invalid argument.
 * ENOMEM: Not enough resources (memory or pool indexes) available to
 *         allocate pool.
 *
 * In addition, if the attr mmap callback fails, NULL is returned and
 * errno is propagated from the callback. The default callback can
 * return errno=ENOMEM.
 *
 * This API is MT-safe.
 */
struct rseq_mempool *rseq_mempool_create(const char *pool_name,
		size_t item_len, const struct rseq_mempool_attr *attr);

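/*
 * Example usage (an illustrative sketch, not part of the API): the
 * lifecycle of a per-cpu pool, from attribute creation to pool
 * destruction. The "struct counter" type and the CPU count of 128 are
 * hypothetical.
 *
 *	struct counter { uint64_t count; };
 *
 *	struct rseq_mempool_attr *attr = rseq_mempool_attr_create();
 *	struct rseq_mempool *pool;
 *	struct counter __rseq_percpu *c;
 *
 *	if (!attr)
 *		abort();
 *	if (rseq_mempool_attr_set_percpu(attr, 0, 128))	// 0: default stride
 *		abort();
 *	pool = rseq_mempool_create("counters", sizeof(struct counter), attr);
 *	rseq_mempool_attr_destroy(attr);	// caller keeps ownership of @attr
 *	if (!pool)
 *		abort();
 *	c = rseq_mempool_percpu_zmalloc(pool);
 *	if (!c)
 *		abort();
 *	// ... access each CPU's item through rseq_percpu_ptr(c, cpu) ...
 *	rseq_mempool_percpu_free(c);
 *	if (rseq_mempool_destroy(pool))
 *		abort();
 */
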
/*
 * rseq_mempool_destroy: Destroy a per-cpu memory pool.
 *
 * Destroy a per-cpu memory pool, unmapping its memory and removing the
 * pool entry from the global index. No pointers allocated from the
 * pool should be used when it is destroyed. This includes rseq_percpu_ptr().
 *
 * Argument @pool is a pointer to the per-cpu pool to destroy.
 *
 * Return values: 0 on success, -1 on error, with errno set accordingly:
 *
 * ENOENT: Trying to free a pool which was not allocated.
 *
 * If the munmap_func callback fails, -1 is returned and errno is
 * propagated from the callback. The default callback can return
 * errno=EINVAL.
 *
 * This API is MT-safe.
 */
int rseq_mempool_destroy(struct rseq_mempool *pool);

/*
 * rseq_mempool_percpu_malloc: Allocate memory from a per-cpu pool.
 *
 * Allocate an item from a per-cpu @pool. The allocation will reserve
 * an item of the size specified by @item_len (rounded up to the next
 * power of two) at pool creation. This effectively reserves space for
 * this item on all CPUs.
 *
 * On success, return a "__rseq_percpu" encoded pointer to the pool
 * item. This encoded pointer is meant to be passed to rseq_percpu_ptr()
 * to be decoded to a valid address before being accessed.
 *
 * Return NULL (errno=ENOMEM) if there is not enough space left in the
 * pool to allocate an item.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool);

/*
 * rseq_mempool_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool.
 *
 * Allocate memory for an item within the pool, and zero-initialize its
 * memory on all CPUs. See rseq_mempool_percpu_malloc for details.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool);

/*
 * rseq_mempool_percpu_malloc_init: Allocate initialized memory from a per-cpu pool.
 *
 * Allocate memory for an item within the pool, and initialize its
 * memory on all CPUs with content from @init_ptr of length @init_len.
 * See rseq_mempool_percpu_malloc for details.
 *
 * Return NULL (errno=ENOMEM) if there is not enough space left in the
 * pool to allocate an item. Return NULL (errno=EINVAL) if init_len is
 * larger than the pool item_len.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_percpu_malloc_init(struct rseq_mempool *pool,
		void *init_ptr, size_t init_len);

/*
 * rseq_mempool_malloc: Allocate memory from a global pool.
 *
 * Wrapper to allocate memory from a global pool, which can be
 * used directly without per-cpu indexing. Would normally be used
 * with pools created with max_nr_cpus=1.
 */
static inline
void *rseq_mempool_malloc(struct rseq_mempool *pool)
{
	return (void *) rseq_mempool_percpu_malloc(pool);
}

/*
 * rseq_mempool_zmalloc: Allocate zero-initialized memory from a global pool.
 *
 * Wrapper to allocate memory from a global pool, which can be
 * used directly without per-cpu indexing. Would normally be used
 * with pools created with max_nr_cpus=1.
 */
static inline
void *rseq_mempool_zmalloc(struct rseq_mempool *pool)
{
	return (void *) rseq_mempool_percpu_zmalloc(pool);
}

/*
 * rseq_mempool_malloc_init: Allocate initialized memory from a global pool.
 *
 * Wrapper to allocate memory from a global pool, which can be
 * used directly without per-cpu indexing. Would normally be used
 * with pools created with max_nr_cpus=1.
 */
static inline
void *rseq_mempool_malloc_init(struct rseq_mempool *pool,
		void *init_ptr, size_t init_len)
{
	return (void *) rseq_mempool_percpu_malloc_init(pool, init_ptr, init_len);
}

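/*
 * Example usage (sketch): a global pool used as a plain object pool,
 * with no per-cpu indexing. "struct item" is a hypothetical type and
 * error handling is omitted for brevity.
 *
 *	struct rseq_mempool_attr *attr = rseq_mempool_attr_create();
 *	struct rseq_mempool *pool;
 *	struct item *p;
 *
 *	rseq_mempool_attr_set_global(attr, 0);	// 0: default stride
 *	pool = rseq_mempool_create("items", sizeof(struct item), attr);
 *	rseq_mempool_attr_destroy(attr);
 *	p = rseq_mempool_zmalloc(pool);		// plain pointer, directly usable
 *	// ... use *p ...
 *	rseq_mempool_free(p);
 */
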
/*
 * rseq_mempool_percpu_free: Free memory from a per-cpu pool.
 *
 * Free an item pointed to by @ptr from its per-cpu pool.
 *
 * The @ptr argument is a __rseq_percpu encoded pointer returned by
 * either:
 *
 * - rseq_mempool_percpu_malloc(),
 * - rseq_mempool_percpu_zmalloc(),
 * - rseq_mempool_percpu_malloc_init(),
 * - rseq_mempool_set_percpu_malloc(),
 * - rseq_mempool_set_percpu_zmalloc(),
 * - rseq_mempool_set_percpu_malloc_init().
 *
 * The optional @stride argument to rseq_mempool_percpu_free() is a
 * configurable stride, which must match the stride used at pool
 * creation. If the argument is not present, use the default
 * RSEQ_MEMPOOL_STRIDE.
 *
 * This API is MT-safe.
 */
void librseq_mempool_percpu_free(void __rseq_percpu *ptr, size_t stride);

#define rseq_mempool_percpu_free(_ptr, _stride...)		\
	librseq_mempool_percpu_free(_ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE))

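/*
 * Example usage (sketch): the trailing stride argument is optional.
 * For a pool created with the default stride, both calls below are
 * equivalent; "c" is assumed to be a __rseq_percpu pointer allocated
 * from such a pool.
 *
 *	rseq_mempool_percpu_free(c);
 *	rseq_mempool_percpu_free(c, RSEQ_MEMPOOL_STRIDE);
 */
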
/*
 * rseq_mempool_free: Free memory from a global pool.
 *
 * Free an item pointed to by @ptr from its global pool. Would normally
 * be used with pools created with max_nr_cpus=1.
 *
 * The @ptr argument is a pointer returned by either:
 *
 * - rseq_mempool_malloc(),
 * - rseq_mempool_zmalloc(),
 * - rseq_mempool_malloc_init(),
 * - rseq_mempool_set_malloc(),
 * - rseq_mempool_set_zmalloc(),
 * - rseq_mempool_set_malloc_init().
 *
 * The optional @stride argument to rseq_mempool_free() is a
 * configurable stride, which must match the stride used at pool
 * creation. If the argument is not present, use the default
 * RSEQ_MEMPOOL_STRIDE. The stride is needed even for a global pool
 * to know the mapping address range.
 *
 * This API is MT-safe.
 */
#define rseq_mempool_free(_ptr, _stride...)		\
	librseq_mempool_percpu_free((void __rseq_percpu *) _ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE))

/*
 * rseq_percpu_ptr: Offset a per-cpu pointer for a given CPU.
 *
 * Offset a per-cpu pointer @ptr to get the associated pointer for the
 * given @cpu. The @ptr argument is a __rseq_percpu pointer returned by
 * either:
 *
 * - rseq_mempool_percpu_malloc(),
 * - rseq_mempool_percpu_zmalloc(),
 * - rseq_mempool_percpu_malloc_init(),
 * - rseq_mempool_set_percpu_malloc(),
 * - rseq_mempool_set_percpu_zmalloc(),
 * - rseq_mempool_set_percpu_malloc_init().
 *
 * The macro rseq_percpu_ptr() preserves the type of the @ptr parameter
 * for the returned pointer, but removes the __rseq_percpu annotation.
 *
 * The macro rseq_percpu_ptr() takes an optional @stride argument. If
 * the argument is not present, use the default RSEQ_MEMPOOL_STRIDE.
 * This must match the stride used for pool creation.
 *
 * This API is MT-safe.
 */
#define rseq_percpu_ptr(_ptr, _cpu, _stride...)		\
	((__typeof__(*(_ptr)) *) ((uintptr_t) (_ptr) +	\
		((unsigned int) (_cpu) *		\
			(uintptr_t) RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE))))

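/*
 * Example usage (sketch): sum a per-cpu counter over all CPU indexes
 * configured for the pool, using rseq_mempool_get_max_nr_cpus()
 * (declared at the end of this header). The "c" pointer is assumed to
 * come from rseq_mempool_percpu_zmalloc() on a per-cpu pool of the
 * hypothetical "struct counter { uint64_t count; }".
 *
 *	uint64_t sum = 0;
 *	int cpu, max_nr_cpus = rseq_mempool_get_max_nr_cpus(pool);
 *
 *	for (cpu = 0; cpu < max_nr_cpus; cpu++)
 *		sum += rseq_percpu_ptr(c, cpu)->count;
 */
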
/*
 * rseq_mempool_set_create: Create a pool set.
 *
 * Create a set of pools. Its purpose is to offer a memory allocator API
 * for variable-length items (e.g. variable length strings). When
 * created, the pool set has no pool. Pools can be created and added to
 * the set. One common approach would be to create pools for each
 * relevant power of two allocation size useful for the application.
 * Only one pool can be added to the pool set for each power of two
 * allocation size.
 *
 * Returns a pool set pointer on success, else returns NULL with
 * errno=ENOMEM (out of memory).
 *
 * This API is MT-safe.
 */
struct rseq_mempool_set *rseq_mempool_set_create(void);

/*
 * rseq_mempool_set_destroy: Destroy a pool set.
 *
 * Destroy a pool set and its associated resources. The pools that were
 * added to the pool set are destroyed as well.
 *
 * Returns 0 on success, -1 on failure (or partial failure), with errno
 * set by rseq_mempool_destroy(). Using a pool set after destroy
 * failure is undefined.
 *
 * This API is MT-safe.
 */
int rseq_mempool_set_destroy(struct rseq_mempool_set *pool_set);

/*
 * rseq_mempool_set_add_pool: Add a pool to a pool set.
 *
 * Add a @pool to the @pool_set. On success, its ownership is handed
 * over to the pool set, so the caller should not destroy it explicitly.
 * Only one pool can be added to the pool set for each power of two
 * allocation size.
 *
 * Returns 0 on success, -1 on error with the following errno:
 * - EBUSY: A pool already exists in the pool set for this power of two
 *          allocation size.
 *
 * This API is MT-safe.
 */
int rseq_mempool_set_add_pool(struct rseq_mempool_set *pool_set,
		struct rseq_mempool *pool);

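/*
 * Example usage (sketch): build a pool set covering power-of-two item
 * sizes from 32 to 4096 bytes for variable-length allocations. Error
 * handling is omitted for brevity; pass a percpu attribute instead of
 * NULL to get a per-cpu pool set.
 *
 *	struct rseq_mempool_set *set = rseq_mempool_set_create();
 *	size_t order;
 *
 *	for (order = 5; order <= 12; order++) {
 *		struct rseq_mempool *pool;
 *
 *		pool = rseq_mempool_create("set-pool", 1UL << order, NULL);
 *		rseq_mempool_set_add_pool(set, pool);	// set takes ownership
 *	}
 */
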
/*
 * rseq_mempool_set_percpu_malloc: Allocate memory from a per-cpu pool set.
 *
 * Allocate an item from a per-cpu @pool_set. The allocation will
 * reserve an item of the size specified by @len (rounded up to the
 * next power of two). This effectively reserves space for this item
 * on all CPUs.
 *
 * The space reservation will search for the smallest pool within
 * @pool_set which respects the following conditions:
 *
 * - it has an item size large enough to fit @len,
 * - it has space available.
 *
 * On success, return a "__rseq_percpu" encoded pointer to the pool
 * item. This encoded pointer is meant to be passed to rseq_percpu_ptr()
 * to be decoded to a valid address before being accessed.
 *
 * Return NULL (errno=ENOMEM) if there is not enough space left in the
 * pool to allocate an item.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len);

/*
 * rseq_mempool_set_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool set.
 *
 * Allocate memory for an item within the pool, and zero-initialize its
 * memory on all CPUs. See rseq_mempool_set_percpu_malloc for details.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len);

/*
 * rseq_mempool_set_percpu_malloc_init: Allocate initialized memory from a per-cpu pool set.
 *
 * Allocate memory for an item within the pool, and initialize its
 * memory on all CPUs with content from @init_ptr of length @len.
 * See rseq_mempool_set_percpu_malloc for details.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_set_percpu_malloc_init(struct rseq_mempool_set *pool_set,
		void *init_ptr, size_t len);

/*
 * rseq_mempool_set_malloc: Allocate memory from a global pool set.
 *
 * Wrapper to allocate memory from a global pool, which can be
 * used directly without per-cpu indexing. Would normally be used
 * with pools created with max_nr_cpus=1.
 */
static inline
void *rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len)
{
	return (void *) rseq_mempool_set_percpu_malloc(pool_set, len);
}

/*
 * rseq_mempool_set_zmalloc: Allocate zero-initialized memory from a global pool set.
 *
 * Wrapper to allocate memory from a global pool, which can be
 * used directly without per-cpu indexing. Would normally be used
 * with pools created with max_nr_cpus=1.
 */
static inline
void *rseq_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
{
	return (void *) rseq_mempool_set_percpu_zmalloc(pool_set, len);
}

/*
 * rseq_mempool_set_malloc_init: Allocate initialized memory from a global pool set.
 *
 * Wrapper to allocate memory from a global pool, which can be
 * used directly without per-cpu indexing. Would normally be used
 * with pools created with max_nr_cpus=1.
 */
static inline
void *rseq_mempool_set_malloc_init(struct rseq_mempool_set *pool_set, void *init_ptr, size_t len)
{
	return (void *) rseq_mempool_set_percpu_malloc_init(pool_set, init_ptr, len);
}

/*
 * rseq_mempool_init_numa: Move pages to the NUMA node associated with their CPU topology.
 *
 * For pages allocated within @pool, invoke move_pages(2) with the given
 * @numa_flags to move the pages to the NUMA node associated with their
 * CPU topology.
 *
 * Argument @numa_flags are passed to move_pages(2). The expected flags are:
 * MPOL_MF_MOVE:     move process-private pages to cpu-specific numa nodes.
 * MPOL_MF_MOVE_ALL: move shared pages to cpu-specific numa nodes
 *                   (requires CAP_SYS_NICE).
 *
 * Returns 0 on success, else returns -1 with errno set by move_pages(2).
 */
int rseq_mempool_init_numa(struct rseq_mempool *pool, int numa_flags);

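/*
 * Example usage (sketch): after creating a per-cpu pool, move its
 * process-private pages to the NUMA node of each CPU. <numaif.h> is
 * assumed to be included for MPOL_MF_MOVE.
 *
 *	if (rseq_mempool_init_numa(pool, MPOL_MF_MOVE))
 *		perror("rseq_mempool_init_numa");
 */
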
/*
 * rseq_mempool_attr_create: Create a pool attribute structure.
 */
struct rseq_mempool_attr *rseq_mempool_attr_create(void);

/*
 * rseq_mempool_attr_destroy: Destroy a pool attribute structure.
 */
void rseq_mempool_attr_destroy(struct rseq_mempool_attr *attr);

/*
 * rseq_mempool_attr_set_init: Set pool attribute structure memory init functions.
 *
 * The @init_func callback is used to initialize memory after allocation
 * for the pool. The @cpu argument of @init_func, if >= 0, is the cpu to
 * which the range starting at @addr of length @len belongs. If cpu is
 * -1, it means the range belongs to a global pool. The @init_func
 * callback must return 0 on success, -1 on error with errno set. If
 * @init_func returns failure, the allocation of the pool memory fails,
 * which either causes the pool creation to fail or memory allocation to
 * fail (for extensible memory pools).
 *
 * The @init_priv argument is a private data pointer passed to the
 * @init_func callback.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_init(struct rseq_mempool_attr *attr,
		int (*init_func)(void *priv, void *addr, size_t len, int cpu),
		void *init_priv);

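/*
 * Example usage (sketch): an @init_func which moves each newly
 * allocated per-cpu range to its NUMA node using
 * rseq_mempool_range_init_numa() (declared below), leaving global
 * (@cpu == -1) ranges untouched. MPOL_MF_MOVE comes from <numaif.h>.
 *
 *	static int init_range(void *priv, void *addr, size_t len, int cpu)
 *	{
 *		if (cpu < 0)
 *			return 0;	// global pool range: nothing to do
 *		return rseq_mempool_range_init_numa(addr, len, cpu, MPOL_MF_MOVE);
 *	}
 *
 *	rseq_mempool_attr_set_init(attr, init_range, NULL);
 */
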
/*
 * rseq_mempool_attr_set_robust: Set pool robust attribute.
 *
 * The robust pool attribute enables runtime validation of the pool:
 *
 * - Check for double-free of pointers.
 *
 * - Detect memory leaks on pool destruction.
 *
 * - Detect free-list corruption on pool destruction.
 *
 * - Detect poison value corruption on allocation and pool destruction.
 *
 * There is a marginal runtime overhead on malloc/free operations.
 *
 * The memory overhead is (pool->percpu_len / pool->item_len) / CHAR_BIT
 * bytes, over the lifetime of the pool.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_robust(struct rseq_mempool_attr *attr);

/*
 * rseq_mempool_attr_set_percpu: Set pool type as percpu.
 *
 * A pool created with this type is a per-cpu memory pool. The reserved
 * allocation size is @stride, and the maximum CPU value expected
 * is (@max_nr_cpus - 1). A @stride of 0 uses the default
 * RSEQ_MEMPOOL_STRIDE.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_percpu(struct rseq_mempool_attr *attr,
		size_t stride, int max_nr_cpus);

/*
 * rseq_mempool_attr_set_global: Set pool type as global.
 *
 * A pool created with this type is a global memory pool. The reserved
 * allocation size is @stride. A @stride of 0 uses the default
 * RSEQ_MEMPOOL_STRIDE.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_global(struct rseq_mempool_attr *attr, size_t stride);

/*
 * rseq_mempool_attr_set_max_nr_ranges: Set upper-limit to range allocation.
 *
 * Set an upper-limit to range allocation. A @max_nr_ranges value of
 * 0 means no limit (default).
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_max_nr_ranges(struct rseq_mempool_attr *attr,
		unsigned long max_nr_ranges);

/*
 * rseq_mempool_attr_set_poison: Set pool poison value.
 *
 * Set a poison value to be set over freed pool entries. This can be
 * used to anonymize freed memory, and for memory corruption checks
 * with the robust attribute.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_poison(struct rseq_mempool_attr *attr,
		uintptr_t poison);

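/*
 * Example usage (sketch): combine the robust and poison attributes so
 * that freed items are overwritten with a recognizable pattern and
 * corruption is reported. The poison value below is arbitrary.
 *
 *	rseq_mempool_attr_set_robust(attr);
 *	rseq_mempool_attr_set_poison(attr, 0xdeadbeefUL);
 */
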
enum rseq_mempool_populate_policy {
	/*
	 * RSEQ_MEMPOOL_POPULATE_PRIVATE_NONE (default):
	 * Do not populate pages for any of the CPUs when creating the
	 * mempool. Rely on copy-on-write (COW) of per-cpu pages to
	 * populate per-cpu pages from the initial values pages on
	 * first write. This mempool is only meant for single-process
	 * use (private mapping). Note that this type of pool cannot
	 * be accessed from child processes across fork. It is
	 * however valid to destroy a pool from a child process after
	 * a fork to free its remaining resources.
	 */
	RSEQ_MEMPOOL_POPULATE_PRIVATE_NONE = 0,
	/*
	 * RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL:
	 * Populate pages for all CPUs from 0 to (max_nr_cpus - 1)
	 * when creating the mempool. This mempool is only meant for
	 * single-process use (private mapping).
	 */
	RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL = 1,
};

/*
 * rseq_mempool_attr_set_populate_policy: Set pool page populate policy.
 *
 * Set page populate policy for the mempool.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_populate_policy(struct rseq_mempool_attr *attr,
		enum rseq_mempool_populate_policy policy);

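/*
 * Example usage (sketch): pre-populate the pages of all CPUs at pool
 * creation, trading a larger resident memory footprint for the absence
 * of COW page faults on first write:
 *
 *	rseq_mempool_attr_set_populate_policy(attr,
 *			RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL);
 */
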
/*
 * rseq_mempool_range_init_numa: NUMA initialization helper for memory range.
 *
 * Helper which can be used from mempool_attr @init_func to move a CPU
 * memory range to the NUMA node associated with its topology.
 *
 * Returns 0 on success, -1 with errno set by move_pages(2) on error.
 * Returns -1, errno=ENOSYS if NUMA support is not present.
 */
int rseq_mempool_range_init_numa(void *addr, size_t len, int cpu, int numa_flags);

/*
 * rseq_mempool_get_max_nr_cpus: Get the max_nr_cpus value configured for a pool.
 *
 * Returns a value >= 0 for a per-cpu pool.
 * Returns -1, errno=EINVAL if the mempool is NULL or if the pool has a
 * global pool type.
 */
int rseq_mempool_get_max_nr_cpus(struct rseq_mempool *mempool);

#ifdef __cplusplus
}
#endif

#endif /* _RSEQ_MEMPOOL_H */