/* SPDX-License-Identifier: MIT */
/* SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */
/* SPDX-FileCopyrightText: 2024 Olivier Dion <odion@efficios.com> */

#ifndef _RSEQ_MEMPOOL_H
#define _RSEQ_MEMPOOL_H

#include <rseq/compiler.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdint.h>

/*
 * rseq/mempool.h: rseq memory pool allocator.
 *
 * The rseq memory pool allocator can be configured as either a global
 * allocator (default) or a per-CPU memory allocator.
 *
 * The rseq global memory allocator allows the application to request
 * memory pools of global memory, each containing objects of a given
 * size (rounded to the next power of 2), reserving a virtual address
 * range of the requested stride.
 *
 * The rseq per-CPU memory allocator allows the application to request
 * memory pools of CPU-local memory, each containing objects of a given
 * size (rounded to the next power of 2), reserving a virtual address
 * range of the requested stride per CPU, for a given maximum number of
 * CPUs.
 *
 * The per-CPU memory allocator is analogous to TLS (Thread-Local
 * Storage) memory: whereas TLS provides Thread-Local Storage, the
 * per-CPU memory allocator provides CPU-Local Storage.
 *
 * Memory pool sets can be created by adding one or more pools into
 * them. They can be used to perform allocation of variable length
 * objects.
 */

#ifdef __cplusplus
extern "C" {
#endif

/*
 * The percpu offset stride can be overridden by the user code.
 * The stride *must* match for all objects belonging to a given pool
 * between arguments to:
 *
 * - rseq_mempool_create(),
 * - rseq_percpu_ptr(),
 * - rseq_mempool_percpu_free().
 */
#define RSEQ_MEMPOOL_STRIDE	(1U << 16)	/* stride: 64kB */

/*
 * Tag pointers returned by:
 * - rseq_mempool_percpu_malloc(),
 * - rseq_mempool_percpu_zmalloc(),
 * - rseq_mempool_percpu_malloc_init(),
 * - rseq_mempool_set_percpu_malloc(),
 * - rseq_mempool_set_percpu_zmalloc(),
 * - rseq_mempool_set_percpu_malloc_init(),
 *
 * and passed as parameter to:
 * - rseq_percpu_ptr(),
 * - rseq_mempool_percpu_free(),
 *
 * with __rseq_percpu for use by static analyzers.
 */
#define __rseq_percpu

struct rseq_mempool_attr;
struct rseq_mempool;

/*
 * rseq_mempool_create: Create a memory pool.
 *
 * Create a memory pool for items of size @item_len (rounded to
 * next power of two).
 *
 * The @attr pointer is used to specify the pool attributes. If NULL,
 * default attribute values are used. The @attr can be destroyed
 * immediately after rseq_mempool_create() returns. The caller keeps
 * ownership of @attr. Default attributes select a global mempool type.
 *
 * The argument @pool_name can be used to give a name to the pool for
 * debugging purposes. It can be NULL if no name is given.
 *
 * Returns a pointer to the created memory pool. Return NULL on error,
 * with errno set accordingly:
 *
 *   EINVAL: Invalid argument.
 *   ENOMEM: Not enough resources (memory or pool indexes) available to
 *           allocate pool.
 *
 * In addition, if the attr mmap callback fails, NULL is returned and
 * errno is propagated from the callback. The default callback can
 * return errno=ENOMEM.
 *
 * This API is MT-safe.
 */
struct rseq_mempool *rseq_mempool_create(const char *pool_name,
		size_t item_len, const struct rseq_mempool_attr *attr);

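/*
 * Example (illustrative sketch, not part of the API): create a pool of
 * "struct counter" items with default (global) attributes. The
 * "struct counter" type and the pool name are hypothetical.
 *
 *	struct counter { uint64_t count; };
 *	struct rseq_mempool *pool;
 *
 *	pool = rseq_mempool_create("counter-pool", sizeof(struct counter), NULL);
 *	if (!pool)
 *		abort();
 */
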
/*
 * rseq_mempool_destroy: Destroy a memory pool.
 *
 * Destroy a memory pool, unmapping its memory and removing the
 * pool entry from the global index. No pointers allocated from the
 * pool should be used when it is destroyed. This includes rseq_percpu_ptr().
 *
 * Argument @pool is a pointer to the pool to destroy.
 *
 * Return values: 0 on success, -1 on error, with errno set accordingly:
 *
 *   ENOENT: Trying to free a pool which was not allocated.
 *
 * If the munmap_func callback fails, -1 is returned and errno is
 * propagated from the callback. The default callback can return
 * errno=EINVAL.
 *
 * This API is MT-safe.
 */
int rseq_mempool_destroy(struct rseq_mempool *pool);

/*
 * rseq_mempool_percpu_malloc: Allocate memory from a per-cpu pool.
 *
 * Allocate an item from a per-cpu @pool. The allocation will reserve
 * an item of the size specified by @item_len (rounded to next power of
 * two) at pool creation. This effectively reserves space for this item
 * on all CPUs.
 *
 * On success, return a "__rseq_percpu" encoded pointer to the pool
 * item. This encoded pointer is meant to be passed to rseq_percpu_ptr()
 * to be decoded to a valid address before being accessed.
 *
 * Return NULL (errno=ENOMEM) if there is not enough space left in the
 * pool to allocate an item.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool);

/*
 * rseq_mempool_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool.
 *
 * Allocate memory for an item within the pool, and zero-initialize its
 * memory on all CPUs. See rseq_mempool_percpu_malloc for details.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool);

/*
 * rseq_mempool_percpu_malloc_init: Allocate initialized memory from a per-cpu pool.
 *
 * Allocate memory for an item within the pool, and initialize its
 * memory on all CPUs with content from @init_ptr of length @init_len.
 * See rseq_mempool_percpu_malloc for details.
 *
 * Return NULL (errno=ENOMEM) if there is not enough space left in the
 * pool to allocate an item. Return NULL (errno=EINVAL) if init_len is
 * larger than the pool item_len.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_percpu_malloc_init(struct rseq_mempool *pool,
		void *init_ptr, size_t init_len);

/*
 * rseq_mempool_malloc: Allocate memory from a global pool.
 *
 * Wrapper to allocate memory from a global pool, which can be
 * used directly without per-cpu indexing. Would normally be used
 * with pools created with max_nr_cpus=1.
 */
static inline
void *rseq_mempool_malloc(struct rseq_mempool *pool)
{
	return (void *) rseq_mempool_percpu_malloc(pool);
}

/*
 * rseq_mempool_zmalloc: Allocate zero-initialized memory from a global pool.
 *
 * Wrapper to allocate memory from a global pool, which can be
 * used directly without per-cpu indexing. Would normally be used
 * with pools created with max_nr_cpus=1.
 */
static inline
void *rseq_mempool_zmalloc(struct rseq_mempool *pool)
{
	return (void *) rseq_mempool_percpu_zmalloc(pool);
}

/*
 * rseq_mempool_malloc_init: Allocate initialized memory from a global pool.
 *
 * Wrapper to allocate memory from a global pool, which can be
 * used directly without per-cpu indexing. Would normally be used
 * with pools created with max_nr_cpus=1.
 */
static inline
void *rseq_mempool_malloc_init(struct rseq_mempool *pool,
		void *init_ptr, size_t init_len)
{
	return (void *) rseq_mempool_percpu_malloc_init(pool, init_ptr, init_len);
}

/*
 * rseq_mempool_percpu_free: Free memory from a per-cpu pool.
 *
 * Free an item pointed to by @ptr from its per-cpu pool.
 *
 * The @ptr argument is a __rseq_percpu encoded pointer returned by
 * either:
 *
 * - rseq_mempool_percpu_malloc(),
 * - rseq_mempool_percpu_zmalloc(),
 * - rseq_mempool_percpu_malloc_init(),
 * - rseq_mempool_set_percpu_malloc(),
 * - rseq_mempool_set_percpu_zmalloc(),
 * - rseq_mempool_set_percpu_malloc_init().
 *
 * The @stride optional argument to rseq_mempool_percpu_free() is a
 * configurable stride, which must match the stride used at pool
 * creation. If the argument is not present, use the default
 * RSEQ_MEMPOOL_STRIDE.
 *
 * This API is MT-safe.
 */
void librseq_mempool_percpu_free(void __rseq_percpu *ptr, size_t stride);

#define rseq_mempool_percpu_free(_ptr, _stride...)		\
	librseq_mempool_percpu_free(_ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE))

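/*
 * Example (illustrative sketch): because @stride is optional, both
 * calls below are equivalent for a pool created with the default
 * stride.
 *
 *	rseq_mempool_percpu_free(ptr);
 *	rseq_mempool_percpu_free(ptr, RSEQ_MEMPOOL_STRIDE);
 */
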
/*
 * rseq_mempool_free: Free memory from a global pool.
 *
 * Free an item pointed to by @ptr from its global pool. Would normally
 * be used with pools created with max_nr_cpus=1.
 *
 * The @ptr argument is a pointer returned by either:
 *
 * - rseq_mempool_malloc(),
 * - rseq_mempool_zmalloc(),
 * - rseq_mempool_malloc_init(),
 * - rseq_mempool_set_malloc(),
 * - rseq_mempool_set_zmalloc(),
 * - rseq_mempool_set_malloc_init().
 *
 * The @stride optional argument to rseq_mempool_free() is a
 * configurable stride, which must match the stride used at pool
 * creation. If the argument is not present, use the default
 * RSEQ_MEMPOOL_STRIDE. The stride is needed even for a global pool
 * to know the mapping address range.
 *
 * This API is MT-safe.
 */
#define rseq_mempool_free(_ptr, _stride...)		\
	librseq_mempool_percpu_free((void __rseq_percpu *) _ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE))

/*
 * rseq_percpu_ptr: Offset a per-cpu pointer for a given CPU.
 *
 * Offset a per-cpu pointer @ptr to get the associated pointer for the
 * given @cpu. The @ptr argument is a __rseq_percpu pointer returned by
 * either:
 *
 * - rseq_mempool_percpu_malloc(),
 * - rseq_mempool_percpu_zmalloc(),
 * - rseq_mempool_percpu_malloc_init(),
 * - rseq_mempool_set_percpu_malloc(),
 * - rseq_mempool_set_percpu_zmalloc(),
 * - rseq_mempool_set_percpu_malloc_init().
 *
 * The macro rseq_percpu_ptr() preserves the type of the @ptr parameter
 * for the returned pointer, but removes the __rseq_percpu annotation.
 *
 * The macro rseq_percpu_ptr() takes an optional @stride argument. If
 * the argument is not present, use the default RSEQ_MEMPOOL_STRIDE.
 * This must match the stride used for pool creation.
 *
 * This API is MT-safe.
 */
#define rseq_percpu_ptr(_ptr, _cpu, _stride...)		\
	((__typeof__(*(_ptr)) *) ((uintptr_t) (_ptr) +	\
		((unsigned int) (_cpu) *		\
			(uintptr_t) RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_MEMPOOL_STRIDE))))

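/*
 * Example (illustrative sketch): allocate a zero-initialized per-cpu
 * counter, increment the current CPU's instance, then free it. This
 * assumes rseq_current_cpu_raw() from <rseq/rseq.h> and a hypothetical
 * "struct counter" type; for brevity it ignores migration between the
 * CPU query and the increment.
 *
 *	struct counter { uint64_t count; };
 *	struct counter __rseq_percpu *p;
 *
 *	p = (struct counter __rseq_percpu *)
 *			rseq_mempool_percpu_zmalloc(pool);
 *	if (!p)
 *		abort();
 *	rseq_percpu_ptr(p, rseq_current_cpu_raw())->count++;
 *	rseq_mempool_percpu_free(p);
 */
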
/*
 * rseq_mempool_set_create: Create a pool set.
 *
 * Create a set of pools. Its purpose is to offer a memory allocator API
 * for variable-length items (e.g. variable length strings). When
 * created, the pool set has no pool. Pools can be created and added to
 * the set. One common approach would be to create pools for each
 * relevant power of two allocation size useful for the application.
 * Only one pool can be added to the pool set for each power of two
 * allocation size.
 *
 * Returns a pool set pointer on success, else returns NULL with
 * errno=ENOMEM (out of memory).
 *
 * This API is MT-safe.
 */
struct rseq_mempool_set *rseq_mempool_set_create(void);

/*
 * rseq_mempool_set_destroy: Destroy a pool set.
 *
 * Destroy a pool set and its associated resources. The pools that were
 * added to the pool set are destroyed as well.
 *
 * Returns 0 on success, -1 on failure (or partial failure), with errno
 * set by rseq_mempool_destroy(). Using a pool set after destroy
 * failure is undefined.
 *
 * This API is MT-safe.
 */
int rseq_mempool_set_destroy(struct rseq_mempool_set *pool_set);

/*
 * rseq_mempool_set_add_pool: Add a pool to a pool set.
 *
 * Add a @pool to the @pool_set. On success, its ownership is handed
 * over to the pool set, so the caller should not destroy it explicitly.
 * Only one pool can be added to the pool set for each power of two
 * allocation size.
 *
 * Returns 0 on success, -1 on error with the following errno:
 * - EBUSY: A pool already exists in the pool set for this power of two
 *          allocation size.
 *
 * This API is MT-safe.
 */
int rseq_mempool_set_add_pool(struct rseq_mempool_set *pool_set,
		struct rseq_mempool *pool);

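/*
 * Example (illustrative sketch): build a per-cpu pool set covering
 * power of two allocation sizes from 32 to 256 bytes, using a per-cpu
 * attribute (see rseq_mempool_attr_set_percpu() declared below) sized
 * for up to 128 CPUs. The CPU count is illustrative and error handling
 * is reduced to abort() for brevity.
 *
 *	struct rseq_mempool_attr *attr;
 *	struct rseq_mempool_set *set;
 *	size_t sz;
 *
 *	attr = rseq_mempool_attr_create();
 *	if (!attr || rseq_mempool_attr_set_percpu(attr, 0, 128))
 *		abort();
 *	set = rseq_mempool_set_create();
 *	if (!set)
 *		abort();
 *	for (sz = 32; sz <= 256; sz <<= 1) {
 *		struct rseq_mempool *pool;
 *
 *		pool = rseq_mempool_create(NULL, sz, attr);
 *		if (!pool || rseq_mempool_set_add_pool(set, pool))
 *			abort();
 *	}
 *	rseq_mempool_attr_destroy(attr);
 */
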
/*
 * rseq_mempool_set_percpu_malloc: Allocate memory from a per-cpu pool set.
 *
 * Allocate an item from a per-cpu @pool_set. The allocation will
 * reserve an item of the size specified by @len (rounded to next power
 * of two). This effectively reserves space for this item on all CPUs.
 *
 * The space reservation will search for the smallest pool within
 * @pool_set which respects the following conditions:
 *
 * - it has an item size large enough to fit @len,
 * - it has space available.
 *
 * On success, return a "__rseq_percpu" encoded pointer to the pool
 * item. This encoded pointer is meant to be passed to rseq_percpu_ptr()
 * to be decoded to a valid address before being accessed.
 *
 * Return NULL (errno=ENOMEM) if there is not enough space left in the
 * pool to allocate an item.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len);

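/*
 * Example (illustrative sketch): allocate a 100-byte item from the
 * pool set built in the example above. The request is served by the
 * smallest pool with items large enough, here the 128-byte pool.
 *
 *	void __rseq_percpu *item;
 *
 *	item = rseq_mempool_set_percpu_malloc(set, 100);
 *	if (!item)
 *		abort();
 */
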
/*
 * rseq_mempool_set_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool set.
 *
 * Allocate memory for an item within the pool, and zero-initialize its
 * memory on all CPUs. See rseq_mempool_set_percpu_malloc for details.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len);

/*
 * rseq_mempool_set_percpu_malloc_init: Allocate initialized memory from a per-cpu pool set.
 *
 * Allocate memory for an item within the pool, and initialize its
 * memory on all CPUs with content from @init_ptr of length @len.
 * See rseq_mempool_set_percpu_malloc for details.
 *
 * This API is MT-safe.
 */
void __rseq_percpu *rseq_mempool_set_percpu_malloc_init(struct rseq_mempool_set *pool_set,
		void *init_ptr, size_t len);

/*
 * rseq_mempool_set_malloc: Allocate memory from a global pool set.
 *
 * Wrapper to allocate memory from a global pool, which can be
 * used directly without per-cpu indexing. Would normally be used
 * with pools created with max_nr_cpus=1.
 */
static inline
void *rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len)
{
	return (void *) rseq_mempool_set_percpu_malloc(pool_set, len);
}

/*
 * rseq_mempool_set_zmalloc: Allocate zero-initialized memory from a global pool set.
 *
 * Wrapper to allocate memory from a global pool, which can be
 * used directly without per-cpu indexing. Would normally be used
 * with pools created with max_nr_cpus=1.
 */
static inline
void *rseq_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
{
	return (void *) rseq_mempool_set_percpu_zmalloc(pool_set, len);
}

/*
 * rseq_mempool_set_malloc_init: Allocate initialized memory from a global pool set.
 *
 * Wrapper to allocate memory from a global pool, which can be
 * used directly without per-cpu indexing. Would normally be used
 * with pools created with max_nr_cpus=1.
 */
static inline
void *rseq_mempool_set_malloc_init(struct rseq_mempool_set *pool_set, void *init_ptr, size_t len)
{
	return (void *) rseq_mempool_set_percpu_malloc_init(pool_set, init_ptr, len);
}

/*
 * rseq_mempool_init_numa: Move pages to the NUMA node associated to their CPU topology.
 *
 * For pages allocated within @pool, invoke move_pages(2) with the given
 * @numa_flags to move the pages to the NUMA node associated to their
 * CPU topology.
 *
 * Argument @numa_flags are passed to move_pages(2). The expected flags are:
 *
 *   MPOL_MF_MOVE:     move process-private pages to cpu-specific numa
 *                     nodes.
 *   MPOL_MF_MOVE_ALL: move shared pages to cpu-specific numa nodes
 *                     (requires CAP_SYS_NICE).
 *
 * Returns 0 on success, else return -1 with errno set by move_pages(2).
 */
int rseq_mempool_init_numa(struct rseq_mempool *pool, int numa_flags);

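/*
 * Example (illustrative sketch): after creating a per-cpu pool, move
 * its process-private pages to the NUMA nodes matching their CPU
 * topology. MPOL_MF_MOVE comes from <numaif.h>.
 *
 *	if (rseq_mempool_init_numa(pool, MPOL_MF_MOVE))
 *		perror("rseq_mempool_init_numa");
 */
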
/*
 * rseq_mempool_attr_create: Create a pool attribute structure.
 */
struct rseq_mempool_attr *rseq_mempool_attr_create(void);

/*
 * rseq_mempool_attr_destroy: Destroy a pool attribute structure.
 */
void rseq_mempool_attr_destroy(struct rseq_mempool_attr *attr);

/*
 * rseq_mempool_attr_set_init: Set pool attribute structure memory init functions.
 *
 * The @init_func callback is used to initialize memory after allocation
 * for the pool. The @cpu argument of @init_func, if >= 0, is the cpu to
 * which the range starting at @addr of length @len belongs. If cpu is
 * -1, it means the range belongs to a global pool. The @init_func
 * callback must return 0 on success, -1 on error with errno set. If
 * @init_func returns failure, the allocation of the pool memory fails,
 * which either causes the pool creation to fail or memory allocation to
 * fail (for extensible memory pools).
 *
 * The @init_priv argument is a private data pointer passed to the
 * @init_func callback.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_init(struct rseq_mempool_attr *attr,
		int (*init_func)(void *priv, void *addr, size_t len, int cpu),
		void *init_priv);

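/*
 * Example (illustrative sketch): an @init_func callback moving each
 * newly allocated CPU range to its NUMA node, using the
 * rseq_mempool_range_init_numa() helper declared further below. The
 * "move_range_to_numa_node" name is hypothetical; MPOL_MF_MOVE comes
 * from <numaif.h>.
 *
 *	static int move_range_to_numa_node(void *priv, void *addr,
 *			size_t len, int cpu)
 *	{
 *		return rseq_mempool_range_init_numa(addr, len, cpu,
 *				MPOL_MF_MOVE);
 *	}
 *
 *	rseq_mempool_attr_set_init(attr, move_range_to_numa_node, NULL);
 */
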
/*
 * rseq_mempool_attr_set_robust: Set pool robust attribute.
 *
 * The robust pool attribute enables runtime validation of the pool:
 *
 * - Check for double-free of pointers.
 *
 * - Detect memory leaks on pool destruction.
 *
 * - Detect free-list corruption on pool destruction.
 *
 * - Detect poison value corruption on allocation and pool destruction.
 *
 * There is a marginal runtime overhead on malloc/free operations.
 *
 * The memory overhead is (pool->percpu_len / pool->item_len) / CHAR_BIT
 * bytes, over the lifetime of the pool.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_robust(struct rseq_mempool_attr *attr);

/*
 * rseq_mempool_attr_set_percpu: Set pool type as percpu.
 *
 * A pool created with this type is a per-cpu memory pool. The reserved
 * allocation size is @stride, and the maximum CPU value expected
 * is (@max_nr_cpus - 1). A @stride of 0 uses the default
 * RSEQ_MEMPOOL_STRIDE.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_percpu(struct rseq_mempool_attr *attr,
		size_t stride, int max_nr_cpus);

/*
 * rseq_mempool_attr_set_global: Set pool type as global.
 *
 * A pool created with this type is a global memory pool. The reserved
 * allocation size is @stride. A @stride of 0 uses the default
 * RSEQ_MEMPOOL_STRIDE.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_global(struct rseq_mempool_attr *attr, size_t stride);

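/*
 * Example (illustrative sketch): configure a robust per-cpu pool for
 * up to 64 CPUs with the default stride. The pool name, item type and
 * CPU count are illustrative only.
 *
 *	struct rseq_mempool_attr *attr;
 *	struct rseq_mempool *pool;
 *
 *	attr = rseq_mempool_attr_create();
 *	if (!attr)
 *		abort();
 *	if (rseq_mempool_attr_set_percpu(attr, 0, 64))
 *		abort();
 *	if (rseq_mempool_attr_set_robust(attr))
 *		abort();
 *	pool = rseq_mempool_create("robust-pool", sizeof(uint64_t), attr);
 *	rseq_mempool_attr_destroy(attr);
 *	if (!pool)
 *		abort();
 */
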
/*
 * rseq_mempool_attr_set_max_nr_ranges: Set upper-limit to range allocation.
 *
 * Set an upper-limit to range allocation. A @max_nr_ranges value of
 * 0 means no limit (default).
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_max_nr_ranges(struct rseq_mempool_attr *attr,
		unsigned long max_nr_ranges);

/*
 * rseq_mempool_attr_set_poison: Set pool poison value.
 *
 * Set a poison value to be set over freed pool entries. This can be
 * used to anonymize freed memory, and for memory corruption checks
 * with the robust attribute.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_poison(struct rseq_mempool_attr *attr,
		uintptr_t poison);

enum rseq_mempool_populate_policy {
	/*
	 * RSEQ_MEMPOOL_POPULATE_PRIVATE_NONE (default):
	 * Do not populate pages for any of the CPUs when creating the
	 * mempool. Rely on copy-on-write (COW) of per-cpu pages to
	 * populate per-cpu pages from the initial values pages on
	 * first write. This mempool is only meant for single-process
	 * use (private mapping). Note that this type of pool cannot
	 * be accessed from child processes across fork. It is
	 * however valid to destroy a pool from a child process after
	 * a fork to free its remaining resources.
	 */
	RSEQ_MEMPOOL_POPULATE_PRIVATE_NONE = 0,
	/*
	 * RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL:
	 * Populate pages for all CPUs from 0 to (max_nr_cpus - 1)
	 * when creating the mempool. This mempool is only meant for
	 * single-process use (private mapping).
	 */
	RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL = 1,
};

/*
 * rseq_mempool_attr_set_populate_policy: Set pool page populate policy.
 *
 * Set page populate policy for the mempool.
 *
 * Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
 */
int rseq_mempool_attr_set_populate_policy(struct rseq_mempool_attr *attr,
		enum rseq_mempool_populate_policy policy);

/*
 * rseq_mempool_range_init_numa: NUMA initialization helper for memory range.
 *
 * Helper which can be used from mempool_attr @init_func to move a CPU
 * memory range to the NUMA node associated to its topology.
 *
 * Returns 0 on success, -1 with errno set by move_pages(2) on error.
 * Returns -1, errno=ENOSYS if NUMA support is not present.
 */
int rseq_mempool_range_init_numa(void *addr, size_t len, int cpu, int numa_flags);

/*
 * rseq_mempool_get_max_nr_cpus: Get the max_nr_cpus value configured for a pool.
 *
 * Returns a value >= 0 for a per-cpu pool.
 * Returns -1, errno=EINVAL if the mempool is NULL or if the pool has a
 * global pool type.
 */
int rseq_mempool_get_max_nr_cpus(struct rseq_mempool *mempool);

#ifdef __cplusplus
}
#endif

#endif /* _RSEQ_MEMPOOL_H */