*/
#define __rseq_percpu
-struct rseq_pool_attr;
-struct rseq_percpu_pool;
+struct rseq_mempool_attr;
+struct rseq_mempool;
/*
 * rseq_mempool_create: Create a per-cpu memory pool.
*
* This API is MT-safe.
*/
-struct rseq_percpu_pool *rseq_percpu_pool_create(const char *pool_name,
+struct rseq_mempool *rseq_mempool_create(const char *pool_name,
size_t item_len, size_t percpu_stride, int max_nr_cpus,
- const struct rseq_pool_attr *attr);
+ const struct rseq_mempool_attr *attr);
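For orientation, a minimal creation/teardown sketch (the item type struct my_item is hypothetical; the argument values mirror the tests further down, which pass a stride of 0 and CPU_SETSIZE as the CPU count):

    struct rseq_mempool *pool;

    /* The tests below pass a 0 stride, presumably selecting the default stride. */
    pool = rseq_mempool_create("my_item", sizeof(struct my_item),
            0, CPU_SETSIZE, NULL);
    if (!pool)
        abort();
    /* ... allocate, use and free items ... */
    if (rseq_mempool_destroy(pool))
        perror("rseq_mempool_destroy");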
/*
- * rseq_percpu_pool_destroy: Destroy a per-cpu memory pool.
+ * rseq_mempool_destroy: Destroy a per-cpu memory pool.
*
* Destroy a per-cpu memory pool, unmapping its memory and removing the
* pool entry from the global index. No pointers allocated from the
*
* This API is MT-safe.
*/
-int rseq_percpu_pool_destroy(struct rseq_percpu_pool *pool);
+int rseq_mempool_destroy(struct rseq_mempool *pool);
/*
* rseq_percpu_malloc: Allocate memory from a per-cpu pool.
*
* This API is MT-safe.
*/
-void __rseq_percpu *rseq_percpu_malloc(struct rseq_percpu_pool *pool);
+void __rseq_percpu *rseq_percpu_malloc(struct rseq_mempool *pool);
/*
 * rseq_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool.
*
* This API is MT-safe.
*/
-void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_percpu_pool *pool);
+void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_mempool *pool);
/*
* rseq_percpu_free: Free memory from a per-cpu pool.
#define rseq_percpu_ptr(ptr, cpu) __rseq_percpu_ptr(ptr, cpu, RSEQ_PERCPU_STRIDE)
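A sketch of the allocate/access/free cycle for a pool created as above, assuming a hypothetical item type struct my_item with a count field; rseq_percpu_ptr() resolves the address of a given CPU's copy of the item, as the tests below do with their cpuptr variables:

    struct my_item __rseq_percpu *item;
    int cpu;

    item = (struct my_item __rseq_percpu *) rseq_percpu_zmalloc(pool);
    if (!item)
        abort();
    for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
        struct my_item *cpu_item = rseq_percpu_ptr(item, cpu);

        cpu_item->count++;      /* Touch this CPU's copy of the item. */
    }
    rseq_percpu_free(item);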
/*
- * rseq_percpu_pool_set_create: Create a pool set.
+ * rseq_mempool_set_create: Create a pool set.
*
* Create a set of pools. Its purpose is to offer a memory allocator API
* for variable-length items (e.g. variable length strings). When
*
* This API is MT-safe.
*/
-struct rseq_percpu_pool_set *rseq_percpu_pool_set_create(void);
+struct rseq_mempool_set *rseq_mempool_set_create(void);
/*
- * rseq_percpu_pool_set_destroy: Destroy a pool set.
+ * rseq_mempool_set_destroy: Destroy a pool set.
*
* Destroy a pool set and its associated resources. The pools that were
* added to the pool set are destroyed as well.
*
* This API is MT-safe.
*/
-int rseq_percpu_pool_set_destroy(struct rseq_percpu_pool_set *pool_set);
+int rseq_mempool_set_destroy(struct rseq_mempool_set *pool_set);
/*
- * rseq_percpu_pool_set_add_pool: Add a pool to a pool set.
+ * rseq_mempool_set_add_pool: Add a pool to a pool set.
*
* Add a @pool to the @pool_set. On success, its ownership is handed
* over to the pool set, so the caller should not destroy it explicitly.
*
* This API is MT-safe.
*/
-int rseq_percpu_pool_set_add_pool(struct rseq_percpu_pool_set *pool_set,
- struct rseq_percpu_pool *pool);
+int rseq_mempool_set_add_pool(struct rseq_mempool_set *pool_set,
+ struct rseq_mempool *pool);
/*
- * rseq_percpu_pool_set_malloc: Allocate memory from a per-cpu pool set.
+ * rseq_percpu_mempool_set_malloc: Allocate memory from a per-cpu pool set.
*
 * Allocate an item from a per-cpu pool of @pool_set. The allocation will reserve
* an item of the size specified by @len (rounded to next power of
*
* This API is MT-safe.
*/
-void __rseq_percpu *rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len);
+void __rseq_percpu *rseq_percpu_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len);
/*
- * rseq_percpu_pool_set_zmalloc: Allocated zero-initialized memory from a per-cpu pool set.
+ * rseq_percpu_mempool_set_zmalloc: Allocate zero-initialized memory from a per-cpu pool set.
*
* Allocate memory for an item within the pool, and zero-initialize its
- * memory on all CPUs. See rseq_percpu_pool_set_malloc for details.
+ * memory on all CPUs. See rseq_percpu_mempool_set_malloc for details.
*
* This API is MT-safe.
*/
-void __rseq_percpu *rseq_percpu_pool_set_zmalloc(struct rseq_percpu_pool_set *pool_set, size_t len);
+void __rseq_percpu *rseq_percpu_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len);
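A pool-set sketch tying the calls above together (pool names and sizes are illustrative, and freeing with rseq_percpu_free() assumes the pools use the default stride): the set is backed by fixed-size pools and serves each request from the smallest pool whose item size is large enough.

    struct rseq_mempool_set *set;
    struct rseq_mempool *pool;
    void __rseq_percpu *p;

    set = rseq_mempool_set_create();
    if (!set)
        abort();
    /* Ownership of the pool is handed over to the set on success. */
    pool = rseq_mempool_create("set-32", 32, 0, CPU_SETSIZE, NULL);
    if (!pool || rseq_mempool_set_add_pool(set, pool))
        abort();
    /* A 24-byte request is rounded up to the next power of two (32). */
    p = rseq_percpu_mempool_set_malloc(set, 24);
    if (!p)
        abort();
    rseq_percpu_free(p);
    /* Destroying the set also destroys the pools that were added to it. */
    if (rseq_mempool_set_destroy(set))
        perror("rseq_mempool_set_destroy");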
/*
- * rseq_percpu_pool_init_numa: Move pages to the NUMA node associated to their CPU topology.
+ * rseq_mempool_init_numa: Move pages to the NUMA node associated with their CPU topology.
*
* For pages allocated within @pool, invoke move_pages(2) with the given
* @numa_flags to move the pages to the NUMA node associated to their
*
 * Returns 0 on success, else returns -1 with errno set by move_pages(2).
*/
-int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool, int numa_flags);
+int rseq_mempool_init_numa(struct rseq_mempool *pool, int numa_flags);
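A NUMA placement sketch for a pool created as above, assuming libnuma headers are available; MPOL_MF_MOVE from <numaif.h> is the move_pages(2) flag for moving pages owned by the calling process:

    #include <numaif.h>     /* MPOL_MF_MOVE */

    if (rseq_mempool_init_numa(pool, MPOL_MF_MOVE))
        perror("rseq_mempool_init_numa");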
/*
- * rseq_pool_attr_create: Create a pool attribute structure.
+ * rseq_mempool_attr_create: Create a pool attribute structure.
*/
-struct rseq_pool_attr *rseq_pool_attr_create(void);
+struct rseq_mempool_attr *rseq_mempool_attr_create(void);
/*
- * rseq_pool_attr_destroy: Destroy a pool attribute structure.
+ * rseq_mempool_attr_destroy: Destroy a pool attribute structure.
*/
-void rseq_pool_attr_destroy(struct rseq_pool_attr *attr);
+void rseq_mempool_attr_destroy(struct rseq_mempool_attr *attr);
/*
- * rseq_pool_attr_set_mmap: Set pool attribute structure mmap functions.
+ * rseq_mempool_attr_set_mmap: Set pool attribute structure mmap functions.
*
 * The @mmap_func callback is used to map the memory for the pool.
*
*
* Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
*/
-int rseq_pool_attr_set_mmap(struct rseq_pool_attr *attr,
+int rseq_mempool_attr_set_mmap(struct rseq_mempool_attr *attr,
void *(*mmap_func)(void *priv, size_t len),
int (*munmap_func)(void *priv, void *ptr, size_t len),
void *mmap_priv);
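A sketch of plugging in custom mapping callbacks; the wrapper names are hypothetical, and returning NULL from the map callback on failure is an assumption about the expected error convention:

    #include <sys/mman.h>

    static void *my_mmap(void *priv __attribute__((unused)), size_t len)
    {
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        return p == MAP_FAILED ? NULL : p;
    }

    static int my_munmap(void *priv __attribute__((unused)), void *ptr, size_t len)
    {
        return munmap(ptr, len);
    }

    /* ... */
    struct rseq_mempool_attr *attr = rseq_mempool_attr_create();

    if (!attr || rseq_mempool_attr_set_mmap(attr, my_mmap, my_munmap, NULL))
        abort();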
/*
- * rseq_pool_attr_set_robust: Set pool robust attribute.
+ * rseq_mempool_attr_set_robust: Set pool robust attribute.
*
* The robust pool attribute enables runtime validation of the pool:
*
*
* Returns 0 on success, -1 with errno=EINVAL if arguments are invalid.
*/
-int rseq_pool_attr_set_robust(struct rseq_pool_attr *attr);
+int rseq_mempool_attr_set_robust(struct rseq_mempool_attr *attr);
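Putting the attribute calls together, a robust pool is created the way the tests below do it (pool name and item type are illustrative); the attribute object can be destroyed right after creation because the pool stores its own copy of the attributes:

    struct rseq_mempool_attr *attr;
    struct rseq_mempool *pool;

    attr = rseq_mempool_attr_create();
    if (!attr || rseq_mempool_attr_set_robust(attr))
        abort();
    pool = rseq_mempool_create("robust-pool", sizeof(struct my_item),
            RSEQ_PERCPU_STRIDE, CPU_SETSIZE, attr);
    rseq_mempool_attr_destroy(attr);
    if (!pool)
        abort();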
#ifdef __cplusplus
}
#define MOVE_PAGES_BATCH_SIZE 4096
-#define RANGE_HEADER_OFFSET sizeof(struct rseq_percpu_pool_range)
+#define RANGE_HEADER_OFFSET sizeof(struct rseq_mempool_range)
struct free_list_node;
struct free_list_node *next;
};
-struct rseq_pool_attr {
+struct rseq_mempool_attr {
bool mmap_set;
void *(*mmap_func)(void *priv, size_t len);
int (*munmap_func)(void *priv, void *ptr, size_t len);
bool robust_set;
};
-struct rseq_percpu_pool_range;
+struct rseq_mempool_range;
-struct rseq_percpu_pool_range {
- struct rseq_percpu_pool_range *next;
- struct rseq_percpu_pool *pool; /* Backward ref. to container pool. */
+struct rseq_mempool_range {
+ struct rseq_mempool_range *next;
+ struct rseq_mempool *pool; /* Backward ref. to container pool. */
void *header;
void *base;
size_t next_unused;
unsigned long *alloc_bitmap;
};
-struct rseq_percpu_pool {
+struct rseq_mempool {
/* Linked-list of ranges. */
- struct rseq_percpu_pool_range *ranges;
+ struct rseq_mempool_range *ranges;
size_t item_len;
size_t percpu_stride;
/* This lock protects allocation/free within the pool. */
pthread_mutex_t lock;
- struct rseq_pool_attr attr;
+ struct rseq_mempool_attr attr;
char *name;
};
* 2. A pool set can contain NULL pool entries, in which case the next
* large enough entry will be used for allocation.
*/
-struct rseq_percpu_pool_set {
+struct rseq_mempool_set {
/* This lock protects add vs malloc/zmalloc within the pool set. */
pthread_mutex_t lock;
- struct rseq_percpu_pool *entries[POOL_SET_NR_ENTRIES];
+ struct rseq_mempool *entries[POOL_SET_NR_ENTRIES];
};
static
-void *__rseq_pool_percpu_ptr(struct rseq_percpu_pool *pool, int cpu,
+void *__rseq_pool_percpu_ptr(struct rseq_mempool *pool, int cpu,
uintptr_t item_offset, size_t stride)
{
	/* TODO: Implement multi-range support. */
}
static
-void rseq_percpu_zero_item(struct rseq_percpu_pool *pool, uintptr_t item_offset)
+void rseq_percpu_zero_item(struct rseq_mempool *pool, uintptr_t item_offset)
{
int i;
//which cannot use __rseq_pool_percpu_ptr.
#if 0 //#ifdef HAVE_LIBNUMA
static
-int rseq_percpu_pool_range_init_numa(struct rseq_percpu_pool *pool, struct rseq_percpu_pool_range *range, int numa_flags)
+int rseq_mempool_range_init_numa(struct rseq_mempool *pool, struct rseq_mempool_range *range, int numa_flags)
{
unsigned long nr_pages, page_len;
long ret;
return 0;
}
-int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool, int numa_flags)
+int rseq_mempool_init_numa(struct rseq_mempool *pool, int numa_flags)
{
- struct rseq_percpu_pool_range *range;
+ struct rseq_mempool_range *range;
int ret;
if (!numa_flags)
return 0;
for (range = pool->ranges; range; range = range->next) {
- ret = rseq_percpu_pool_range_init_numa(pool, range, numa_flags);
+ ret = rseq_mempool_range_init_numa(pool, range, numa_flags);
if (ret)
return ret;
}
return 0;
}
#else
-int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool __attribute__((unused)),
+int rseq_mempool_init_numa(struct rseq_mempool *pool __attribute__((unused)),
int numa_flags __attribute__((unused)))
{
return 0;
}
static
-int create_alloc_bitmap(struct rseq_percpu_pool *pool, struct rseq_percpu_pool_range *range)
+int create_alloc_bitmap(struct rseq_mempool *pool, struct rseq_mempool_range *range)
{
size_t count;
}
static
-const char *get_pool_name(const struct rseq_percpu_pool *pool)
+const char *get_pool_name(const struct rseq_mempool *pool)
{
return pool->name ? : "<anonymous>";
}
static
-bool addr_in_pool(const struct rseq_percpu_pool *pool, void *addr)
+bool addr_in_pool(const struct rseq_mempool *pool, void *addr)
{
- struct rseq_percpu_pool_range *range;
+ struct rseq_mempool_range *range;
for (range = pool->ranges; range; range = range->next) {
if (addr >= range->base && addr < range->base + range->next_unused)
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void check_free_list(const struct rseq_percpu_pool *pool)
+void check_free_list(const struct rseq_mempool *pool)
{
size_t total_item = 0, total_never_allocated = 0, total_freed = 0,
max_list_traversal = 0, traversal_iteration = 0;
- struct rseq_percpu_pool_range *range;
+ struct rseq_mempool_range *range;
if (!pool->attr.robust_set)
return;
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void destroy_alloc_bitmap(struct rseq_percpu_pool *pool, struct rseq_percpu_pool_range *range)
+void destroy_alloc_bitmap(struct rseq_mempool *pool, struct rseq_mempool_range *range)
{
unsigned long *bitmap = range->alloc_bitmap;
size_t count, total_leaks = 0;
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-int rseq_percpu_pool_range_destroy(struct rseq_percpu_pool *pool,
- struct rseq_percpu_pool_range *range)
+int rseq_mempool_range_destroy(struct rseq_mempool *pool,
+ struct rseq_mempool_range *range)
{
destroy_alloc_bitmap(pool, range);
/* range is a header located one page before the aligned mapping. */
* @pre_header before the mapping.
*/
static
-void *aligned_mmap_anonymous(struct rseq_percpu_pool *pool,
+void *aligned_mmap_anonymous(struct rseq_mempool *pool,
size_t page_size, size_t len, size_t alignment,
void **pre_header, size_t pre_header_len)
{
}
static
-struct rseq_percpu_pool_range *rseq_percpu_pool_range_create(struct rseq_percpu_pool *pool)
+struct rseq_mempool_range *rseq_mempool_range_create(struct rseq_mempool *pool)
{
- struct rseq_percpu_pool_range *range;
+ struct rseq_mempool_range *range;
unsigned long page_size;
void *header;
void *base;
&header, page_size);
if (!base)
return NULL;
- range = (struct rseq_percpu_pool_range *) (base - RANGE_HEADER_OFFSET);
+ range = (struct rseq_mempool_range *) (base - RANGE_HEADER_OFFSET);
range->pool = pool;
range->base = base;
range->header = header;
return range;
error_alloc:
- (void) rseq_percpu_pool_range_destroy(pool, range);
+ (void) rseq_mempool_range_destroy(pool, range);
return NULL;
}
-int rseq_percpu_pool_destroy(struct rseq_percpu_pool *pool)
+int rseq_mempool_destroy(struct rseq_mempool *pool)
{
- struct rseq_percpu_pool_range *range, *next_range;
+ struct rseq_mempool_range *range, *next_range;
int ret = 0;
if (!pool)
check_free_list(pool);
/* Iteration safe against removal. */
for (range = pool->ranges; range && (next_range = range->next, 1); range = next_range) {
- if (rseq_percpu_pool_range_destroy(pool, range))
+ if (rseq_mempool_range_destroy(pool, range))
goto end;
/* Update list head to keep list coherent in case of partial failure. */
pool->ranges = next_range;
return ret;
}
-struct rseq_percpu_pool *rseq_percpu_pool_create(const char *pool_name,
+struct rseq_mempool *rseq_mempool_create(const char *pool_name,
size_t item_len, size_t percpu_stride, int max_nr_cpus,
- const struct rseq_pool_attr *_attr)
+ const struct rseq_mempool_attr *_attr)
{
- struct rseq_percpu_pool *pool;
- struct rseq_pool_attr attr = {};
+ struct rseq_mempool *pool;
+ struct rseq_mempool_attr attr = {};
int order;
/* Make sure each item is large enough to contain free list pointers. */
attr.mmap_priv = NULL;
}
- pool = calloc(1, sizeof(struct rseq_percpu_pool));
+ pool = calloc(1, sizeof(struct rseq_mempool));
if (!pool)
return NULL;
pool->item_order = order;
//TODO: implement multi-range support.
- pool->ranges = rseq_percpu_pool_range_create(pool);
+ pool->ranges = rseq_mempool_range_create(pool);
if (!pool->ranges)
goto error_alloc;
return pool;
error_alloc:
- rseq_percpu_pool_destroy(pool);
+ rseq_mempool_destroy(pool);
errno = ENOMEM;
return NULL;
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void set_alloc_slot(struct rseq_percpu_pool *pool, size_t item_offset)
+void set_alloc_slot(struct rseq_mempool *pool, size_t item_offset)
{
unsigned long *bitmap = pool->ranges->alloc_bitmap;
size_t item_index = item_offset >> pool->item_order;
}
static
-void __rseq_percpu *__rseq_percpu_malloc(struct rseq_percpu_pool *pool, bool zeroed)
+void __rseq_percpu *__rseq_percpu_malloc(struct rseq_mempool *pool, bool zeroed)
{
struct free_list_node *node;
uintptr_t item_offset;
return addr;
}
-void __rseq_percpu *rseq_percpu_malloc(struct rseq_percpu_pool *pool)
+void __rseq_percpu *rseq_percpu_malloc(struct rseq_mempool *pool)
{
return __rseq_percpu_malloc(pool, false);
}
-void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_percpu_pool *pool)
+void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_mempool *pool)
{
return __rseq_percpu_malloc(pool, true);
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void clear_alloc_slot(struct rseq_percpu_pool *pool, size_t item_offset)
+void clear_alloc_slot(struct rseq_mempool *pool, size_t item_offset)
{
unsigned long *bitmap = pool->ranges->alloc_bitmap;
size_t item_index = item_offset >> pool->item_order;
{
uintptr_t ptr = (uintptr_t) _ptr;
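	/*
	 * The range base (the CPU 0 data area) is aligned on the per-cpu
	 * stride, so masking the item address with the stride recovers it;
	 * the struct rseq_mempool_range header sits RANGE_HEADER_OFFSET
	 * bytes before that base.
	 */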
void *range_base = (void *) (ptr & (~(percpu_stride - 1)));
- struct rseq_percpu_pool_range *range = (struct rseq_percpu_pool_range *) (range_base - RANGE_HEADER_OFFSET);
- struct rseq_percpu_pool *pool = range->pool;
+ struct rseq_mempool_range *range = (struct rseq_mempool_range *) (range_base - RANGE_HEADER_OFFSET);
+ struct rseq_mempool *pool = range->pool;
uintptr_t item_offset = ptr & (percpu_stride - 1);
struct free_list_node *head, *item;
pthread_mutex_unlock(&pool->lock);
}
-struct rseq_percpu_pool_set *rseq_percpu_pool_set_create(void)
+struct rseq_mempool_set *rseq_mempool_set_create(void)
{
- struct rseq_percpu_pool_set *pool_set;
+ struct rseq_mempool_set *pool_set;
- pool_set = calloc(1, sizeof(struct rseq_percpu_pool_set));
+ pool_set = calloc(1, sizeof(struct rseq_mempool_set));
if (!pool_set)
return NULL;
pthread_mutex_init(&pool_set->lock, NULL);
return pool_set;
}
-int rseq_percpu_pool_set_destroy(struct rseq_percpu_pool_set *pool_set)
+int rseq_mempool_set_destroy(struct rseq_mempool_set *pool_set)
{
int order, ret;
for (order = POOL_SET_MIN_ENTRY; order < POOL_SET_NR_ENTRIES; order++) {
- struct rseq_percpu_pool *pool = pool_set->entries[order];
+ struct rseq_mempool *pool = pool_set->entries[order];
if (!pool)
continue;
- ret = rseq_percpu_pool_destroy(pool);
+ ret = rseq_mempool_destroy(pool);
if (ret)
return ret;
pool_set->entries[order] = NULL;
}
/* Ownership of pool is handed over to pool set on success. */
-int rseq_percpu_pool_set_add_pool(struct rseq_percpu_pool_set *pool_set, struct rseq_percpu_pool *pool)
+int rseq_mempool_set_add_pool(struct rseq_mempool_set *pool_set, struct rseq_mempool *pool)
{
size_t item_order = pool->item_order;
int ret = 0;
}
static
-void __rseq_percpu *__rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len, bool zeroed)
+void __rseq_percpu *__rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len, bool zeroed)
{
int order, min_order = POOL_SET_MIN_ENTRY;
- struct rseq_percpu_pool *pool;
+ struct rseq_mempool *pool;
void __rseq_percpu *addr;
order = rseq_get_count_order_ulong(len);
return addr;
}
-void __rseq_percpu *rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len)
+void __rseq_percpu *rseq_percpu_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len)
{
- return __rseq_percpu_pool_set_malloc(pool_set, len, false);
+ return __rseq_mempool_set_malloc(pool_set, len, false);
}
-void __rseq_percpu *rseq_percpu_pool_set_zmalloc(struct rseq_percpu_pool_set *pool_set, size_t len)
+void __rseq_percpu *rseq_percpu_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
{
- return __rseq_percpu_pool_set_malloc(pool_set, len, true);
+ return __rseq_mempool_set_malloc(pool_set, len, true);
}
-struct rseq_pool_attr *rseq_pool_attr_create(void)
+struct rseq_mempool_attr *rseq_mempool_attr_create(void)
{
- return calloc(1, sizeof(struct rseq_pool_attr));
+ return calloc(1, sizeof(struct rseq_mempool_attr));
}
-void rseq_pool_attr_destroy(struct rseq_pool_attr *attr)
+void rseq_mempool_attr_destroy(struct rseq_mempool_attr *attr)
{
free(attr);
}
-int rseq_pool_attr_set_mmap(struct rseq_pool_attr *attr,
+int rseq_mempool_attr_set_mmap(struct rseq_mempool_attr *attr,
void *(*mmap_func)(void *priv, size_t len),
int (*munmap_func)(void *priv, void *ptr, size_t len),
void *mmap_priv)
return 0;
}
-int rseq_pool_attr_set_robust(struct rseq_pool_attr *attr)
+int rseq_mempool_attr_set_robust(struct rseq_mempool_attr *attr)
{
if (!attr) {
errno = EINVAL;
{
struct test_data __rseq_percpu *ptr;
struct test_data *iter, *tmp;
- struct rseq_percpu_pool *mempool;
- struct rseq_pool_attr *attr;
+ struct rseq_mempool *mempool;
+ struct rseq_mempool_attr *attr;
uint64_t count = 0;
LIST_HEAD(list);
int ret, i;
- attr = rseq_pool_attr_create();
+ attr = rseq_mempool_attr_create();
ok(attr, "Create pool attribute");
- ret = rseq_pool_attr_set_robust(attr);
+ ret = rseq_mempool_attr_set_robust(attr);
ok(ret == 0, "Setting mempool robust attribute");
- mempool = rseq_percpu_pool_create("test_data",
+ mempool = rseq_mempool_create("test_data",
sizeof(struct test_data),
stride, CPU_SETSIZE, attr);
ok(mempool, "Create mempool of size %zu", stride);
- rseq_pool_attr_destroy(attr);
+ rseq_mempool_attr_destroy(attr);
for (;;) {
struct test_data *cpuptr;
ptr = iter->backref;
__rseq_percpu_free(ptr, stride);
}
- ret = rseq_percpu_pool_destroy(mempool);
+ ret = rseq_mempool_destroy(mempool);
ok(ret == 0, "Destroy mempool");
}
-static void test_robust_double_free(struct rseq_percpu_pool *pool)
+static void test_robust_double_free(struct rseq_mempool *pool)
{
struct test_data __rseq_percpu *ptr;
rseq_percpu_free(ptr);
}
-static void test_robust_corrupt_after_free(struct rseq_percpu_pool *pool)
+static void test_robust_corrupt_after_free(struct rseq_mempool *pool)
{
struct test_data __rseq_percpu *ptr;
struct test_data *cpuptr;
rseq_percpu_free(ptr);
cpuptr->value = (uintptr_t) test_robust_corrupt_after_free;
- rseq_percpu_pool_destroy(pool);
+ rseq_mempool_destroy(pool);
}
-static void test_robust_memory_leak(struct rseq_percpu_pool *pool)
+static void test_robust_memory_leak(struct rseq_mempool *pool)
{
(void) rseq_percpu_malloc(pool);
- rseq_percpu_pool_destroy(pool);
+ rseq_mempool_destroy(pool);
}
-static void test_robust_free_list_corruption(struct rseq_percpu_pool *pool)
+static void test_robust_free_list_corruption(struct rseq_mempool *pool)
{
struct test_data __rseq_percpu *ptr;
struct test_data *cpuptr;
(void) rseq_percpu_malloc(pool);
}
-static int run_robust_test(void (*test)(struct rseq_percpu_pool*),
- struct rseq_percpu_pool *pool)
+static int run_robust_test(void (*test)(struct rseq_mempool*),
+ struct rseq_mempool *pool)
{
pid_t cpid;
int status;
static void run_robust_tests(void)
{
- struct rseq_pool_attr *attr;
- struct rseq_percpu_pool *pool;
+ struct rseq_mempool_attr *attr;
+ struct rseq_mempool *pool;
- attr = rseq_pool_attr_create();
+ attr = rseq_mempool_attr_create();
- rseq_pool_attr_set_robust(attr);
+ rseq_mempool_attr_set_robust(attr);
- pool = rseq_percpu_pool_create("mempool-robust",
+ pool = rseq_mempool_create("mempool-robust",
sizeof(void*), RSEQ_PERCPU_STRIDE, 1,
attr);
- rseq_pool_attr_destroy(attr);
+ rseq_mempool_attr_destroy(attr);
ok(run_robust_test(test_robust_double_free, pool),
"robust-double-free");
ok(run_robust_test(test_robust_free_list_corruption, pool),
"robust-free-list-corruption");
- rseq_percpu_pool_destroy(pool);
+ rseq_mempool_destroy(pool);
}
int main(void)
pthread_t test_threads[num_threads];
struct spinlock_test_data __rseq_percpu *data;
struct spinlock_thread_test_data thread_data[num_threads];
- struct rseq_percpu_pool *mempool;
+ struct rseq_mempool *mempool;
- mempool = rseq_percpu_pool_create("spinlock_test_data",
+ mempool = rseq_mempool_create("spinlock_test_data",
sizeof(struct spinlock_test_data),
0, CPU_SETSIZE, NULL);
if (!mempool) {
- perror("rseq_percpu_pool_create");
+ perror("rseq_mempool_create");
abort();
}
data = (struct spinlock_test_data __rseq_percpu *)rseq_percpu_zmalloc(mempool);
assert(sum == (uint64_t)opt_reps * num_threads);
rseq_percpu_free(data);
- ret = rseq_percpu_pool_destroy(mempool);
+ ret = rseq_mempool_destroy(mempool);
if (ret) {
- perror("rseq_percpu_pool_destroy");
+ perror("rseq_mempool_destroy");
abort();
}
}
pthread_t test_threads[num_threads];
struct inc_test_data __rseq_percpu *data;
struct inc_thread_test_data thread_data[num_threads];
- struct rseq_percpu_pool *mempool;
+ struct rseq_mempool *mempool;
- mempool = rseq_percpu_pool_create("inc_test_data",
+ mempool = rseq_mempool_create("inc_test_data",
sizeof(struct inc_test_data),
0, CPU_SETSIZE, NULL);
if (!mempool) {
- perror("rseq_percpu_pool_create");
+ perror("rseq_mempool_create");
abort();
}
data = (struct inc_test_data __rseq_percpu *)rseq_percpu_zmalloc(mempool);
assert(sum == (uint64_t)opt_reps * num_threads);
rseq_percpu_free(data);
- ret = rseq_percpu_pool_destroy(mempool);
+ ret = rseq_mempool_destroy(mempool);
if (ret) {
- perror("rseq_percpu_pool_destroy");
+ perror("rseq_mempool_destroy");
abort();
}
}
struct percpu_list __rseq_percpu *list;
pthread_t test_threads[num_threads];
cpu_set_t allowed_cpus;
- struct rseq_percpu_pool *mempool;
+ struct rseq_mempool *mempool;
- mempool = rseq_percpu_pool_create("percpu_list", sizeof(struct percpu_list),
+ mempool = rseq_mempool_create("percpu_list", sizeof(struct percpu_list),
0, CPU_SETSIZE, NULL);
if (!mempool) {
- perror("rseq_percpu_pool_create");
+ perror("rseq_mempool_create");
abort();
}
list = (struct percpu_list __rseq_percpu *)rseq_percpu_zmalloc(mempool);
*/
assert(sum == expected_sum);
rseq_percpu_free(list);
- ret = rseq_percpu_pool_destroy(mempool);
+ ret = rseq_mempool_destroy(mempool);
if (ret) {
- perror("rseq_percpu_pool_destroy");
+ perror("rseq_mempool_destroy");
abort();
}
}
struct percpu_buffer __rseq_percpu *buffer;
pthread_t test_threads[num_threads];
cpu_set_t allowed_cpus;
- struct rseq_percpu_pool *mempool;
+ struct rseq_mempool *mempool;
- mempool = rseq_percpu_pool_create("percpu_buffer", sizeof(struct percpu_buffer),
+ mempool = rseq_mempool_create("percpu_buffer", sizeof(struct percpu_buffer),
0, CPU_SETSIZE, NULL);
if (!mempool) {
- perror("rseq_percpu_pool_create");
+ perror("rseq_mempool_create");
abort();
}
buffer = (struct percpu_buffer __rseq_percpu *)rseq_percpu_zmalloc(mempool);
*/
assert(sum == expected_sum);
rseq_percpu_free(buffer);
- ret = rseq_percpu_pool_destroy(mempool);
+ ret = rseq_mempool_destroy(mempool);
if (ret) {
- perror("rseq_percpu_pool_destroy");
+ perror("rseq_mempool_destroy");
abort();
}
}
struct percpu_memcpy_buffer *buffer;
pthread_t test_threads[num_threads];
cpu_set_t allowed_cpus;
- struct rseq_percpu_pool *mempool;
+ struct rseq_mempool *mempool;
- mempool = rseq_percpu_pool_create("percpu_memcpy_buffer",
+ mempool = rseq_mempool_create("percpu_memcpy_buffer",
sizeof(struct percpu_memcpy_buffer),
0, CPU_SETSIZE, NULL);
if (!mempool) {
- perror("rseq_percpu_pool_create");
+ perror("rseq_mempool_create");
abort();
}
buffer = (struct percpu_memcpy_buffer __rseq_percpu *)rseq_percpu_zmalloc(mempool);
*/
assert(sum == expected_sum);
rseq_percpu_free(buffer);
- ret = rseq_percpu_pool_destroy(mempool);
+ ret = rseq_mempool_destroy(mempool);
if (ret) {
- perror("rseq_percpu_pool_destroy");
+ perror("rseq_mempool_destroy");
abort();
}
}
/* Test MEMBARRIER_CMD_PRIVATE_RESTART_RSEQ_ON_CPU membarrier command. */
#ifdef TEST_MEMBARRIER
struct test_membarrier_thread_args {
- struct rseq_percpu_pool *mempool;
+ struct rseq_mempool *mempool;
struct percpu_list __rseq_percpu *percpu_list_ptr;
int stop;
};
}
static
-struct percpu_list __rseq_percpu *test_membarrier_alloc_percpu_list(struct rseq_percpu_pool *mempool)
+struct percpu_list __rseq_percpu *test_membarrier_alloc_percpu_list(struct rseq_mempool *mempool)
{
struct percpu_list __rseq_percpu *list;
int i;
struct percpu_list __rseq_percpu *list_a, __rseq_percpu *list_b;
intptr_t expect_a = 0, expect_b = 0;
int cpu_a = 0, cpu_b = 0;
- struct rseq_percpu_pool *mempool;
+ struct rseq_mempool *mempool;
int ret;
long long total_count = 0;
- mempool = rseq_percpu_pool_create("percpu_list", sizeof(struct percpu_list),
+ mempool = rseq_mempool_create("percpu_list", sizeof(struct percpu_list),
0, CPU_SETSIZE, NULL);
if (!mempool) {
- perror("rseq_percpu_pool_create");
+ perror("rseq_mempool_create");
abort();
}
args->mempool = mempool;
errno, strerror(errno));
abort();
}
- ret = rseq_percpu_pool_destroy(mempool);
+ ret = rseq_mempool_destroy(mempool);
if (ret) {
- perror("rseq_percpu_pool_destroy");
+ perror("rseq_mempool_destroy");
abort();
}