#include "rseq-utils.h"
/*
- * rseq-percpu-alloc.c: rseq CPU-Local Storage (CLS) memory allocator.
+ * rseq-mempool.c: rseq CPU-Local Storage (CLS) memory allocator.
*
 * The rseq per-CPU memory allocator allows the application to request
 * memory pools of CPU-Local memory, each containing objects of a
 * given size (rounded up to the next power of 2). It is analogous to
 * Thread-Local Storage (TLS): where TLS provides Thread-Local Storage,
 * the per-CPU memory allocator provides CPU-Local Storage.
*/
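/*
 * Example (editor's sketch, not part of this patch): intended usage of
 * the mempool API defined in this file. The per-CPU access below mirrors
 * the stride arithmetic of __rseq_pool_percpu_ptr(); the public headers
 * are assumed to provide an equivalent accessor, and a default-stride
 * wrapper around librseq_percpu_free() is assumed to exist.
 */
#if 0	/* illustration only */
struct counter {
	long count;
};

static void example_mempool_usage(int max_nr_cpus)
{
	struct rseq_mempool *pool;
	struct counter __rseq_percpu *c;
	int cpu;

	/* Stride 0 selects the default RSEQ_PERCPU_STRIDE. */
	pool = rseq_mempool_create("counters", sizeof(struct counter),
			0, max_nr_cpus, NULL);
	if (!pool)
		abort();
	c = (struct counter __rseq_percpu *) rseq_percpu_zmalloc(pool);
	if (!c)
		abort();
	/* Touch the CPU-local copy of the object for every possible CPU. */
	for (cpu = 0; cpu < max_nr_cpus; cpu++) {
		struct counter *cpu_c = (struct counter *)
			((uintptr_t) c + (size_t) cpu * RSEQ_PERCPU_STRIDE);

		cpu_c->count++;
	}
	librseq_percpu_free(c, RSEQ_PERCPU_STRIDE);
	rseq_mempool_destroy(pool);
}
#endif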
-/*
- * Use high bits of per-CPU addresses to index the pool.
- * This leaves the low bits of available to the application for pointer
- * tagging (based on next power of 2 alignment of the allocations).
- */
-#if RSEQ_BITS_PER_LONG == 64
-# define POOL_INDEX_BITS 16
-#else
-# define POOL_INDEX_BITS 8
-#endif
-#define MAX_NR_POOLS (1UL << POOL_INDEX_BITS)
-#define POOL_INDEX_SHIFT (RSEQ_BITS_PER_LONG - POOL_INDEX_BITS)
-#define MAX_POOL_LEN (1UL << POOL_INDEX_SHIFT)
-#define MAX_POOL_LEN_MASK (MAX_POOL_LEN - 1)
-
-#define POOL_SET_NR_ENTRIES POOL_INDEX_SHIFT
+#define POOL_SET_NR_ENTRIES RSEQ_BITS_PER_LONG
/*
 * Smallest allocation should hold enough space for a free list pointer.
 */
#define MOVE_PAGES_BATCH_SIZE 4096
+#define RANGE_HEADER_OFFSET sizeof(struct rseq_mempool_range)
+
struct free_list_node;
struct free_list_node {
struct free_list_node *next;
};
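/*
 * Sketch: the free list is intrusive. A freed item's first machine word
 * (in its CPU 0 copy) is reused as the next pointer, so no extra memory
 * is needed to track free slots. Push and pop are LIFO, performed under
 * pool->lock:
 *
 *	push:	item->next = pool->free_list_head;
 *		pool->free_list_head = item;
 *
 *	pop:	node = pool->free_list_head;
 *		if (node)
 *			pool->free_list_head = node->next;
 */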
-/* This lock protects pool create/destroy. */
-static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
-
-struct rseq_pool_attr {
+struct rseq_mempool_attr {
bool mmap_set;
void *(*mmap_func)(void *priv, size_t len);
int (*munmap_func)(void *priv, void *ptr, size_t len);
bool robust_set;
};
-struct rseq_percpu_pool {
+struct rseq_mempool_range;
+
+struct rseq_mempool_range {
+ struct rseq_mempool_range *next;
+ struct rseq_mempool *pool; /* Backward ref. to container pool. */
+ void *header;
void *base;
- unsigned int index;
+ size_t next_unused;
+ /* Track alloc/free. */
+ unsigned long *alloc_bitmap;
+};
+
+struct rseq_mempool {
+ /* Linked-list of ranges. */
+ struct rseq_mempool_range *ranges;
+
size_t item_len;
- size_t percpu_len;
+ size_t percpu_stride;
int item_order;
int max_nr_cpus;
* list.
*/
struct free_list_node *free_list_head;
- size_t next_unused;
+
/* This lock protects allocation/free within the pool. */
pthread_mutex_t lock;
- struct rseq_pool_attr attr;
-
+ struct rseq_mempool_attr attr;
char *name;
- /* Track alloc/free. */
- unsigned long *alloc_bitmap;
};
-//TODO: the array of pools should grow dynamically on create.
-static struct rseq_percpu_pool rseq_percpu_pool[MAX_NR_POOLS];
-
/*
* Pool set entries are indexed by item_len rounded to the next power of
* 2. A pool set can contain NULL pool entries, in which case the next
* large enough entry will be used for allocation.
*/
-struct rseq_percpu_pool_set {
+struct rseq_mempool_set {
/* This lock protects add vs malloc/zmalloc within the pool set. */
pthread_mutex_t lock;
- struct rseq_percpu_pool *entries[POOL_SET_NR_ENTRIES];
+ struct rseq_mempool *entries[POOL_SET_NR_ENTRIES];
};
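/*
 * Sketch (illustration only): how an allocation length maps to a pool
 * set entry. Entries are indexed by the order (log2) of the rounded-up
 * item length; NULL entries fall through to the next large enough pool,
 * which is the strategy __rseq_mempool_set_malloc() implements below.
 */
#if 0	/* illustration only */
static struct rseq_mempool *example_set_lookup(struct rseq_mempool_set *pool_set,
		size_t len)
{
	int order;

	order = rseq_get_count_order_ulong(len);
	if (order < POOL_SET_MIN_ENTRY)
		order = POOL_SET_MIN_ENTRY;
	/* Scan upward for the first non-NULL pool large enough for len. */
	for (; order < POOL_SET_NR_ENTRIES; order++) {
		struct rseq_mempool *pool = pool_set->entries[order];

		if (pool)
			return pool;
	}
	return NULL;	/* No large enough pool in the set. */
}
#endif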
static
-void *__rseq_pool_percpu_ptr(struct rseq_percpu_pool *pool, int cpu, uintptr_t item_offset)
+void *__rseq_pool_percpu_ptr(struct rseq_mempool *pool, int cpu,
+ uintptr_t item_offset, size_t stride)
{
- return pool->base + (pool->percpu_len * cpu) + item_offset;
-}
-
-void *__rseq_percpu_ptr(void __rseq_percpu *_ptr, int cpu)
-{
- uintptr_t ptr = (uintptr_t) _ptr;
- uintptr_t item_offset = ptr & MAX_POOL_LEN_MASK;
- uintptr_t pool_index = ptr >> POOL_INDEX_SHIFT;
- struct rseq_percpu_pool *pool = &rseq_percpu_pool[pool_index];
-
- assert(cpu >= 0);
- return __rseq_pool_percpu_ptr(pool, cpu, item_offset);
+ /* TODO: Implement multi-ranges support. */
+ return pool->ranges->base + (stride * cpu) + item_offset;
}
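/*
 * Example: with a 4 MB per-CPU stride, an item allocated at offset 0x80
 * within the range is reached at:
 *
 *	CPU 0: base + 0 * 0x400000 + 0x80
 *	CPU 2: base + 2 * 0x400000 + 0x80
 *
 * i.e. the same item offset repeats every @stride bytes, once per CPU.
 */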
static
-void rseq_percpu_zero_item(struct rseq_percpu_pool *pool, uintptr_t item_offset)
+void rseq_percpu_zero_item(struct rseq_mempool *pool, uintptr_t item_offset)
{
int i;
for (i = 0; i < pool->max_nr_cpus; i++) {
- char *p = __rseq_pool_percpu_ptr(pool, i, item_offset);
+ char *p = __rseq_pool_percpu_ptr(pool, i,
+ item_offset, pool->percpu_stride);
memset(p, 0, pool->item_len);
}
}
-#ifdef HAVE_LIBNUMA
-int rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool, int numa_flags)
+//TODO: this will need to be reimplemented for ranges,
+//which cannot use __rseq_pool_percpu_ptr.
+#if 0 //#ifdef HAVE_LIBNUMA
+static
+int rseq_mempool_range_init_numa(struct rseq_mempool *pool, struct rseq_mempool_range *range, int numa_flags)
{
- unsigned long nr_pages;
- long ret, page_len;
+ unsigned long nr_pages, page_len;
+ long ret;
int cpu;
if (!numa_flags)
return 0;
page_len = rseq_get_page_len();
- nr_pages = pool->percpu_len >> rseq_get_count_order_ulong(page_len);
+ nr_pages = pool->percpu_stride >> rseq_get_count_order_ulong(page_len);
for (cpu = 0; cpu < pool->max_nr_cpus; cpu++) {
int status[MOVE_PAGES_BATCH_SIZE];
}
return 0;
}
+
+int rseq_mempool_init_numa(struct rseq_mempool *pool, int numa_flags)
+{
+ struct rseq_mempool_range *range;
+ int ret;
+
+ if (!numa_flags)
+ return 0;
+ for (range = pool->ranges; range; range = range->next) {
+ ret = rseq_mempool_range_init_numa(pool, range, numa_flags);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
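/*
 * Sketch (assumption, illustration only): the elided per-CPU loop above
 * would migrate each CPU's pages to that CPU's NUMA node in batches of
 * MOVE_PAGES_BATCH_SIZE, along these lines, using libnuma's
 * numa_node_of_cpu(3) and move_pages(2):
 *
 *	int node = numa_node_of_cpu(cpu);
 *	void *pages[MOVE_PAGES_BATCH_SIZE];
 *	int nodes[MOVE_PAGES_BATCH_SIZE], status[MOVE_PAGES_BATCH_SIZE];
 *
 *	for each batch of nr_pages starting at
 *	range->base + cpu * pool->percpu_stride:
 *		fill pages[] with page-aligned addresses, nodes[] with node;
 *		ret = move_pages(0, batch_len, pages, nodes, status, numa_flags);
 *		if (ret)
 *			return ret;
 */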
#else
-void rseq_percpu_pool_init_numa(struct rseq_percpu_pool *pool __attribute__((unused)),
+int rseq_mempool_init_numa(struct rseq_mempool *pool __attribute__((unused)),
int numa_flags __attribute__((unused)))
{
return 0;
}
static
-int create_alloc_bitmap(struct rseq_percpu_pool *pool)
+int create_alloc_bitmap(struct rseq_mempool *pool, struct rseq_mempool_range *range)
{
size_t count;
- count = ((pool->percpu_len >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;
+ count = ((pool->percpu_stride >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;
/*
* Not being able to create the validation bitmap is an error
* that needs to be reported.
*/
- pool->alloc_bitmap = calloc(count, sizeof(unsigned long));
- if (!pool->alloc_bitmap)
+ range->alloc_bitmap = calloc(count, sizeof(unsigned long));
+ if (!range->alloc_bitmap)
return -1;
return 0;
}
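/*
 * Example: with a 4 MB stride and 64-byte items (item_order 6), a range
 * tracks 4194304 >> 6 = 65536 slots, requiring 65536 / 64 = 1024
 * unsigned longs on a 64-bit build (BIT_PER_ULONG == 64).
 */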
static
-const char *get_pool_name(const struct rseq_percpu_pool *pool)
+const char *get_pool_name(const struct rseq_mempool *pool)
{
return pool->name ? : "<anonymous>";
}
+static
+bool addr_in_pool(const struct rseq_mempool *pool, void *addr)
+{
+ struct rseq_mempool_range *range;
+
+ for (range = pool->ranges; range; range = range->next) {
+ if (addr >= range->base && addr < range->base + range->next_unused)
+ return true;
+ }
+ return false;
+}
+
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void check_free_list(const struct rseq_percpu_pool *pool)
+void check_free_list(const struct rseq_mempool *pool)
{
- size_t total_item = pool->percpu_len >> pool->item_order;
- size_t total_never_allocated = (pool->percpu_len - pool->next_unused) >> pool->item_order;
- size_t total_freed = 0;
- size_t max_list_traversal = total_item - total_never_allocated;
- size_t traversal_iteration = 0;
+ size_t total_item = 0, total_never_allocated = 0, total_freed = 0,
+ max_list_traversal = 0, traversal_iteration = 0;
+ struct rseq_mempool_range *range;
+
+ if (!pool->attr.robust_set)
+ return;
+
+ for (range = pool->ranges; range; range = range->next) {
+ total_item += pool->percpu_stride >> pool->item_order;
+ total_never_allocated += (pool->percpu_stride - range->next_unused) >> pool->item_order;
+ }
+ max_list_traversal = total_item - total_never_allocated;
for (struct free_list_node *node = pool->free_list_head, *prev = NULL;
node;
}
/* Node is out of range. */
- if ((node_addr < pool->base) ||
- (node_addr >= pool->base + pool->next_unused)) {
+ if (!addr_in_pool(pool, node_addr)) {
if (prev)
fprintf(stderr, "%s: Corrupted free-list node %p -> [out-of-range %p] in pool \"%s\" (%p), caller %p.\n",
__func__, prev, node, get_pool_name(pool), pool, __builtin_return_address(0));
abort();
}
- traversal_iteration += 1;
- total_freed += 1;
+ traversal_iteration++;
+ total_freed++;
}
if (total_never_allocated + total_freed != total_item) {
__func__, get_pool_name(pool), pool, total_item, total_never_allocated, total_freed, __builtin_return_address(0));
abort();
}
-
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void destroy_alloc_bitmap(struct rseq_percpu_pool *pool)
+void destroy_alloc_bitmap(struct rseq_mempool *pool, struct rseq_mempool_range *range)
{
- unsigned long *bitmap = pool->alloc_bitmap;
+ unsigned long *bitmap = range->alloc_bitmap;
size_t count, total_leaks = 0;
if (!bitmap)
return;
- count = ((pool->percpu_len >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;
+ count = ((pool->percpu_stride >> pool->item_order) + BIT_PER_ULONG - 1) / BIT_PER_ULONG;
/* Assert that all items in the pool were freed. */
for (size_t k = 0; k < count; ++k)
abort();
}
- check_free_list(pool);
-
free(bitmap);
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-int __rseq_percpu_pool_destroy(struct rseq_percpu_pool *pool)
+int rseq_mempool_range_destroy(struct rseq_mempool *pool,
+ struct rseq_mempool_range *range)
{
- int ret;
+ destroy_alloc_bitmap(pool, range);
+ /* range is a header located one page before the aligned mapping. */
+ return pool->attr.munmap_func(pool->attr.mmap_priv, range->header,
+ (pool->percpu_stride * pool->max_nr_cpus) + rseq_get_page_len());
+}
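/*
 * Range memory layout (sketch): one extra page is mapped in front of the
 * stride-aligned area and serves as the range header. The range
 * descriptor itself sits at the end of that page, immediately before
 * base:
 *
 *	header                         base (aligned on percpu_stride)
 *	|                              |
 *	v                              v
 *	[ ... header page ... [range] ][ CPU 0 ][ CPU 1 ] ... [ CPU n-1 ]
 *	                      ^
 *	                      base - RANGE_HEADER_OFFSET
 *
 * Hence unmapping starts at range->header and covers the header page
 * plus percpu_stride * max_nr_cpus bytes.
 */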
- if (!pool->base) {
- errno = ENOENT;
- ret = -1;
- goto end;
+/*
+ * Allocate a memory mapping aligned on @alignment, with an optional
+ * @pre_header before the mapping.
+ */
+static
+void *aligned_mmap_anonymous(struct rseq_mempool *pool,
+ size_t page_size, size_t len, size_t alignment,
+ void **pre_header, size_t pre_header_len)
+{
+ size_t minimum_page_count, page_count, extra, total_allocate = 0;
+ int page_order;
+ void *ptr;
+
+ if (len < page_size || alignment < page_size ||
+ !is_pow2(alignment) || (len & (alignment - 1))) {
+ errno = EINVAL;
+ return NULL;
}
- /*
- * This must be done before releasing pool->base for checking the
- * free-list.
- */
- destroy_alloc_bitmap(pool);
- ret = pool->attr.munmap_func(pool->attr.mmap_priv, pool->base,
- pool->percpu_len * pool->max_nr_cpus);
- if (ret)
- goto end;
- pthread_mutex_destroy(&pool->lock);
- free(pool->name);
- memset(pool, 0, sizeof(*pool));
-end:
- return 0;
+ page_order = rseq_get_count_order_ulong(page_size);
+ if (page_order < 0) {
+ errno = EINVAL;
+ return NULL;
+ }
+ if (pre_header_len && (pre_header_len & (page_size - 1))) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ minimum_page_count = (pre_header_len + len) >> page_order;
+ page_count = (pre_header_len + len + alignment - page_size) >> page_order;
+
+ assert(page_count >= minimum_page_count);
+
+ ptr = pool->attr.mmap_func(pool->attr.mmap_priv, page_count << page_order);
+ if (!ptr)
+ goto alloc_error;
+
+ total_allocate = page_count << page_order;
+
+ if (!(((uintptr_t) ptr + pre_header_len) & (alignment - 1))) {
+ /* Pointer is already aligned. ptr points to pre_header. */
+ goto out;
+ }
+
+ /* Unmap extra before. */
+ extra = offset_align((uintptr_t) ptr + pre_header_len, alignment);
+ assert(!(extra & (page_size - 1)));
+ if (pool->attr.munmap_func(pool->attr.mmap_priv, ptr, extra)) {
+ perror("munmap");
+ abort();
+ }
+ total_allocate -= extra;
+ ptr += extra; /* ptr points to pre_header */
+ page_count -= extra >> page_order;
+out:
+ assert(page_count >= minimum_page_count);
+
+ if (page_count > minimum_page_count) {
+ void *extra_ptr;
+
+ /* Unmap extra after. */
+ extra_ptr = ptr + (minimum_page_count << page_order);
+ extra = (page_count - minimum_page_count) << page_order;
+ if (pool->attr.munmap_func(pool->attr.mmap_priv, extra_ptr, extra)) {
+ perror("munmap");
+ abort();
+ }
+ total_allocate -= extra;
+ }
+
+ assert(!(((uintptr_t)ptr + pre_header_len) & (alignment - 1)));
+ assert(total_allocate == len + pre_header_len);
+
+alloc_error:
+ if (ptr) {
+ if (pre_header)
+ *pre_header = ptr;
+ ptr += pre_header_len;
+ }
+ return ptr;
}
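/*
 * Worked example (illustration only): page_size = 4 kB, len = 4 MB,
 * alignment = 4 MB, pre_header_len = 4 kB.
 *
 *	minimum_page_count = (4 kB + 4 MB) / 4 kB = 1025 pages
 *	page_count         = (4 kB + 4 MB + 4 MB - 4 kB) / 4 kB = 2048 pages
 *
 * After the over-sized mapping is obtained, the pages before the first
 * address where (ptr + pre_header_len) is 4 MB aligned are unmapped,
 * then any pages beyond the 1025 needed are unmapped, leaving exactly
 * one header page followed by a 4 MB area aligned on 4 MB.
 */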
-int rseq_percpu_pool_destroy(struct rseq_percpu_pool *pool)
+static
+struct rseq_mempool_range *rseq_mempool_range_create(struct rseq_mempool *pool)
{
- int ret;
+ struct rseq_mempool_range *range;
+ unsigned long page_size;
+ void *header;
+ void *base;
+
+ page_size = rseq_get_page_len();
- pthread_mutex_lock(&pool_lock);
- ret = __rseq_percpu_pool_destroy(pool);
- pthread_mutex_unlock(&pool_lock);
+ base = aligned_mmap_anonymous(pool, page_size,
+ pool->percpu_stride * pool->max_nr_cpus,
+ pool->percpu_stride,
+ &header, page_size);
+ if (!base)
+ return NULL;
+ range = (struct rseq_mempool_range *) (base - RANGE_HEADER_OFFSET);
+ range->pool = pool;
+ range->base = base;
+ range->header = header;
+ if (pool->attr.robust_set) {
+ if (create_alloc_bitmap(pool, range))
+ goto error_alloc;
+ }
+ return range;
+
+error_alloc:
+ (void) rseq_mempool_range_destroy(pool, range);
+ return NULL;
+}
+
+int rseq_mempool_destroy(struct rseq_mempool *pool)
+{
+ struct rseq_mempool_range *range, *next_range;
+ int ret = 0;
+
+ if (!pool)
+ return 0;
+ check_free_list(pool);
+ /* Iteration safe against removal. */
+ for (range = pool->ranges; range && (next_range = range->next, 1); range = next_range) {
+ ret = rseq_mempool_range_destroy(pool, range);
+ if (ret)
+ goto end;
+ /* Update list head to keep list coherent in case of partial failure. */
+ pool->ranges = next_range;
+ }
+ pthread_mutex_destroy(&pool->lock);
+ free(pool->name);
+ memset(pool, 0, sizeof(*pool));
+end:
return ret;
}
-struct rseq_percpu_pool *rseq_percpu_pool_create(const char *pool_name,
- size_t item_len, size_t percpu_len, int max_nr_cpus,
- const struct rseq_pool_attr *_attr)
+struct rseq_mempool *rseq_mempool_create(const char *pool_name,
+ size_t item_len, size_t percpu_stride, int max_nr_cpus,
+ const struct rseq_mempool_attr *_attr)
{
- struct rseq_percpu_pool *pool;
- struct rseq_pool_attr attr = {};
- void *base;
- unsigned int i;
+ struct rseq_mempool *pool;
+ struct rseq_mempool_attr attr = {};
int order;
/* Make sure each item is large enough to contain free list pointers. */
}
item_len = 1UL << order;
- /* Align percpu_len on page size. */
- percpu_len = rseq_align(percpu_len, rseq_get_page_len());
+ if (!percpu_stride)
+ percpu_stride = RSEQ_PERCPU_STRIDE; /* Use default */
- if (max_nr_cpus < 0 || item_len > percpu_len ||
- percpu_len > (UINTPTR_MAX >> POOL_INDEX_BITS)) {
+ if (max_nr_cpus < 0 || item_len > percpu_stride ||
+ percpu_stride < (size_t) rseq_get_page_len() ||
+ !is_pow2(percpu_stride)) {
errno = EINVAL;
return NULL;
}
attr.mmap_priv = NULL;
}
- pthread_mutex_lock(&pool_lock);
- /* Linear scan in array of pools to find empty spot. */
- for (i = FIRST_POOL; i < MAX_NR_POOLS; i++) {
- pool = &rseq_percpu_pool[i];
- if (!pool->base)
- goto found_empty;
- }
- errno = ENOMEM;
- pool = NULL;
- goto end;
+ pool = calloc(1, sizeof(struct rseq_mempool));
+ if (!pool)
+ return NULL;
-found_empty:
- base = attr.mmap_func(attr.mmap_priv, percpu_len * max_nr_cpus);
- if (!base)
- goto error_alloc;
+ memcpy(&pool->attr, &attr, sizeof(attr));
pthread_mutex_init(&pool->lock, NULL);
- pool->base = base;
- pool->percpu_len = percpu_len;
+ pool->percpu_stride = percpu_stride;
pool->max_nr_cpus = max_nr_cpus;
- pool->index = i;
pool->item_len = item_len;
pool->item_order = order;
- memcpy(&pool->attr, &attr, sizeof(attr));
+
+ //TODO: implement multi-range support.
+ pool->ranges = rseq_mempool_range_create(pool);
+ if (!pool->ranges)
+ goto error_alloc;
if (pool_name) {
pool->name = strdup(pool_name);
if (!pool->name)
goto error_alloc;
}
-
- if (attr.robust_set) {
- if (create_alloc_bitmap(pool))
- goto error_alloc;
- }
-end:
- pthread_mutex_unlock(&pool_lock);
return pool;
error_alloc:
- __rseq_percpu_pool_destroy(pool);
- pthread_mutex_unlock(&pool_lock);
+ rseq_mempool_destroy(pool);
errno = ENOMEM;
return NULL;
}
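/*
 * Example (sketch, illustration only): creating a robust pool with an
 * explicit power-of-2 stride. The robust attribute enables the
 * allocation bitmap and the free-list checks performed on destroy. The
 * attribute object is copied into the pool by rseq_mempool_create(), so
 * it can be destroyed right after the call.
 */
#if 0	/* illustration only */
static struct rseq_mempool *example_create_robust_pool(int max_nr_cpus)
{
	struct rseq_mempool_attr *attr;
	struct rseq_mempool *pool = NULL;

	attr = rseq_mempool_attr_create();
	if (!attr)
		return NULL;
	if (rseq_mempool_attr_set_robust(attr))
		goto end;
	/* 2 MB per-CPU stride; must be a power of 2 >= the page size. */
	pool = rseq_mempool_create("example", 64, 1UL << 21, max_nr_cpus, attr);
end:
	rseq_mempool_attr_destroy(attr);
	return pool;
}
#endif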
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void set_alloc_slot(struct rseq_percpu_pool *pool, size_t item_offset)
+void set_alloc_slot(struct rseq_mempool *pool, size_t item_offset)
{
- unsigned long *bitmap = pool->alloc_bitmap;
+ unsigned long *bitmap = pool->ranges->alloc_bitmap;
size_t item_index = item_offset >> pool->item_order;
unsigned long mask;
size_t k;
}
static
-void __rseq_percpu *__rseq_percpu_malloc(struct rseq_percpu_pool *pool, bool zeroed)
+void __rseq_percpu *__rseq_percpu_malloc(struct rseq_mempool *pool, bool zeroed)
{
struct free_list_node *node;
uintptr_t item_offset;
if (node != NULL) {
/* Remove node from free list (update head). */
pool->free_list_head = node->next;
- item_offset = (uintptr_t) ((void *) node - pool->base);
- addr = (void *) (((uintptr_t) pool->index << POOL_INDEX_SHIFT) | item_offset);
+ item_offset = (uintptr_t) ((void *) node - pool->ranges->base);
+ addr = (void __rseq_percpu *) (pool->ranges->base + item_offset);
goto end;
}
- if (pool->next_unused + pool->item_len > pool->percpu_len) {
+ if (pool->ranges->next_unused + pool->item_len > pool->percpu_stride) {
errno = ENOMEM;
addr = NULL;
goto end;
}
- item_offset = pool->next_unused;
- addr = (void *) (((uintptr_t) pool->index << POOL_INDEX_SHIFT) | item_offset);
- pool->next_unused += pool->item_len;
- set_alloc_slot(pool, item_offset);
+ item_offset = pool->ranges->next_unused;
+ addr = (void __rseq_percpu *) (pool->ranges->base + item_offset);
+ pool->ranges->next_unused += pool->item_len;
end:
+ if (addr)
+ set_alloc_slot(pool, item_offset);
pthread_mutex_unlock(&pool->lock);
if (zeroed && addr)
rseq_percpu_zero_item(pool, item_offset);
return addr;
}
-void __rseq_percpu *rseq_percpu_malloc(struct rseq_percpu_pool *pool)
+void __rseq_percpu *rseq_percpu_malloc(struct rseq_mempool *pool)
{
return __rseq_percpu_malloc(pool, false);
}
-void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_percpu_pool *pool)
+void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_mempool *pool)
{
return __rseq_percpu_malloc(pool, true);
}
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
-void clear_alloc_slot(struct rseq_percpu_pool *pool, size_t item_offset)
+void clear_alloc_slot(struct rseq_mempool *pool, size_t item_offset)
{
- unsigned long *bitmap = pool->alloc_bitmap;
+ unsigned long *bitmap = pool->ranges->alloc_bitmap;
size_t item_index = item_offset >> pool->item_order;
unsigned long mask;
size_t k;
bitmap[k] &= ~mask;
}
-void rseq_percpu_free(void __rseq_percpu *_ptr)
+void librseq_percpu_free(void __rseq_percpu *_ptr, size_t percpu_stride)
{
uintptr_t ptr = (uintptr_t) _ptr;
- uintptr_t item_offset = ptr & MAX_POOL_LEN_MASK;
- uintptr_t pool_index = ptr >> POOL_INDEX_SHIFT;
- struct rseq_percpu_pool *pool = &rseq_percpu_pool[pool_index];
+ void *range_base = (void *) (ptr & (~(percpu_stride - 1)));
+ struct rseq_mempool_range *range = (struct rseq_mempool_range *) (range_base - RANGE_HEADER_OFFSET);
+ struct rseq_mempool *pool = range->pool;
+ uintptr_t item_offset = ptr & (percpu_stride - 1);
struct free_list_node *head, *item;
pthread_mutex_lock(&pool->lock);
/* Add ptr to head of free list */
head = pool->free_list_head;
/* Free-list is in CPU 0 range. */
- item = (struct free_list_node *)__rseq_pool_percpu_ptr(pool, 0, item_offset);
+ item = (struct free_list_node *) ptr;
item->next = head;
pool->free_list_head = item;
pthread_mutex_unlock(&pool->lock);
}
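/*
 * Example: recovering the range from a per-CPU pointer. With a 4 MB
 * stride (0x400000), pointer 0x7f1234567890 masks down to the range base
 * 0x7f1234400000 (ptr & ~(stride - 1)); the range descriptor is then
 * found at base - RANGE_HEADER_OFFSET, and the item offset is
 * ptr & (stride - 1) = 0x167890. This is why percpu_stride must be a
 * power of 2 and the range base must be stride-aligned.
 */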
-struct rseq_percpu_pool_set *rseq_percpu_pool_set_create(void)
+struct rseq_mempool_set *rseq_mempool_set_create(void)
{
- struct rseq_percpu_pool_set *pool_set;
+ struct rseq_mempool_set *pool_set;
- pool_set = calloc(1, sizeof(struct rseq_percpu_pool_set));
+ pool_set = calloc(1, sizeof(struct rseq_mempool_set));
if (!pool_set)
return NULL;
pthread_mutex_init(&pool_set->lock, NULL);
return pool_set;
}
-int rseq_percpu_pool_set_destroy(struct rseq_percpu_pool_set *pool_set)
+int rseq_mempool_set_destroy(struct rseq_mempool_set *pool_set)
{
int order, ret;
for (order = POOL_SET_MIN_ENTRY; order < POOL_SET_NR_ENTRIES; order++) {
- struct rseq_percpu_pool *pool = pool_set->entries[order];
+ struct rseq_mempool *pool = pool_set->entries[order];
if (!pool)
continue;
- ret = rseq_percpu_pool_destroy(pool);
+ ret = rseq_mempool_destroy(pool);
if (ret)
return ret;
pool_set->entries[order] = NULL;
}
/* Ownership of pool is handed over to pool set on success. */
-int rseq_percpu_pool_set_add_pool(struct rseq_percpu_pool_set *pool_set, struct rseq_percpu_pool *pool)
+int rseq_mempool_set_add_pool(struct rseq_mempool_set *pool_set, struct rseq_mempool *pool)
{
size_t item_order = pool->item_order;
int ret = 0;
}
static
-void __rseq_percpu *__rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len, bool zeroed)
+void __rseq_percpu *__rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len, bool zeroed)
{
int order, min_order = POOL_SET_MIN_ENTRY;
- struct rseq_percpu_pool *pool;
+ struct rseq_mempool *pool;
void __rseq_percpu *addr;
order = rseq_get_count_order_ulong(len);
return addr;
}
-void __rseq_percpu *rseq_percpu_pool_set_malloc(struct rseq_percpu_pool_set *pool_set, size_t len)
+void __rseq_percpu *rseq_percpu_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len)
{
- return __rseq_percpu_pool_set_malloc(pool_set, len, false);
+ return __rseq_mempool_set_malloc(pool_set, len, false);
}
-void __rseq_percpu *rseq_percpu_pool_set_zmalloc(struct rseq_percpu_pool_set *pool_set, size_t len)
+void __rseq_percpu *rseq_percpu_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
{
- return __rseq_percpu_pool_set_malloc(pool_set, len, true);
+ return __rseq_mempool_set_malloc(pool_set, len, true);
}
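/*
 * Example (sketch, illustration only): a pool set aggregates pools of
 * different item sizes so callers can allocate by length, e.g.
 * rseq_percpu_mempool_set_zmalloc(set, 40) would be served by the
 * 64-byte pool below.
 */
#if 0	/* illustration only */
static struct rseq_mempool_set *example_create_set(int max_nr_cpus)
{
	struct rseq_mempool_set *set;
	size_t len;

	set = rseq_mempool_set_create();
	if (!set)
		return NULL;
	/* One pool per item size: 32, 64, 128 and 256 bytes. */
	for (len = 32; len <= 256; len <<= 1) {
		struct rseq_mempool *pool;

		pool = rseq_mempool_create(NULL, len, 0, max_nr_cpus, NULL);
		if (!pool)
			goto error;
		if (rseq_mempool_set_add_pool(set, pool)) {
			/* Ownership only transfers to the set on success. */
			rseq_mempool_destroy(pool);
			goto error;
		}
	}
	return set;
error:
	rseq_mempool_set_destroy(set);
	return NULL;
}
#endif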
-struct rseq_pool_attr *rseq_pool_attr_create(void)
+struct rseq_mempool_attr *rseq_mempool_attr_create(void)
{
- return calloc(1, sizeof(struct rseq_pool_attr));
+ return calloc(1, sizeof(struct rseq_mempool_attr));
}
-void rseq_pool_attr_destroy(struct rseq_pool_attr *attr)
+void rseq_mempool_attr_destroy(struct rseq_mempool_attr *attr)
{
free(attr);
}
-int rseq_pool_attr_set_mmap(struct rseq_pool_attr *attr,
+int rseq_mempool_attr_set_mmap(struct rseq_mempool_attr *attr,
void *(*mmap_func)(void *priv, size_t len),
int (*munmap_func)(void *priv, void *ptr, size_t len),
void *mmap_priv)
return 0;
}
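/*
 * Example (sketch, illustration only): custom mapping callbacks matching
 * the mmap_func/munmap_func signatures above, backed by plain anonymous
 * mmap(2)/munmap(2) (requires <sys/mman.h>). The priv pointer is passed
 * through unused here.
 */
#if 0	/* illustration only */
static void *example_mmap(void *priv __attribute__((unused)), size_t len)
{
	void *ptr;

	ptr = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* The allocator expects NULL (not MAP_FAILED) on failure. */
	return ptr == MAP_FAILED ? NULL : ptr;
}

static int example_munmap(void *priv __attribute__((unused)), void *ptr, size_t len)
{
	return munmap(ptr, len);
}

/* Registration: rseq_mempool_attr_set_mmap(attr, example_mmap, example_munmap, NULL); */
#endif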
-int rseq_pool_attr_set_robust(struct rseq_pool_attr *attr)
+int rseq_mempool_attr_set_robust(struct rseq_mempool_attr *attr)
{
if (!attr) {
errno = EINVAL;