struct rseq_mmap_attr mmap_attr;
+ char *name;
/* Track alloc/free. */
unsigned long *alloc_bitmap;
};
return 0;
}
+static
+const char *get_pool_name(const struct rseq_percpu_pool *pool)
+{
+ return pool->name ? : "<anonymous>";
+}
+
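The `? :` in get_pool_name() is GNU C's two-operand conditional: `a ? : b` yields `a` when `a` is non-NULL, else `b`, without evaluating `a` twice. For reference, the portable spelling is:

	return pool->name ? pool->name : "<anonymous>";

This keeps every diagnostic below printable for pools created without a name.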
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void check_free_list(const struct rseq_percpu_pool *pool)
void *node_addr = node;
if (traversal_iteration >= max_list_traversal) {
- fprintf(stderr, "%s: Corrupted free-list; Possibly infinite loop in pool %p, caller %p.\n",
- __func__, pool, __builtin_return_address(0));
+ fprintf(stderr, "%s: Corrupted free-list; Possibly infinite loop in pool \"%s\" (%p), caller %p.\n",
+ __func__, get_pool_name(pool), pool, __builtin_return_address(0));
abort();
}
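The iteration cap turns a cycle in the free list (e.g. a use-after-free that relinks a node onto itself) into a deterministic abort rather than an infinite traversal. One plausible definition of the bound, assuming the field names and the invariant that the free list can never legitimately hold more nodes than the pool has items:

	/* Sketch only; the real derivation is outside this hunk. */
	size_t max_list_traversal = pool->percpu_len / pool->item_len;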
if ((node_addr < pool->base) ||
(node_addr >= pool->base + pool->next_unused)) {
if (prev)
- fprintf(stderr, "%s: Corrupted free-list node %p -> [out-of-range %p] in pool %p, caller %p.\n",
- __func__, prev, node, pool, __builtin_return_address(0));
+ fprintf(stderr, "%s: Corrupted free-list node %p -> [out-of-range %p] in pool \"%s\" (%p), caller %p.\n",
+ __func__, prev, node, get_pool_name(pool), pool, __builtin_return_address(0));
else
- fprintf(stderr, "%s: Corrupted free-list node [out-of-range %p] in pool %p, caller %p.\n",
- __func__, node, pool, __builtin_return_address(0));
+ fprintf(stderr, "%s: Corrupted free-list node [out-of-range %p] in pool \"%s\" (%p), caller %p.\n",
+ __func__, node, get_pool_name(pool), pool, __builtin_return_address(0));
abort();
}
}
if (total_never_allocated + total_freed != total_item) {
- fprintf(stderr, "%s: Corrupted free-list in pool %p; total-item: %zu total-never-used: %zu total-freed: %zu, caller %p.\n",
- __func__, pool, total_item, total_never_allocated, total_freed, __builtin_return_address(0));
+ fprintf(stderr, "%s: Corrupted free-list in pool \"%s\" (%p); total-item: %zu total-never-used: %zu total-freed: %zu, caller %p.\n",
+ __func__, get_pool_name(pool), pool, total_item, total_never_allocated, total_freed, __builtin_return_address(0));
abort();
}
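This accounting check asserts that every item is either still in the never-allocated tail of the pool or present on the free list. For example, with total_item = 128, if 40 items were ever carved out and only 37 of them freed, the check sees 88 + 37 = 125 != 128 and aborts, flagging 3 outstanding items.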
for (size_t k = 0; k < count; ++k)
total_leaks += rseq_hweight_ulong(bitmap[k]);
if (total_leaks) {
- fprintf(stderr, "%s: Pool has %zu leaked items on destroy, caller: %p.\n",
- __func__, total_leaks, (void *) __builtin_return_address(0));
+ fprintf(stderr, "%s: Pool \"%s\" (%p) has %zu leaked items on destroy, caller: %p.\n",
+ __func__, get_pool_name(pool), pool, total_leaks, (void *) __builtin_return_address(0));
abort();
}
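A minimal sketch of the leak this catches, assuming the allocator's existing rseq_percpu_malloc()/rseq_percpu_free() API and a robust pool:

	struct rseq_percpu_pool *pool;
	void __rseq_percpu *p;

	pool = rseq_percpu_pool_create("demo", sizeof(int),
			PERCPU_POOL_LEN, CPU_SETSIZE, NULL, RSEQ_POOL_ROBUST);
	p = rseq_percpu_malloc(pool);
	/* Missing rseq_percpu_free(p); */
	rseq_percpu_pool_destroy(pool);	/* Aborts: 1 leaked item. */

With several pools live, the printed name makes such a report actionable.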
if (ret)
goto end;
pthread_mutex_destroy(&pool->lock);
+ free(pool->name);
memset(pool, 0, sizeof(*pool));
end:
return ret;
}
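Note that the new free(pool->name) is safe for anonymous pools: the strdup() in rseq_percpu_pool_create() only runs when a name is passed, pool->name is otherwise left NULL (the pool structure is zeroed on destroy), and free(NULL) is a no-op.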
-struct rseq_percpu_pool *rseq_percpu_pool_create(size_t item_len,
- size_t percpu_len, int max_nr_cpus,
+struct rseq_percpu_pool *rseq_percpu_pool_create(const char *pool_name,
+ size_t item_len, size_t percpu_len, int max_nr_cpus,
const struct rseq_mmap_attr *mmap_attr,
int flags)
{
pool->mmap_attr.munmap_func = munmap_func;
pool->mmap_attr.mmap_priv = mmap_priv;
+ if (pool_name) {
+ pool->name = strdup(pool_name);
+ if (!pool->name)
+ goto error_alloc;
+ }
+
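The pool takes ownership of a private copy of the name: because of the strdup(), callers may pass a string with automatic or temporary storage, and the copy is released by the free(pool->name) in the destroy path above.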
if (RSEQ_POOL_ROBUST & flags) {
if (create_alloc_bitmap(pool))
goto error_alloc;
/* Print error if bit is already set. */
if (bitmap[k] & mask) {
- fprintf(stderr, "%s: Allocator corruption detected for pool: %p, item offset: %zu, caller: %p.\n",
- __func__, pool, item_offset, (void *) __builtin_return_address(0));
+ fprintf(stderr, "%s: Allocator corruption detected for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
+ __func__, get_pool_name(pool), pool, item_offset, (void *) __builtin_return_address(0));
abort();
}
bitmap[k] |= mask;
/* Print error if bit is not set. */
if (!(bitmap[k] & mask)) {
- fprintf(stderr, "%s: Double-free detected for pool: %p, item offset: %zu, caller: %p.\n",
- __func__, pool, item_offset, (void *) __builtin_return_address(0));
+ fprintf(stderr, "%s: Double-free detected for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
+ __func__, get_pool_name(pool), pool, item_offset,
+ (void *) __builtin_return_address(0));
abort();
}
bitmap[k] &= ~mask;
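Both the double-alloc and double-free checks index the bitmap the same way. A sketch of the presumed mapping from item offset to tracking bit (the item_index derivation and the BIT_PER_ULONG constant are assumptions about the surrounding code):

	size_t item_index = item_offset / pool->item_len;
	size_t k = item_index / BIT_PER_ULONG;			/* bitmap word */
	unsigned long mask = 1UL << (item_index % BIT_PER_ULONG);	/* bit in word */

One bit per item, set on allocation and cleared on free, so either check firing means the bitmap and the caller disagree about the item's state.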
struct spinlock_thread_test_data thread_data[num_threads];
struct rseq_percpu_pool *mempool;
- mempool = rseq_percpu_pool_create(sizeof(struct spinlock_test_data),
+ mempool = rseq_percpu_pool_create("spinlock_test_data",
+ sizeof(struct spinlock_test_data),
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
struct inc_thread_test_data thread_data[num_threads];
struct rseq_percpu_pool *mempool;
- mempool = rseq_percpu_pool_create(sizeof(struct inc_test_data),
+ mempool = rseq_percpu_pool_create("inc_test_data",
+ sizeof(struct inc_test_data),
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
cpu_set_t allowed_cpus;
struct rseq_percpu_pool *mempool;
- mempool = rseq_percpu_pool_create(sizeof(struct percpu_list),
+ mempool = rseq_percpu_pool_create("percpu_list", sizeof(struct percpu_list),
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
cpu_set_t allowed_cpus;
struct rseq_percpu_pool *mempool;
- mempool = rseq_percpu_pool_create(sizeof(struct percpu_buffer),
+ mempool = rseq_percpu_pool_create("percpu_buffer", sizeof(struct percpu_buffer),
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
cpu_set_t allowed_cpus;
struct rseq_percpu_pool *mempool;
- mempool = rseq_percpu_pool_create(sizeof(struct percpu_memcpy_buffer),
+ mempool = rseq_percpu_pool_create("percpu_memcpy_buffer",
+ sizeof(struct percpu_memcpy_buffer),
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
int ret;
long long total_count = 0;
- mempool = rseq_percpu_pool_create(sizeof(struct percpu_list),
+ mempool = rseq_percpu_pool_create("percpu_list", sizeof(struct percpu_list),
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");