Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Change-Id: I886c840f394cb1de0b5ab71b7417b99675ea1468
* after rseq_percpu_pool_create() returns. The caller keeps ownership
* of @mmap_attr.
*
* after rseq_percpu_pool_create() returns. The caller keeps ownership
* of @mmap_attr.
*
+ * The argument @pool_name can be used to give a name to the pool for
+ * debugging purposes. It can be NULL if no name is given.
+ *
* Argument @flags is a bitwise-or'd selector of:
* - RSEQ_POOL_ROBUST
*
* Argument @flags is a bitwise-or'd selector of:
* - RSEQ_POOL_ROBUST
*
*
* This API is MT-safe.
*/
*
* This API is MT-safe.
*/
-struct rseq_percpu_pool *rseq_percpu_pool_create(size_t item_len,
- size_t percpu_len, int max_nr_cpus,
+struct rseq_percpu_pool *rseq_percpu_pool_create(const char *pool_name,
+ size_t item_len, size_t percpu_len, int max_nr_cpus,
const struct rseq_mmap_attr *mmap_attr,
int flags);
const struct rseq_mmap_attr *mmap_attr,
int flags);
struct rseq_mmap_attr mmap_attr;
struct rseq_mmap_attr mmap_attr;
/* Track alloc/free. */
unsigned long *alloc_bitmap;
};
/* Track alloc/free. */
unsigned long *alloc_bitmap;
};
+static
+const char *get_pool_name(const struct rseq_percpu_pool *pool)
+{
+ return pool->name ? : "<anonymous>";
+}
+
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void check_free_list(const struct rseq_percpu_pool *pool)
/* Always inline for __builtin_return_address(0). */
static inline __attribute__((always_inline))
void check_free_list(const struct rseq_percpu_pool *pool)
void *node_addr = node;
if (traversal_iteration >= max_list_traversal) {
void *node_addr = node;
if (traversal_iteration >= max_list_traversal) {
- fprintf(stderr, "%s: Corrupted free-list; Possibly infinite loop in pool %p, caller %p.\n",
- __func__, pool, __builtin_return_address(0));
+ fprintf(stderr, "%s: Corrupted free-list; Possibly infinite loop in pool \"%s\" (%p), caller %p.\n",
+ __func__, get_pool_name(pool), pool, __builtin_return_address(0));
if ((node_addr < pool->base) ||
(node_addr >= pool->base + pool->next_unused)) {
if (prev)
if ((node_addr < pool->base) ||
(node_addr >= pool->base + pool->next_unused)) {
if (prev)
- fprintf(stderr, "%s: Corrupted free-list node %p -> [out-of-range %p] in pool %p, caller %p.\n",
- __func__, prev, node, pool, __builtin_return_address(0));
+ fprintf(stderr, "%s: Corrupted free-list node %p -> [out-of-range %p] in pool \"%s\" (%p), caller %p.\n",
+ __func__, prev, node, get_pool_name(pool), pool, __builtin_return_address(0));
- fprintf(stderr, "%s: Corrupted free-list node [out-of-range %p] in pool %p, caller %p.\n",
- __func__, node, pool, __builtin_return_address(0));
+ fprintf(stderr, "%s: Corrupted free-list node [out-of-range %p] in pool \"%s\" (%p), caller %p.\n",
+ __func__, node, get_pool_name(pool), pool, __builtin_return_address(0));
}
if (total_never_allocated + total_freed != total_item) {
}
if (total_never_allocated + total_freed != total_item) {
- fprintf(stderr, "%s: Corrupted free-list in pool %p; total-item: %zu total-never-used: %zu total-freed: %zu, caller %p.\n",
- __func__, pool, total_item, total_never_allocated, total_freed, __builtin_return_address(0));
+ fprintf(stderr, "%s: Corrupted free-list in pool \"%s\" (%p); total-item: %zu total-never-used: %zu total-freed: %zu, caller %p.\n",
+ __func__, get_pool_name(pool), pool, total_item, total_never_allocated, total_freed, __builtin_return_address(0));
for (size_t k = 0; k < count; ++k)
total_leaks += rseq_hweight_ulong(bitmap[k]);
if (total_leaks) {
for (size_t k = 0; k < count; ++k)
total_leaks += rseq_hweight_ulong(bitmap[k]);
if (total_leaks) {
- fprintf(stderr, "%s: Pool has %zu leaked items on destroy, caller: %p.\n",
- __func__, total_leaks, (void *) __builtin_return_address(0));
+ fprintf(stderr, "%s: Pool \"%s\" (%p) has %zu leaked items on destroy, caller: %p.\n",
+ __func__, get_pool_name(pool), pool, total_leaks, (void *) __builtin_return_address(0));
if (ret)
goto end;
pthread_mutex_destroy(&pool->lock);
if (ret)
goto end;
pthread_mutex_destroy(&pool->lock);
memset(pool, 0, sizeof(*pool));
end:
return 0;
memset(pool, 0, sizeof(*pool));
end:
return 0;
-struct rseq_percpu_pool *rseq_percpu_pool_create(size_t item_len,
- size_t percpu_len, int max_nr_cpus,
+struct rseq_percpu_pool *rseq_percpu_pool_create(const char *pool_name,
+ size_t item_len, size_t percpu_len, int max_nr_cpus,
const struct rseq_mmap_attr *mmap_attr,
int flags)
{
const struct rseq_mmap_attr *mmap_attr,
int flags)
{
pool->mmap_attr.munmap_func = munmap_func;
pool->mmap_attr.mmap_priv = mmap_priv;
pool->mmap_attr.munmap_func = munmap_func;
pool->mmap_attr.mmap_priv = mmap_priv;
+ if (pool_name) {
+ pool->name = strdup(pool_name);
+ if (!pool->name)
+ goto error_alloc;
+ }
+
if (RSEQ_POOL_ROBUST & flags) {
if (create_alloc_bitmap(pool))
goto error_alloc;
if (RSEQ_POOL_ROBUST & flags) {
if (create_alloc_bitmap(pool))
goto error_alloc;
/* Print error if bit is already set. */
if (bitmap[k] & mask) {
/* Print error if bit is already set. */
if (bitmap[k] & mask) {
- fprintf(stderr, "%s: Allocator corruption detected for pool: %p, item offset: %zu, caller: %p.\n",
- __func__, pool, item_offset, (void *) __builtin_return_address(0));
+ fprintf(stderr, "%s: Allocator corruption detected for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
+ __func__, get_pool_name(pool), pool, item_offset, (void *) __builtin_return_address(0));
abort();
}
bitmap[k] |= mask;
abort();
}
bitmap[k] |= mask;
/* Print error if bit is not set. */
if (!(bitmap[k] & mask)) {
/* Print error if bit is not set. */
if (!(bitmap[k] & mask)) {
- fprintf(stderr, "%s: Double-free detected for pool: %p, item offset: %zu, caller: %p.\n",
- __func__, pool, item_offset, (void *) __builtin_return_address(0));
+ fprintf(stderr, "%s: Double-free detected for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
+ __func__, get_pool_name(pool), pool, item_offset,
+ (void *) __builtin_return_address(0));
abort();
}
bitmap[k] &= ~mask;
abort();
}
bitmap[k] &= ~mask;
struct spinlock_thread_test_data thread_data[num_threads];
struct rseq_percpu_pool *mempool;
struct spinlock_thread_test_data thread_data[num_threads];
struct rseq_percpu_pool *mempool;
- mempool = rseq_percpu_pool_create(sizeof(struct spinlock_test_data),
+ mempool = rseq_percpu_pool_create("spinlock_test_data",
+ sizeof(struct spinlock_test_data),
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
struct inc_thread_test_data thread_data[num_threads];
struct rseq_percpu_pool *mempool;
struct inc_thread_test_data thread_data[num_threads];
struct rseq_percpu_pool *mempool;
- mempool = rseq_percpu_pool_create(sizeof(struct inc_test_data),
+ mempool = rseq_percpu_pool_create("inc_test_data",
+ sizeof(struct inc_test_data),
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
cpu_set_t allowed_cpus;
struct rseq_percpu_pool *mempool;
cpu_set_t allowed_cpus;
struct rseq_percpu_pool *mempool;
- mempool = rseq_percpu_pool_create(sizeof(struct percpu_list),
+ mempool = rseq_percpu_pool_create("percpu_list", sizeof(struct percpu_list),
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
cpu_set_t allowed_cpus;
struct rseq_percpu_pool *mempool;
cpu_set_t allowed_cpus;
struct rseq_percpu_pool *mempool;
- mempool = rseq_percpu_pool_create(sizeof(struct percpu_buffer),
+ mempool = rseq_percpu_pool_create("percpu_buffer", sizeof(struct percpu_buffer),
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
cpu_set_t allowed_cpus;
struct rseq_percpu_pool *mempool;
cpu_set_t allowed_cpus;
struct rseq_percpu_pool *mempool;
- mempool = rseq_percpu_pool_create(sizeof(struct percpu_memcpy_buffer),
+ mempool = rseq_percpu_pool_create("percpu_memcpy_buffer",
+ sizeof(struct percpu_memcpy_buffer),
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
int ret;
long long total_count = 0;
int ret;
long long total_count = 0;
- mempool = rseq_percpu_pool_create(sizeof(struct percpu_list),
+ mempool = rseq_percpu_pool_create("percpu_list", sizeof(struct percpu_list),
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");
PERCPU_POOL_LEN, CPU_SETSIZE, NULL, 0);
if (!mempool) {
perror("rseq_percpu_pool_create");