int rseq_mempool_destroy(struct rseq_mempool *pool);
/*
- * rseq_percpu_malloc: Allocate memory from a per-cpu pool.
+ * rseq_mempool_percpu_malloc: Allocate memory from a per-cpu pool.
*
* Allocate an item from a per-cpu @pool. The allocation will reserve
* an item of the size specified by @item_len (rounded to next power of
*
* This API is MT-safe.
*/
-void __rseq_percpu *rseq_percpu_malloc(struct rseq_mempool *pool);
+void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool);
/*
- * rseq_percpu_zmalloc: Allocated zero-initialized memory from a per-cpu pool.
+ * rseq_mempool_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool.
*
* Allocate memory for an item within the pool, and zero-initialize its
- * memory on all CPUs. See rseq_percpu_malloc for details.
+ * memory on all CPUs. See rseq_mempool_percpu_malloc for details.
*
* This API is MT-safe.
*/
-void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_mempool *pool);
+void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool);
/*
- * rseq_percpu_free: Free memory from a per-cpu pool.
+ * rseq_mempool_malloc: Allocate memory from a global pool.
+ *
+ * Wrapper to allocate memory from a global pool, which can be
+ * used directly without per-cpu indexing. Would normally be used
+ * with pools created with max_nr_cpus=1. Returns NULL on allocation
+ * failure, like rseq_mempool_percpu_malloc().
+ *
+ * This API is MT-safe.
+ */
+static inline
+void *rseq_mempool_malloc(struct rseq_mempool *pool)
+{
+	return (void *) rseq_mempool_percpu_malloc(pool);
+}
+
+/*
+ * rseq_mempool_zmalloc: Allocate zero-initialized memory from a global pool.
+ *
+ * Wrapper to allocate zero-initialized memory from a global pool,
+ * which can be used directly without per-cpu indexing. Would normally
+ * be used with pools created with max_nr_cpus=1.
+ *
+ * This API is MT-safe.
+ */
+static inline
+void *rseq_mempool_zmalloc(struct rseq_mempool *pool)
+{
+	return (void *) rseq_mempool_percpu_zmalloc(pool);
+}
+
+/*
+ * rseq_mempool_percpu_free: Free memory from a per-cpu pool.
*
* Free an item pointed to by @ptr from its per-cpu pool.
*
* The @ptr argument is a __rseq_percpu encoded pointer returned by
* either:
*
- * - rseq_percpu_malloc(),
- * - rseq_percpu_zmalloc(),
- * - rseq_percpu_pool_set_malloc(),
- * - rseq_percpu_pool_set_zmalloc().
+ * - rseq_mempool_percpu_malloc(),
+ * - rseq_mempool_percpu_zmalloc(),
+ * - rseq_mempool_set_percpu_malloc(),
+ * - rseq_mempool_set_percpu_zmalloc().
*
- * The @stride optional argument to rseq_percpu_free() is a configurable
+ * The @stride optional argument to rseq_mempool_percpu_free() is a configurable
* stride, which must match the stride received by pool creation.
*
* This API is MT-safe.
*/
-void librseq_percpu_free(void __rseq_percpu *ptr, size_t percpu_stride);
+void librseq_mempool_percpu_free(void __rseq_percpu *ptr, size_t percpu_stride);
+
-#define rseq_percpu_free(_ptr, _stride...) \
-	librseq_percpu_free(_ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_PERCPU_STRIDE))
+#define rseq_mempool_percpu_free(_ptr, _stride...) \
+	librseq_mempool_percpu_free(_ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_PERCPU_STRIDE))
+/*
+ * rseq_mempool_free: Free memory from a global pool.
+ *
+ * Free an item pointed to by @ptr from its global pool. Would normally
+ * be used with pools created with max_nr_cpus=1.
+ *
+ * The @ptr argument is a pointer returned by either:
+ *
+ * - rseq_mempool_malloc(),
+ * - rseq_mempool_zmalloc(),
+ * - rseq_mempool_set_malloc(),
+ * - rseq_mempool_set_zmalloc().
+ *
+ * The @stride optional argument to rseq_mempool_free() is a configurable
+ * stride, which must match the stride received by pool creation. If
+ * the argument is not present, use the default RSEQ_PERCPU_STRIDE.
+ * The stride is needed even for a global pool to know the mapping
+ * address range.
+ *
+ * This API is MT-safe.
+ */
+#define rseq_mempool_free(_ptr, _stride...) \
+	librseq_mempool_percpu_free((void __rseq_percpu *) _ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_PERCPU_STRIDE))
/*
* rseq_percpu_ptr: Offset a per-cpu pointer for a given CPU.
* given @cpu. The @ptr argument is a __rseq_percpu pointer returned by
* either:
*
- * - rseq_percpu_malloc(),
- * - rseq_percpu_zmalloc(),
- * - rseq_percpu_pool_set_malloc(),
- * - rseq_percpu_pool_set_zmalloc().
+ * - rseq_mempool_percpu_malloc(),
+ * - rseq_mempool_percpu_zmalloc(),
+ * - rseq_mempool_set_percpu_malloc(),
+ * - rseq_mempool_set_percpu_zmalloc().
*
* The macro rseq_percpu_ptr() preserves the type of the @ptr parameter
* for the returned pointer, but removes the __rseq_percpu annotation.
*
* This API is MT-safe.
*/
-void __rseq_percpu *rseq_percpu_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len);
+void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len);
/*
- * rseq_percpu_mempool_set_zmalloc: Allocated zero-initialized memory from a per-cpu pool set.
+ * rseq_mempool_set_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool set.
*
* This API is MT-safe.
*/
-void __rseq_percpu *rseq_percpu_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len);
+void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len);
+
+/*
+ * rseq_mempool_set_malloc: Allocate memory from a global pool set.
+ *
+ * Wrapper to allocate an item of @len bytes from a global pool set,
+ * which can be used directly without per-cpu indexing. Would normally
+ * be used with pools created with max_nr_cpus=1.
+ *
+ * This API is MT-safe.
+ */
+static inline
+void *rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len)
+{
+	return (void *) rseq_mempool_set_percpu_malloc(pool_set, len);
+}
+
+/*
+ * rseq_mempool_set_zmalloc: Allocate zero-initialized memory from a global pool set.
+ *
+ * Wrapper to allocate a zero-initialized item of @len bytes from a
+ * global pool set, which can be used directly without per-cpu
+ * indexing. Would normally be used with pools created with
+ * max_nr_cpus=1.
+ *
+ * This API is MT-safe.
+ */
+static inline
+void *rseq_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
+{
+	return (void *) rseq_mempool_set_percpu_zmalloc(pool_set, len);
+}
/*
* rseq_mempool_init_numa: Move pages to the NUMA node associated to their CPU topology.
return addr;
}
+/* Allocate an uninitialized item from the per-cpu @pool. */
-void __rseq_percpu *rseq_percpu_malloc(struct rseq_mempool *pool)
+void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool)
{
	return __rseq_percpu_malloc(pool, false);
}
+/* Allocate a zero-initialized item from the per-cpu @pool. */
-void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_mempool *pool)
+void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool)
{
	return __rseq_percpu_malloc(pool, true);
}
bitmap[k] &= ~mask;
}
-void librseq_percpu_free(void __rseq_percpu *_ptr, size_t percpu_stride)
+void librseq_mempool_percpu_free(void __rseq_percpu *_ptr, size_t percpu_stride)
{
uintptr_t ptr = (uintptr_t) _ptr;
void *range_base = (void *) (ptr & (~(percpu_stride - 1)));
return addr;
}
+/* Allocate an uninitialized item of @len bytes from @pool_set. */
-void __rseq_percpu *rseq_percpu_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len)
+void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len)
{
	return __rseq_mempool_set_malloc(pool_set, len, false);
}
+/* Allocate a zero-initialized item of @len bytes from @pool_set. */
-void __rseq_percpu *rseq_percpu_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
+void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
{
	return __rseq_mempool_set_malloc(pool_set, len, true);
}
for (;;) {
struct test_data *cpuptr;
- ptr = (struct test_data __rseq_percpu *) rseq_percpu_zmalloc(mempool);
+ ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_zmalloc(mempool);
if (!ptr)
break;
/* Link items in cpu 0. */
list_for_each_entry_safe(iter, tmp, &list, node) {
ptr = iter->backref;
- rseq_percpu_free(ptr, stride);
+ rseq_mempool_percpu_free(ptr, stride);
}
ret = rseq_mempool_destroy(mempool);
ok(ret == 0, "Destroy mempool");
{
struct test_data __rseq_percpu *ptr;
- ptr = (struct test_data __rseq_percpu *) rseq_percpu_malloc(pool);
+ ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_malloc(pool);
- rseq_percpu_free(ptr);
- rseq_percpu_free(ptr);
+ rseq_mempool_percpu_free(ptr);
+ rseq_mempool_percpu_free(ptr);
}
static void test_robust_corrupt_after_free(struct rseq_mempool *pool)
struct test_data __rseq_percpu *ptr;
struct test_data *cpuptr;
- ptr = (struct test_data __rseq_percpu *) rseq_percpu_malloc(pool);
+ ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_malloc(pool);
cpuptr = (struct test_data *) rseq_percpu_ptr(ptr, 0);
- rseq_percpu_free(ptr);
+ rseq_mempool_percpu_free(ptr);
cpuptr->value = (uintptr_t) test_robust_corrupt_after_free;
rseq_mempool_destroy(pool);
static void test_robust_memory_leak(struct rseq_mempool *pool)
{
- (void) rseq_percpu_malloc(pool);
+ (void) rseq_mempool_percpu_malloc(pool);
rseq_mempool_destroy(pool);
}
struct test_data __rseq_percpu *ptr;
struct test_data *cpuptr;
- ptr = (struct test_data __rseq_percpu *) rseq_percpu_malloc(pool);
+ ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_malloc(pool);
cpuptr = (struct test_data *) rseq_percpu_ptr(ptr, 0);
- rseq_percpu_free(ptr);
+ rseq_mempool_percpu_free(ptr);
cpuptr->value = (uintptr_t) cpuptr;
- (void) rseq_percpu_malloc(pool);
- (void) rseq_percpu_malloc(pool);
+ (void) rseq_mempool_percpu_malloc(pool);
+ (void) rseq_mempool_percpu_malloc(pool);
}
static int run_robust_test(void (*test)(struct rseq_mempool*),
perror("rseq_mempool_create");
abort();
}
- data = (struct spinlock_test_data __rseq_percpu *)rseq_percpu_zmalloc(mempool);
+ data = (struct spinlock_test_data __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
if (!data) {
- perror("rseq_percpu_zmalloc");
+ perror("rseq_mempool_percpu_zmalloc");
abort();
}
sum += rseq_percpu_ptr(data, i)->count;
assert(sum == (uint64_t)opt_reps * num_threads);
- rseq_percpu_free(data);
+ rseq_mempool_percpu_free(data);
ret = rseq_mempool_destroy(mempool);
if (ret) {
perror("rseq_mempool_destroy");
perror("rseq_mempool_create");
abort();
}
- data = (struct inc_test_data __rseq_percpu *)rseq_percpu_zmalloc(mempool);
+ data = (struct inc_test_data __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
if (!data) {
- perror("rseq_percpu_zmalloc");
+ perror("rseq_mempool_percpu_zmalloc");
abort();
}
sum += rseq_percpu_ptr(data, i)->count;
assert(sum == (uint64_t)opt_reps * num_threads);
- rseq_percpu_free(data);
+ rseq_mempool_percpu_free(data);
ret = rseq_mempool_destroy(mempool);
if (ret) {
perror("rseq_mempool_destroy");
perror("rseq_mempool_create");
abort();
}
- list = (struct percpu_list __rseq_percpu *)rseq_percpu_zmalloc(mempool);
+ list = (struct percpu_list __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
if (!list) {
- perror("rseq_percpu_zmalloc");
+ perror("rseq_mempool_percpu_zmalloc");
abort();
}
* test is running).
*/
assert(sum == expected_sum);
- rseq_percpu_free(list);
+ rseq_mempool_percpu_free(list);
ret = rseq_mempool_destroy(mempool);
if (ret) {
perror("rseq_mempool_destroy");
perror("rseq_mempool_create");
abort();
}
- buffer = (struct percpu_buffer __rseq_percpu *)rseq_percpu_zmalloc(mempool);
+ buffer = (struct percpu_buffer __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
if (!buffer) {
- perror("rseq_percpu_zmalloc");
+ perror("rseq_mempool_percpu_zmalloc");
abort();
}
* test is running).
*/
assert(sum == expected_sum);
- rseq_percpu_free(buffer);
+ rseq_mempool_percpu_free(buffer);
ret = rseq_mempool_destroy(mempool);
if (ret) {
perror("rseq_mempool_destroy");
perror("rseq_mempool_create");
abort();
}
- buffer = (struct percpu_memcpy_buffer __rseq_percpu *)rseq_percpu_zmalloc(mempool);
+ buffer = (struct percpu_memcpy_buffer __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
if (!buffer) {
- perror("rseq_percpu_zmalloc");
+ perror("rseq_mempool_percpu_zmalloc");
abort();
}
* test is running).
*/
assert(sum == expected_sum);
- rseq_percpu_free(buffer);
+ rseq_mempool_percpu_free(buffer);
ret = rseq_mempool_destroy(mempool);
if (ret) {
perror("rseq_mempool_destroy");
struct percpu_list __rseq_percpu *list;
int i;
- list = (struct percpu_list __rseq_percpu *)rseq_percpu_zmalloc(mempool);
+ list = (struct percpu_list __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
if (!list) {
- perror("rseq_percpu_zmalloc");
+ perror("rseq_mempool_percpu_zmalloc");
return NULL;
}
for (i = 0; i < CPU_SETSIZE; i++) {
for (i = 0; i < CPU_SETSIZE; i++)
free(rseq_percpu_ptr(list, i)->head);
- rseq_percpu_free(list);
+ rseq_mempool_percpu_free(list);
}
static