From 15da5c27fcf3e212417afb4c7d4442a0066cfc6d Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Fri, 8 Mar 2024 17:22:07 -0500
Subject: [PATCH] mempool: namespacing, add global alloc/free

Move the mempool allocation APIs under the rseq_mempool_ namespace
(e.g. rseq_percpu_malloc() becomes rseq_mempool_percpu_malloc()), and
introduce rseq_mempool_{malloc,zmalloc,free}() and
rseq_mempool_set_{malloc,zmalloc}() wrappers which allocate from a
global (max_nr_cpus=1) pool without per-cpu indexing.

Signed-off-by: Mathieu Desnoyers
Change-Id: I6b3a6f9948f8400ae691738d7caa15164e634b74
---
Usage sketches for the new global wrappers and the renamed per-cpu
API are appended after the patch trailer (illustrative only).

 include/rseq/mempool.h | 114 ++++++++++++++++++++++++++++++++++-------
 src/rseq-mempool.c     |  10 ++--
 tests/mempool_test.c   |  24 ++++-----
 tests/param_test.c     |  36 ++++++-------
 4 files changed, 130 insertions(+), 54 deletions(-)

diff --git a/include/rseq/mempool.h b/include/rseq/mempool.h
index f321313..5945759 100644
--- a/include/rseq/mempool.h
+++ b/include/rseq/mempool.h
@@ -112,7 +112,7 @@ struct rseq_mempool *rseq_mempool_create(const char *pool_name,
 int rseq_mempool_destroy(struct rseq_mempool *pool);
 
 /*
- * rseq_percpu_malloc: Allocate memory from a per-cpu pool.
+ * rseq_mempool_percpu_malloc: Allocate memory from a per-cpu pool.
  *
  * Allocate an item from a per-cpu @pool. The allocation will reserve
  * an item of the size specified by @item_len (rounded to next power of
@@ -128,30 +128,56 @@ int rseq_mempool_destroy(struct rseq_mempool *pool);
  *
  * This API is MT-safe.
  */
-void __rseq_percpu *rseq_percpu_malloc(struct rseq_mempool *pool);
+void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool);
 
 /*
- * rseq_percpu_zmalloc: Allocated zero-initialized memory from a per-cpu pool.
+ * rseq_mempool_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool.
  *
  * Allocate memory for an item within the pool, and zero-initialize its
- * memory on all CPUs. See rseq_percpu_malloc for details.
+ * memory on all CPUs. See rseq_mempool_percpu_malloc for details.
  *
  * This API is MT-safe.
  */
-void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_mempool *pool);
+void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool);
 
 /*
- * rseq_percpu_free: Free memory from a per-cpu pool.
+ * rseq_mempool_malloc: Allocate memory from a global pool.
+ *
+ * Wrapper to allocate memory from a global pool, which can be
+ * used directly without per-cpu indexing. Would normally be used
+ * with pools created with max_nr_cpus=1.
+ */
+static inline
+void *rseq_mempool_malloc(struct rseq_mempool *pool)
+{
+	return (void *) rseq_mempool_percpu_malloc(pool);
+}
+
+/*
+ * rseq_mempool_zmalloc: Allocate zero-initialized memory from a global pool.
+ *
+ * Wrapper to allocate memory from a global pool, which can be
+ * used directly without per-cpu indexing. Would normally be used
+ * with pools created with max_nr_cpus=1.
+ */
+static inline
+void *rseq_mempool_zmalloc(struct rseq_mempool *pool)
+{
+	return (void *) rseq_mempool_percpu_zmalloc(pool);
+}
+
+/*
+ * rseq_mempool_percpu_free: Free memory from a per-cpu pool.
  *
  * Free an item pointed to by @ptr from its per-cpu pool.
  *
  * The @ptr argument is a __rseq_percpu encoded pointer returned by
  * either:
  *
- * - rseq_percpu_malloc(),
- * - rseq_percpu_zmalloc(),
- * - rseq_percpu_pool_set_malloc(),
- * - rseq_percpu_pool_set_zmalloc().
+ * - rseq_mempool_percpu_malloc(),
+ * - rseq_mempool_percpu_zmalloc(),
+ * - rseq_mempool_set_percpu_malloc(),
+ * - rseq_mempool_set_percpu_zmalloc().
  *
  * The @stride optional argument to rseq_percpu_free() is a configurable
  * stride, which must match the stride received by pool creation.
@@ -159,10 +185,34 @@ void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_mempool *pool);
  *
  * This API is MT-safe.
  */
-void librseq_percpu_free(void __rseq_percpu *ptr, size_t percpu_stride);
+void librseq_mempool_percpu_free(void __rseq_percpu *ptr, size_t percpu_stride);
+
+#define rseq_mempool_percpu_free(_ptr, _stride...) \
+	librseq_mempool_percpu_free(_ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_PERCPU_STRIDE))
 
-#define rseq_percpu_free(_ptr, _stride...) \
-	librseq_percpu_free(_ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_PERCPU_STRIDE))
+/*
+ * rseq_mempool_free: Free memory from a global pool.
+ *
+ * Free an item pointed to by @ptr from its global pool. Would normally
+ * be used with pools created with max_nr_cpus=1.
+ *
+ * The @ptr argument is a pointer returned by either:
+ *
+ * - rseq_mempool_malloc(),
+ * - rseq_mempool_zmalloc(),
+ * - rseq_mempool_set_malloc(),
+ * - rseq_mempool_set_zmalloc().
+ *
+ * The @stride optional argument to rseq_mempool_free() is a configurable
+ * stride, which must match the stride received by pool creation. If
+ * the argument is not present, use the default RSEQ_PERCPU_STRIDE.
+ * The stride is needed even for a global pool to know the mapping
+ * address range.
+ *
+ * This API is MT-safe.
+ */
+#define rseq_mempool_free(_ptr, _stride...) \
+	librseq_mempool_percpu_free((void __rseq_percpu *) _ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_PERCPU_STRIDE))
 
 /*
  * rseq_percpu_ptr: Offset a per-cpu pointer for a given CPU.
  *
@@ -171,10 +221,10 @@ void librseq_percpu_free(void __rseq_percpu *ptr, size_t percpu_stride);
  * given @cpu. The @ptr argument is a __rseq_percpu pointer returned by
  * either:
  *
- * - rseq_percpu_malloc(),
- * - rseq_percpu_zmalloc(),
- * - rseq_percpu_pool_set_malloc(),
- * - rseq_percpu_pool_set_zmalloc().
+ * - rseq_mempool_percpu_malloc(),
+ * - rseq_mempool_percpu_zmalloc(),
+ * - rseq_mempool_set_percpu_malloc(),
+ * - rseq_mempool_set_percpu_zmalloc().
  *
  * The macro rseq_percpu_ptr() preserves the type of the @ptr parameter
  * for the returned pointer, but removes the __rseq_percpu annotation.
@@ -261,7 +311,7 @@ int rseq_mempool_set_add_pool(struct rseq_mempool_set *pool_set,
  *
  * This API is MT-safe.
  */
-void __rseq_percpu *rseq_percpu_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len);
+void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len);
 
 /*
  * rseq_percpu_mempool_set_zmalloc: Allocated zero-initialized memory from a per-cpu pool set.
  *
@@ -271,7 +321,33 @@ void __rseq_percpu *rseq_percpu_mempool_set_malloc(struct rseq_mempool_set *pool
  *
  * This API is MT-safe.
  */
-void __rseq_percpu *rseq_percpu_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len);
+void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len);
+
+/*
+ * rseq_mempool_set_malloc: Allocate memory from a global pool set.
+ *
+ * Wrapper to allocate memory from a global pool, which can be
+ * used directly without per-cpu indexing. Would normally be used
+ * with pools created with max_nr_cpus=1.
+ */
+static inline
+void *rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len)
+{
+	return (void *) rseq_mempool_set_percpu_malloc(pool_set, len);
+}
+
+/*
+ * rseq_mempool_set_zmalloc: Allocate zero-initialized memory from a global pool set.
+ *
+ * Wrapper to allocate memory from a global pool, which can be
+ * used directly without per-cpu indexing. Would normally be used
+ * with pools created with max_nr_cpus=1.
+ */
+static inline
+void *rseq_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
+{
+	return (void *) rseq_mempool_set_percpu_zmalloc(pool_set, len);
+}
 
 /*
  * rseq_mempool_init_numa: Move pages to the NUMA node associated to their CPU topology.
diff --git a/src/rseq-mempool.c b/src/rseq-mempool.c
index 1ece00e..8910dff 100644
--- a/src/rseq-mempool.c
+++ b/src/rseq-mempool.c
@@ -620,12 +620,12 @@ end:
 	return addr;
 }
 
-void __rseq_percpu *rseq_percpu_malloc(struct rseq_mempool *pool)
+void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool)
 {
 	return __rseq_percpu_malloc(pool, false);
 }
 
-void __rseq_percpu *rseq_percpu_zmalloc(struct rseq_mempool *pool)
+void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool)
 {
 	return __rseq_percpu_malloc(pool, true);
 }
@@ -655,7 +655,7 @@ void clear_alloc_slot(struct rseq_mempool *pool, size_t item_offset)
 	bitmap[k] &= ~mask;
 }
 
-void librseq_percpu_free(void __rseq_percpu *_ptr, size_t percpu_stride)
+void librseq_mempool_percpu_free(void __rseq_percpu *_ptr, size_t percpu_stride)
 {
 	uintptr_t ptr = (uintptr_t) _ptr;
 	void *range_base = (void *) (ptr & (~(percpu_stride - 1)));
@@ -765,12 +765,12 @@ found:
 	return addr;
 }
 
-void __rseq_percpu *rseq_percpu_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len)
+void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len)
 {
 	return __rseq_mempool_set_malloc(pool_set, len, false);
 }
 
-void __rseq_percpu *rseq_percpu_mempool_set_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
+void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
 {
 	return __rseq_mempool_set_malloc(pool_set, len, true);
 }
diff --git a/tests/mempool_test.c b/tests/mempool_test.c
index 39d5b3b..0fe44ca 100644
--- a/tests/mempool_test.c
+++ b/tests/mempool_test.c
@@ -53,7 +53,7 @@ static void test_mempool_fill(size_t stride)
 	for (;;) {
 		struct test_data *cpuptr;
 
-		ptr = (struct test_data __rseq_percpu *) rseq_percpu_zmalloc(mempool);
+		ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_zmalloc(mempool);
 		if (!ptr)
 			break;
 		/* Link items in cpu 0. */
@@ -84,7 +84,7 @@ static void test_mempool_fill(size_t stride)
 
 	list_for_each_entry_safe(iter, tmp, &list, node) {
 		ptr = iter->backref;
-		rseq_percpu_free(ptr, stride);
+		rseq_mempool_percpu_free(ptr, stride);
 	}
 	ret = rseq_mempool_destroy(mempool);
 	ok(ret == 0, "Destroy mempool");
@@ -94,10 +94,10 @@ static void test_robust_double_free(struct rseq_mempool *pool)
 {
 	struct test_data __rseq_percpu *ptr;
 
-	ptr = (struct test_data __rseq_percpu *) rseq_percpu_malloc(pool);
+	ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_malloc(pool);
 
-	rseq_percpu_free(ptr);
-	rseq_percpu_free(ptr);
+	rseq_mempool_percpu_free(ptr);
+	rseq_mempool_percpu_free(ptr);
 }
 
 static void test_robust_corrupt_after_free(struct rseq_mempool *pool)
@@ -105,10 +105,10 @@ static void test_robust_corrupt_after_free(struct rseq_mempool *pool)
 	struct test_data __rseq_percpu *ptr;
 	struct test_data *cpuptr;
 
-	ptr = (struct test_data __rseq_percpu *) rseq_percpu_malloc(pool);
+	ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_malloc(pool);
 	cpuptr = (struct test_data *) rseq_percpu_ptr(ptr, 0);
 
-	rseq_percpu_free(ptr);
+	rseq_mempool_percpu_free(ptr);
 	cpuptr->value = (uintptr_t) test_robust_corrupt_after_free;
 
 	rseq_mempool_destroy(pool);
@@ -116,7 +116,7 @@ static void test_robust_memory_leak(struct rseq_mempool *pool)
 {
-	(void) rseq_percpu_malloc(pool);
+	(void) rseq_mempool_percpu_malloc(pool);
 
 	rseq_mempool_destroy(pool);
 }
@@ -126,15 +126,15 @@ static void test_robust_free_list_corruption(struct rseq_mempool *pool)
 	struct test_data __rseq_percpu *ptr;
 	struct test_data *cpuptr;
 
-	ptr = (struct test_data __rseq_percpu *) rseq_percpu_malloc(pool);
+	ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_malloc(pool);
 	cpuptr = (struct test_data *) rseq_percpu_ptr(ptr, 0);
 
-	rseq_percpu_free(ptr);
+	rseq_mempool_percpu_free(ptr);
 	cpuptr->value = (uintptr_t) cpuptr;
 
-	(void) rseq_percpu_malloc(pool);
-	(void) rseq_percpu_malloc(pool);
+	(void) rseq_mempool_percpu_malloc(pool);
+	(void) rseq_mempool_percpu_malloc(pool);
 }
 
 static int run_robust_test(void (*test)(struct rseq_mempool*),
diff --git a/tests/param_test.c b/tests/param_test.c
index bb8b15a..f34b5c2 100644
--- a/tests/param_test.c
+++ b/tests/param_test.c
@@ -504,9 +504,9 @@ static void test_percpu_spinlock(void)
 		perror("rseq_mempool_create");
 		abort();
 	}
-	data = (struct spinlock_test_data __rseq_percpu *)rseq_percpu_zmalloc(mempool);
+	data = (struct spinlock_test_data __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
 	if (!data) {
-		perror("rseq_percpu_zmalloc");
+		perror("rseq_mempool_percpu_zmalloc");
 		abort();
 	}
@@ -541,7 +541,7 @@ static void test_percpu_spinlock(void)
 		sum += rseq_percpu_ptr(data, i)->count;
 	assert(sum == (uint64_t)opt_reps * num_threads);
 
-	rseq_percpu_free(data);
+	rseq_mempool_percpu_free(data);
 	ret = rseq_mempool_destroy(mempool);
 	if (ret) {
 		perror("rseq_mempool_destroy");
@@ -600,9 +600,9 @@ static void test_percpu_inc(void)
 		perror("rseq_mempool_create");
 		abort();
 	}
-	data = (struct inc_test_data __rseq_percpu *)rseq_percpu_zmalloc(mempool);
+	data = (struct inc_test_data __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
 	if (!data) {
-		perror("rseq_percpu_zmalloc");
+		perror("rseq_mempool_percpu_zmalloc");
 		abort();
 	}
@@ -637,7 +637,7 @@ static void test_percpu_inc(void)
 		sum += rseq_percpu_ptr(data, i)->count;
 	assert(sum == (uint64_t)opt_reps * num_threads);
 
-	rseq_percpu_free(data);
+	rseq_mempool_percpu_free(data);
 	ret = rseq_mempool_destroy(mempool);
 	if (ret) {
 		perror("rseq_mempool_destroy");
@@ -773,9 +773,9 @@ static void test_percpu_list(void)
 		perror("rseq_mempool_create");
 		abort();
 	}
-	list = (struct percpu_list __rseq_percpu *)rseq_percpu_zmalloc(mempool);
+	list = (struct percpu_list __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
 	if (!list) {
-		perror("rseq_percpu_zmalloc");
+		perror("rseq_mempool_percpu_zmalloc");
 		abort();
 	}
@@ -835,7 +835,7 @@ static void test_percpu_list(void)
 	 * test is running).
 	 */
 	assert(sum == expected_sum);
-	rseq_percpu_free(list);
+	rseq_mempool_percpu_free(list);
 	ret = rseq_mempool_destroy(mempool);
 	if (ret) {
 		perror("rseq_mempool_destroy");
@@ -984,9 +984,9 @@ static void test_percpu_buffer(void)
 		perror("rseq_mempool_create");
 		abort();
 	}
-	buffer = (struct percpu_buffer __rseq_percpu *)rseq_percpu_zmalloc(mempool);
+	buffer = (struct percpu_buffer __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
 	if (!buffer) {
-		perror("rseq_percpu_zmalloc");
+		perror("rseq_mempool_percpu_zmalloc");
 		abort();
 	}
@@ -1065,7 +1065,7 @@ static void test_percpu_buffer(void)
 	 * test is running).
 	 */
 	assert(sum == expected_sum);
-	rseq_percpu_free(buffer);
+	rseq_mempool_percpu_free(buffer);
 	ret = rseq_mempool_destroy(mempool);
 	if (ret) {
 		perror("rseq_mempool_destroy");
@@ -1225,9 +1225,9 @@ static void test_percpu_memcpy_buffer(void)
 		perror("rseq_mempool_create");
 		abort();
 	}
-	buffer = (struct percpu_memcpy_buffer __rseq_percpu *)rseq_percpu_zmalloc(mempool);
+	buffer = (struct percpu_memcpy_buffer __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
 	if (!buffer) {
-		perror("rseq_percpu_zmalloc");
+		perror("rseq_mempool_percpu_zmalloc");
 		abort();
 	}
@@ -1303,7 +1303,7 @@ static void test_percpu_memcpy_buffer(void)
 	 * test is running).
 	 */
 	assert(sum == expected_sum);
-	rseq_percpu_free(buffer);
+	rseq_mempool_percpu_free(buffer);
 	ret = rseq_mempool_destroy(mempool);
 	if (ret) {
 		perror("rseq_mempool_destroy");
@@ -1410,9 +1410,9 @@ struct percpu_list __rseq_percpu *test_membarrier_alloc_percpu_list(struct rseq_
 	struct percpu_list __rseq_percpu *list;
 	int i;
 
-	list = (struct percpu_list __rseq_percpu *)rseq_percpu_zmalloc(mempool);
+	list = (struct percpu_list __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
 	if (!list) {
-		perror("rseq_percpu_zmalloc");
+		perror("rseq_mempool_percpu_zmalloc");
 		return NULL;
 	}
 	for (i = 0; i < CPU_SETSIZE; i++) {
@@ -1435,7 +1435,7 @@ void test_membarrier_free_percpu_list(struct percpu_list __rseq_percpu *list)
 
 	for (i = 0; i < CPU_SETSIZE; i++)
 		free(rseq_percpu_ptr(list, i)->head);
-	rseq_percpu_free(list);
+	rseq_mempool_percpu_free(list);
 }
 
 static
-- 
2.34.1
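
Appendix (not part of the patch): a minimal usage sketch of the new
global allocation wrappers rseq_mempool_malloc(), rseq_mempool_zmalloc()
and rseq_mempool_free(). It is illustrative only: the
rseq_mempool_create() signature shown here, (pool_name, item_len,
percpu_stride, max_nr_cpus, attr), and the struct counter item type are
assumptions, not part of this patch; check include/rseq/mempool.h for
the exact prototype.

#include <rseq/mempool.h>
#include <stdint.h>
#include <stdlib.h>

struct counter {	/* hypothetical item type */
	uint64_t count;
};

int main(void)
{
	struct rseq_mempool *pool;
	struct counter *c;

	/*
	 * max_nr_cpus=1 creates a "global" pool: items are plain
	 * pointers, no per-cpu indexing needed. Create signature
	 * assumed: (pool_name, item_len, percpu_stride, max_nr_cpus,
	 * attr).
	 */
	pool = rseq_mempool_create("global-pool", sizeof(struct counter),
			RSEQ_PERCPU_STRIDE, 1, NULL);
	if (!pool)
		abort();

	c = rseq_mempool_zmalloc(pool);	/* zero-initialized, plain pointer */
	if (!c)
		abort();
	c->count++;

	/* Stride argument omitted: defaults to RSEQ_PERCPU_STRIDE. */
	rseq_mempool_free(c);
	if (rseq_mempool_destroy(pool))
		abort();
	return 0;
}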
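
For comparison, the same flow with the renamed per-cpu API, using
rseq_percpu_ptr() to decode the __rseq_percpu encoded pointer for a
given CPU, as the tests above do. The same assumption about the
rseq_mempool_create() signature applies, and the CPU count of 4 is
arbitrary.

#include <rseq/mempool.h>
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define NR_CPUS	4	/* arbitrary CPU count for the example */

struct counter {	/* hypothetical item type */
	uint64_t count;
};

int main(void)
{
	struct rseq_mempool *pool;
	struct counter __rseq_percpu *c;
	uint64_t sum = 0;
	int cpu;

	/* Same assumed rseq_mempool_create() signature as above. */
	pool = rseq_mempool_create("percpu-pool", sizeof(struct counter),
			RSEQ_PERCPU_STRIDE, NR_CPUS, NULL);
	if (!pool)
		abort();

	/* One zero-initialized item per CPU behind one encoded pointer. */
	c = (struct counter __rseq_percpu *) rseq_mempool_percpu_zmalloc(pool);
	if (!c)
		abort();

	/* rseq_percpu_ptr() decodes the pointer for a given CPU. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += rseq_percpu_ptr(c, cpu)->count;
	assert(sum == 0);	/* zmalloc zero-initialized every CPU's item */

	rseq_mempool_percpu_free(c);	/* stride defaults to RSEQ_PERCPU_STRIDE */
	if (rseq_mempool_destroy(pool))
		abort();
	return 0;
}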