* The stride *must* match for all objects belonging to a given pool
* between arguments to:
*
- * - rseq_percpu_pool_create(),
- * - __rseq_percpu_free(),
- * - __rseq_percpu_ptr().
+ * - rseq_mempool_create(),
+ * - rseq_percpu_ptr(),
+ * - rseq_percpu_free().
*/
#if RSEQ_BITS_PER_LONG == 64
# define RSEQ_PERCPU_STRIDE (1U << 24) /* 64-bit stride: 16MB */
* - rseq_percpu_pool_set_malloc(),
* - rseq_percpu_pool_set_zmalloc().
*
- * The @stride argument to __rseq_percpu_free() is a configurable
+ * The optional @stride argument to rseq_percpu_free() is a configurable
* stride, which must match the stride received by pool creation.
- * rseq_percpu_free() uses the default RSEQ_PERCPU_STRIDE stride.
+ * If the argument is not present, the default RSEQ_PERCPU_STRIDE is used.
*
* This API is MT-safe.
*/
-void __rseq_percpu_free(void __rseq_percpu *ptr, size_t percpu_stride);
+void librseq_percpu_free(void __rseq_percpu *ptr, size_t percpu_stride);
-#define rseq_percpu_free(ptr) __rseq_percpu_free(ptr, RSEQ_PERCPU_STRIDE)
+#define rseq_percpu_free(_ptr, _stride...) \
+ librseq_percpu_free(_ptr, RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_PERCPU_STRIDE))
/*
* rseq_percpu_ptr: Offset a per-cpu pointer for a given CPU.
* - rseq_percpu_pool_set_malloc(),
* - rseq_percpu_pool_set_zmalloc().
*
- * The macros rseq_percpu_ptr() and __rseq_percpu_ptr() preserve the
- * type of the @ptr parameter for the returned pointer, but removes the
- * __rseq_percpu annotation.
+ * The macro rseq_percpu_ptr() preserves the type of the @ptr parameter
+ * for the returned pointer, but removes the __rseq_percpu annotation.
*
- * The macro __rseq_percpu_ptr() takes a configurable @stride argument,
- * whereas rseq_percpu_ptr() uses the RSEQ_PERCPU_STRIDE default stride.
+ * The macro rseq_percpu_ptr() takes an optional @stride argument. If
+ * the argument is not present, the default RSEQ_PERCPU_STRIDE is used.
* This must match the stride used for pool creation.
*
* This API is MT-safe.
*/
-#define __rseq_percpu_ptr(ptr, cpu, stride) \
- ((__typeof__(*(ptr)) *) ((uintptr_t) (ptr) + ((unsigned int) (cpu) * (uintptr_t) (stride))))
-
-#define rseq_percpu_ptr(ptr, cpu) __rseq_percpu_ptr(ptr, cpu, RSEQ_PERCPU_STRIDE)
+#define rseq_percpu_ptr(_ptr, _cpu, _stride...) \
+ ((__typeof__(*(_ptr)) *) ((uintptr_t) (_ptr) + \
+ ((unsigned int) (_cpu) * \
+ (uintptr_t) RSEQ_PARAM_SELECT_ARG1(_, ##_stride, RSEQ_PERCPU_STRIDE))))
/*
* rseq_mempool_set_create: Create a pool set.
if (!ptr)
break;
/* Link items in cpu 0. */
- cpuptr = __rseq_percpu_ptr(ptr, 0, stride);
+ cpuptr = rseq_percpu_ptr(ptr, 0, stride);
cpuptr->backref = ptr;
/* Randomize items in list. */
if (count & 1)
list_for_each_entry(iter, &list, node) {
ptr = iter->backref;
for (i = 0; i < CPU_SETSIZE; i++) {
- struct test_data *cpuptr = __rseq_percpu_ptr(ptr, i, stride);
+ struct test_data *cpuptr = rseq_percpu_ptr(ptr, i, stride);
if (cpuptr->value != 0)
abort();
list_for_each_entry_safe(iter, tmp, &list, node) {
ptr = iter->backref;
- __rseq_percpu_free(ptr, stride);
+ rseq_percpu_free(ptr, stride);
}
ret = rseq_mempool_destroy(mempool);
ok(ret == 0, "Destroy mempool");