static void test_percpu_spinlock(void)
{
const int num_threads = opt_threads;
- int i, ret;
+ int i, ret, max_nr_cpus;
uint64_t sum;
pthread_t test_threads[num_threads];
struct spinlock_test_data __rseq_percpu *data;
perror("rseq_mempool_attr_create");
abort();
}
- ret = rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, CPU_SETSIZE);
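+ /* Pass 0 so the mempool detects max_nr_cpus itself; the chosen value is queried below. */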
+ ret = rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, 0);
if (ret) {
perror("rseq_mempool_attr_set_percpu");
abort();
}
rseq_mempool_attr_destroy(attr);
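+ /* Query the CPU count the mempool was actually created with. */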
+ max_nr_cpus = rseq_mempool_get_max_nr_cpus(mempool);
data = (struct spinlock_test_data __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
if (!data) {
perror("rseq_mempool_percpu_zmalloc");
abort();
}
sum = 0;
- for (i = 0; i < CPU_SETSIZE; i++)
+ for (i = 0; i < max_nr_cpus; i++)
sum += rseq_percpu_ptr(data, i)->count;
assert(sum == (uint64_t)opt_reps * num_threads);
static void test_percpu_inc(void)
{
const int num_threads = opt_threads;
- int i, ret;
+ int i, ret, max_nr_cpus;
uint64_t sum;
pthread_t test_threads[num_threads];
struct inc_test_data __rseq_percpu *data;
perror("rseq_mempool_attr_create");
abort();
}
- ret = rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, CPU_SETSIZE);
+ ret = rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, 0);
if (ret) {
perror("rseq_mempool_attr_set_percpu");
abort();
}
rseq_mempool_attr_destroy(attr);
+ max_nr_cpus = rseq_mempool_get_max_nr_cpus(mempool);
data = (struct inc_test_data __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
if (!data) {
perror("rseq_mempool_percpu_zmalloc");
abort();
}
sum = 0;
- for (i = 0; i < CPU_SETSIZE; i++)
+ for (i = 0; i < max_nr_cpus; i++)
sum += rseq_percpu_ptr(data, i)->count;
assert(sum == (uint64_t)opt_reps * num_threads);
static void test_percpu_list(void)
{
const int num_threads = opt_threads;
- int i, j, ret;
+ int i, j, ret, max_nr_cpus;
uint64_t sum = 0, expected_sum = 0;
struct percpu_list __rseq_percpu *list;
pthread_t test_threads[num_threads];
perror("rseq_mempool_attr_create");
abort();
}
- ret = rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, CPU_SETSIZE);
+ ret = rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, 0);
if (ret) {
perror("rseq_mempool_attr_set_percpu");
abort();
}
rseq_mempool_attr_destroy(attr);
+ max_nr_cpus = rseq_mempool_get_max_nr_cpus(mempool);
list = (struct percpu_list __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
if (!list) {
perror("rseq_mempool_percpu_zmalloc");
/* Generate list entries for every usable cpu. */
sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
- for (i = 0; i < CPU_SETSIZE; i++) {
+ for (i = 0; i < max_nr_cpus; i++) {
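+ /* When indexing by CPU number, skip CPUs outside our affinity mask. */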
if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
continue;
for (j = 1; j <= 100; j++) {
}
}
- for (i = 0; i < CPU_SETSIZE; i++) {
+ for (i = 0; i < max_nr_cpus; i++) {
struct percpu_list_node *node;
if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
static void test_percpu_buffer(void)
{
const int num_threads = opt_threads;
- int i, j, ret;
+ int i, j, ret, max_nr_cpus;
uint64_t sum = 0, expected_sum = 0;
struct percpu_buffer __rseq_percpu *buffer;
pthread_t test_threads[num_threads];
perror("rseq_mempool_attr_create");
abort();
}
- ret = rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, CPU_SETSIZE);
+ ret = rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, 0);
if (ret) {
perror("rseq_mempool_attr_set_percpu");
abort();
}
rseq_mempool_attr_destroy(attr);
+ max_nr_cpus = rseq_mempool_get_max_nr_cpus(mempool);
buffer = (struct percpu_buffer __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
if (!buffer) {
perror("rseq_mempool_percpu_zmalloc");
/* Generate list entries for every usable cpu. */
sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
- for (i = 0; i < CPU_SETSIZE; i++) {
+ for (i = 0; i < max_nr_cpus; i++) {
struct percpu_buffer *cpubuffer;
if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
/* Worst case: every item ends up on the same CPU. */
cpubuffer->array =
(struct percpu_buffer_node **)
- malloc(sizeof(*cpubuffer->array) * CPU_SETSIZE *
+ malloc(sizeof(*cpubuffer->array) * max_nr_cpus *
BUFFER_ITEM_PER_CPU);
assert(cpubuffer->array);
- cpubuffer->buflen = CPU_SETSIZE * BUFFER_ITEM_PER_CPU;
+ cpubuffer->buflen = max_nr_cpus * BUFFER_ITEM_PER_CPU;
for (j = 1; j <= BUFFER_ITEM_PER_CPU; j++) {
struct percpu_buffer_node *node;
}
}
- for (i = 0; i < CPU_SETSIZE; i++) {
+ for (i = 0; i < max_nr_cpus; i++) {
struct percpu_buffer *cpubuffer;
struct percpu_buffer_node *node;
static void test_percpu_memcpy_buffer(void)
{
const int num_threads = opt_threads;
- int i, j, ret;
+ int i, j, ret, max_nr_cpus;
uint64_t sum = 0, expected_sum = 0;
struct percpu_memcpy_buffer __rseq_percpu *buffer;
pthread_t test_threads[num_threads];
perror("rseq_mempool_attr_create");
abort();
}
- ret = rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, CPU_SETSIZE);
+ ret = rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, 0);
if (ret) {
perror("rseq_mempool_attr_set_percpu");
abort();
}
rseq_mempool_attr_destroy(attr);
+ max_nr_cpus = rseq_mempool_get_max_nr_cpus(mempool);
buffer = (struct percpu_memcpy_buffer __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
if (!buffer) {
perror("rseq_mempool_percpu_zmalloc");
/* Generate list entries for every usable cpu. */
sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
- for (i = 0; i < CPU_SETSIZE; i++) {
+ for (i = 0; i < max_nr_cpus; i++) {
struct percpu_memcpy_buffer *cpubuffer;
if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
/* Worst case: every item ends up on the same CPU. */
cpubuffer->array =
(struct percpu_memcpy_buffer_node *)
- malloc(sizeof(*cpubuffer->array) * CPU_SETSIZE *
+ malloc(sizeof(*cpubuffer->array) * max_nr_cpus *
MEMCPY_BUFFER_ITEM_PER_CPU);
assert(cpubuffer->array);
- cpubuffer->buflen = CPU_SETSIZE * MEMCPY_BUFFER_ITEM_PER_CPU;
+ cpubuffer->buflen = max_nr_cpus * MEMCPY_BUFFER_ITEM_PER_CPU;
for (j = 1; j <= MEMCPY_BUFFER_ITEM_PER_CPU; j++) {
expected_sum += 2 * j + 1;
}
}
- for (i = 0; i < CPU_SETSIZE; i++) {
+ for (i = 0; i < max_nr_cpus; i++) {
struct percpu_memcpy_buffer_node item;
struct percpu_memcpy_buffer *cpubuffer;
struct rseq_mempool *mempool;
struct percpu_list __rseq_percpu *percpu_list_ptr;
int stop;
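+ /* CPU count detected at mempool creation, read by the worker loop. */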
+ int max_nr_cpus;
};
/* Worker threads modify data in their "active" percpu lists. */
struct percpu_list __rseq_percpu *test_membarrier_alloc_percpu_list(struct rseq_mempool *mempool)
{
struct percpu_list __rseq_percpu *list;
- int i;
+ int i, max_nr_cpus;
+ max_nr_cpus = rseq_mempool_get_max_nr_cpus(mempool);
list = (struct percpu_list __rseq_percpu *)rseq_mempool_percpu_zmalloc(mempool);
if (!list) {
perror("rseq_mempool_percpu_zmalloc");
return NULL;
}
- for (i = 0; i < CPU_SETSIZE; i++) {
+ for (i = 0; i < max_nr_cpus; i++) {
struct percpu_list *cpulist = rseq_percpu_ptr(list, i);
struct percpu_list_node *node;
}
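+ /* The list helpers below take the thread args to learn the pool's detected CPU count. */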
static
-void test_membarrier_free_percpu_list(struct percpu_list __rseq_percpu *list)
+void test_membarrier_free_percpu_list(struct test_membarrier_thread_args *args,
+ struct percpu_list __rseq_percpu *list)
{
int i;
- for (i = 0; i < CPU_SETSIZE; i++)
+ for (i = 0; i < args->max_nr_cpus; i++)
free(rseq_percpu_ptr(list, i)->head);
rseq_mempool_percpu_free(list);
}
static
-long long test_membarrier_count_percpu_list(struct percpu_list __rseq_percpu *list)
+long long test_membarrier_count_percpu_list(struct test_membarrier_thread_args *args,
+ struct percpu_list __rseq_percpu *list)
{
long long total_count = 0;
int i;
- for (i = 0; i < CPU_SETSIZE; i++)
+ for (i = 0; i < args->max_nr_cpus; i++)
total_count += rseq_percpu_ptr(list, i)->head->data;
return total_count;
}
perror("rseq_mempool_attr_create");
abort();
}
- ret = rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, CPU_SETSIZE);
+ ret = rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, 0);
if (ret) {
perror("rseq_mempool_attr_set_percpu");
abort();
}
rseq_mempool_attr_destroy(attr);
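+ /* Share the detected CPU count with the worker threads. */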
+ args->max_nr_cpus = rseq_mempool_get_max_nr_cpus(mempool);
args->mempool = mempool;
if (rseq_register_current_thread()) {
while (!RSEQ_READ_ONCE(args->stop)) {
/* list_a is "active". */
- cpu_a = rand() % CPU_SETSIZE;
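+ /* Pick a CPU slot within the pool's detected range. */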
+ cpu_a = rand() % args->max_nr_cpus;
/*
* As list_b is "inactive", we should never see changes
* to list_b.
*/
expect_a = RSEQ_READ_ONCE(rseq_percpu_ptr(list_a, cpu_a)->head->data);
- cpu_b = rand() % CPU_SETSIZE;
+ cpu_b = rand() % args->max_nr_cpus;
/*
* As list_a is "inactive", we should never see changes
* to list_a.
expect_b = RSEQ_READ_ONCE(rseq_percpu_ptr(list_b, cpu_b)->head->data);
}
- total_count += test_membarrier_count_percpu_list(list_a);
- total_count += test_membarrier_count_percpu_list(list_b);
+ total_count += test_membarrier_count_percpu_list(args, list_a);
+ total_count += test_membarrier_count_percpu_list(args, list_b);
/* Validate that we observe the right number of increments. */
if (total_count != opt_threads * opt_reps) {
total_count, opt_threads * opt_reps);
abort();
}
- test_membarrier_free_percpu_list(list_a);
- test_membarrier_free_percpu_list(list_b);
+ test_membarrier_free_percpu_list(args, list_a);
+ test_membarrier_free_percpu_list(args, list_b);
if (rseq_unregister_current_thread()) {
fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",