Update rseq critical section identifiers to match pseudo-code
[librseq.git] / tests / basic_percpu_ops_test.c
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2018-2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <assert.h>
#include <pthread.h>
#include <sched.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <errno.h>

#include <rseq/rseq.h>

#include "tap.h"

#define NR_TESTS 4

#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

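/*
 * Per-cpu data is indexed either by the memory map concurrency ID
 * (mm_cid) or by the cpu number, depending on the build option below.
 */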
#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
# define RSEQ_PERCPU	RSEQ_PERCPU_MM_CID
static
int get_current_cpu_id(void)
{
	return rseq_current_mm_cid();
}
static
bool rseq_validate_cpu_id(void)
{
	return rseq_mm_cid_available();
}
static
bool rseq_use_cpu_index(void)
{
	return false;	/* Use mm_cid */
}
#else
# define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID
static
int get_current_cpu_id(void)
{
	return rseq_cpu_start();
}
static
bool rseq_validate_cpu_id(void)
{
	return rseq_current_cpu_raw() >= 0;
}
static
bool rseq_use_cpu_index(void)
{
	return true;	/* Use cpu_id as index. */
}
#endif

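/*
 * Per-cpu data structures. Each per-cpu entry is aligned on 128 bytes so
 * that entries belonging to different cpus sit on separate cache lines,
 * avoiding false sharing.
 */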
struct percpu_lock_entry {
	intptr_t v;
} __attribute__((aligned(128)));

struct percpu_lock {
	struct percpu_lock_entry c[CPU_SETSIZE];
};

struct test_data_entry {
	intptr_t count;
} __attribute__((aligned(128)));

struct spinlock_test_data {
	struct percpu_lock lock;
	struct test_data_entry c[CPU_SETSIZE];
	int reps;
};

struct percpu_list_node {
	intptr_t data;
	struct percpu_list_node *next;
};

struct percpu_list_entry {
	struct percpu_list_node *head;
} __attribute__((aligned(128)));

struct percpu_list {
	struct percpu_list_entry c[CPU_SETSIZE];
};

/* A simple per-cpu spinlock. Returns the cpu the lock was acquired on. */
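/*
 * The lock word goes from 0 to 1 within a single rseq operation:
 * rseq_load_cbne_store__ptr() stores 1 only if the word still contains 0
 * and the thread is still running on the cpu (or mm_cid) it sampled;
 * otherwise the operation reports a comparison failure or an abort and
 * the loop retries, possibly on another cpu.
 */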
static int rseq_this_cpu_lock(struct percpu_lock *lock)
{
	int cpu;

	for (;;) {
		int ret;

		cpu = get_current_cpu_id();
		ret = rseq_load_cbne_store__ptr(RSEQ_MO_RELAXED, RSEQ_PERCPU,
						&lock->c[cpu].v, 0, 1, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	/*
	 * Acquire semantic when taking lock after control dependency.
	 * Matches rseq_smp_store_release().
	 */
	rseq_smp_acquire__after_ctrl_dep();
	return cpu;
}

static void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
{
	assert(lock->c[cpu].v == 1);
	/*
	 * Release lock, with release semantic. Matches
	 * rseq_smp_acquire__after_ctrl_dep().
	 */
	rseq_smp_store_release(&lock->c[cpu].v, 0);
}

static void *test_percpu_spinlock_thread(void *arg)
{
	struct spinlock_test_data *data = (struct spinlock_test_data *) arg;
	int i, cpu;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}
	for (i = 0; i < data->reps; i++) {
		cpu = rseq_this_cpu_lock(&data->lock);
		data->c[cpu].count++;
		rseq_percpu_unlock(&data->lock, cpu);
	}
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}

/*
 * A simple test which implements a sharded counter using a per-cpu
 * lock. Obviously real applications might prefer to simply use a
 * per-cpu increment; however, this is reasonable for a test and the
 * lock can be extended to synchronize more complicated operations.
 */
static void test_percpu_spinlock(void)
{
	const int num_threads = 200;
	int i;
	uint64_t sum, expected_sum;
	pthread_t test_threads[num_threads];
	struct spinlock_test_data data;

	diag("spinlock");

	memset(&data, 0, sizeof(data));
	data.reps = 5000;

	for (i = 0; i < num_threads; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_spinlock_thread, &data);

	for (i = 0; i < num_threads; i++)
		pthread_join(test_threads[i], NULL);

	sum = 0;
	for (i = 0; i < CPU_SETSIZE; i++)
		sum += data.c[i].count;

	expected_sum = (uint64_t)data.reps * num_threads;

	ok(sum == expected_sum, "spinlock - sum (%" PRIu64 " == %" PRIu64 ")", sum, expected_sum);
}

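/*
 * Push a node onto the list indexed by the cpu (or mm_cid) the thread is
 * currently running on. The head update only succeeds if the head has not
 * changed since it was read and the rseq critical section was not aborted,
 * so concurrent pushes on the same cpu cannot lose nodes.
 */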
static void this_cpu_list_push(struct percpu_list *list,
			       struct percpu_list_node *node,
			       int *_cpu)
{
	int cpu;

	for (;;) {
		intptr_t *targetptr, newval, expect;
		int ret;

		cpu = get_current_cpu_id();
		/* Load list->c[cpu].head with single-copy atomicity. */
		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
		newval = (intptr_t)node;
		targetptr = (intptr_t *)&list->c[cpu].head;
		node->next = (struct percpu_list_node *)expect;
		ret = rseq_load_cbne_store__ptr(RSEQ_MO_RELAXED, RSEQ_PERCPU,
						targetptr, expect, newval, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
}

/*
 * Unlike a traditional lock-less linked list, the availability of a
 * rseq primitive allows us to implement pop without concerns over
 * ABA-type races.
 */
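/*
 * Within one rseq critical section, pop loads the current head, bails out
 * (returning NULL) if the list is empty, and otherwise stores head->next
 * as the new head. If the section is preempted or migrated it aborts and
 * retries, so the head and its next pointer are always read and published
 * consistently, which is why no ABA problem can arise.
 */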
static struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
						  int *_cpu)
{
	for (;;) {
		struct percpu_list_node *head;
		intptr_t *targetptr, expectnot, *load;
		long offset;
		int ret, cpu;

		cpu = get_current_cpu_id();
		targetptr = (intptr_t *)&list->c[cpu].head;
		expectnot = (intptr_t)NULL;
		offset = offsetof(struct percpu_list_node, next);
		load = (intptr_t *)&head;
		ret = rseq_load_cbeq_store_add_load_store__ptr(RSEQ_MO_RELAXED, RSEQ_PERCPU,
							       targetptr, expectnot,
							       offset, load, cpu);
		if (rseq_likely(!ret)) {
			if (_cpu)
				*_cpu = cpu;
			return head;
		}
		if (ret > 0)
			return NULL;
		/* Retry if rseq aborts. */
	}
}

/*
 * __percpu_list_pop is not safe against concurrent accesses. Should
 * only be used on lists that are not concurrently modified.
 */
static struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
{
	struct percpu_list_node *node;

	node = list->c[cpu].head;
	if (!node)
		return NULL;
	list->c[cpu].head = node->next;
	return node;
}

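/*
 * Each test thread repeatedly pops a node from the list of the cpu it is
 * currently running on and pushes it back, yielding in between to
 * encourage migrations. Nodes may move between per-cpu lists, but none
 * should be lost.
 */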
static void *test_percpu_list_thread(void *arg)
{
	int i;
	struct percpu_list *list = (struct percpu_list *)arg;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	for (i = 0; i < 100000; i++) {
		struct percpu_list_node *node;

		node = this_cpu_list_pop(list, NULL);
		sched_yield();	/* encourage shuffling */
		if (node)
			this_cpu_list_push(list, node, NULL);
	}

	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}

/* Simultaneous modification to a per-cpu linked list from many threads. */
static void test_percpu_list(void)
{
	int i, j;
	uint64_t sum = 0, expected_sum = 0;
	struct percpu_list list;
	pthread_t test_threads[200];
	cpu_set_t allowed_cpus;

	diag("percpu_list");

	memset(&list, 0, sizeof(list));

	/* Generate list entries for every usable cpu. */
	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
			continue;
		for (j = 1; j <= 100; j++) {
			struct percpu_list_node *node;

			expected_sum += j;

			node = (struct percpu_list_node *) malloc(sizeof(*node));
			assert(node);
			node->data = j;
			node->next = list.c[i].head;
			list.c[i].head = node;
		}
	}

	for (i = 0; i < 200; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_list_thread, &list);

	for (i = 0; i < 200; i++)
		pthread_join(test_threads[i], NULL);

	for (i = 0; i < CPU_SETSIZE; i++) {
		struct percpu_list_node *node;

		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
			continue;

		while ((node = __percpu_list_pop(&list, i))) {
			sum += node->data;
			free(node);
		}
	}

	/*
	 * All entries should now be accounted for (unless some external
	 * actor is interfering with our allowed affinity while this
	 * test is running).
	 */
	ok(sum == expected_sum, "percpu_list - sum (%" PRIu64 " == %" PRIu64 ")", sum, expected_sum);
}

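/*
 * Four test points (NR_TESTS): thread registration, the spinlock sum, the
 * per-cpu list sum, and thread unregistration. All of them are skipped when
 * the rseq syscall is unavailable; the remaining ones are skipped when no
 * cpu id getter is available.
 */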
int main(void)
{
	plan_tests(NR_TESTS);

	if (!rseq_available(RSEQ_AVAILABLE_QUERY_KERNEL)) {
		skip(NR_TESTS, "The rseq syscall is unavailable");
		goto end;
	}

	if (rseq_register_current_thread()) {
		fail("rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		goto end;
	} else {
		pass("Registered current thread with rseq");
	}
	if (!rseq_validate_cpu_id()) {
		skip(NR_TESTS - 1, "Error: cpu id getter unavailable");
		goto end;
	}
	test_percpu_spinlock();
	test_percpu_list();

	if (rseq_unregister_current_thread()) {
		fail("rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		goto end;
	} else {
		pass("Unregistered current thread with rseq");
	}
end:
	exit(exit_status());
}