// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
/*
 * rseq memory pool test.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <assert.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <inttypes.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

#include <rseq/mempool.h>
#include "../src/rseq-utils.h"

#include "../src/list.h"
#include "tap.h"

#if RSEQ_BITS_PER_LONG == 64
# define POISON_VALUE 0xABCDABCDABCDABCDULL
#else
# define POISON_VALUE 0xABCDABCDUL
#endif

/*
 * Per-object test payload allocated from the percpu mempool.
 */
struct test_data {
	uintptr_t value[2];			/* Scratch values written/checked by the tests. */
	struct test_data __rseq_percpu *backref;	/* Back-pointer to the percpu allocation handle. */
	struct list_head node;			/* Linkage into the allocation-tracking list. */
};

/*
 * Exhaust a percpu mempool configured with the given populate policy,
 * stride and maximum number of ranges; verify pool content, free all
 * objects, then exercise zeroed and initialized single allocations.
 * Results are reported through TAP ok() assertions.
 */
static void test_mempool_fill(enum rseq_mempool_populate_policy policy,
		unsigned long max_nr_ranges, size_t stride)
{
	struct test_data __rseq_percpu *ptr;
	struct test_data *iter, *tmp;
	struct rseq_mempool *mempool;
	struct rseq_mempool_attr *attr;
	uint64_t count = 0;
	LIST_HEAD(list);
	int ret, i, size_order;
	struct test_data init_value = {
		.value = {
			123,
			456,
		},
		.backref = NULL,
		.node = {},
	};

	attr = rseq_mempool_attr_create();
	ok(attr, "Create pool attribute");
	ret = rseq_mempool_attr_set_robust(attr);
	ok(ret == 0, "Setting mempool robust attribute");
	ret = rseq_mempool_attr_set_percpu(attr, stride, CPU_SETSIZE);
	ok(ret == 0, "Setting mempool percpu type");
	ret = rseq_mempool_attr_set_max_nr_ranges(attr, max_nr_ranges);
	ok(ret == 0, "Setting mempool max_nr_ranges=%lu", max_nr_ranges);
	ret = rseq_mempool_attr_set_poison(attr, POISON_VALUE);
	ok(ret == 0, "Setting mempool poison");
	ret = rseq_mempool_attr_set_populate_policy(attr, policy);
	ok(ret == 0, "Setting mempool populate policy to %s",
		policy == RSEQ_MEMPOOL_POPULATE_COW_INIT ? "COW_INIT" : "COW_ZERO");
	mempool = rseq_mempool_create("test_data",
			sizeof(struct test_data), attr);
	ok(mempool, "Create mempool of size %zu", stride);
	rseq_mempool_attr_destroy(attr);

	/* Allocate until the pool is exhausted (zmalloc returns NULL). */
	for (;;) {
		struct test_data *cpuptr;

		ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_zmalloc(mempool);
		if (!ptr)
			break;
		/* Link items in cpu 0. */
		cpuptr = rseq_percpu_ptr(ptr, 0, stride);
		cpuptr->backref = ptr;
		/* Randomize items in list. */
		if (count & 1)
			list_add(&cpuptr->node, &list);
		else
			list_add_tail(&cpuptr->node, &list);
		count++;
	}

	/*
	 * Each object consumes 2^size_order bytes of the per-cpu stride,
	 * so a fully used pool yields stride * max_nr_ranges bytes total.
	 */
	size_order = rseq_get_count_order_ulong(sizeof(struct test_data));
	ok(count * (1U << size_order) == stride * max_nr_ranges,
		"Allocated %" PRIu64 " objects in pool", count);

	/*
	 * zmalloc'd memory must read as zero on every cpu; abort on any
	 * corruption. Writing value[0] also validates writability.
	 */
	list_for_each_entry(iter, &list, node) {
		ptr = iter->backref;
		for (i = 0; i < CPU_SETSIZE; i++) {
			struct test_data *cpuptr = rseq_percpu_ptr(ptr, i, stride);

			if (cpuptr->value[0] != 0)
				abort();
			cpuptr->value[0]++;
		}
	}
	ok(1, "Check for pool content corruption");

	/* _safe variant: freeing unlinks the node we iterate over. */
	list_for_each_entry_safe(iter, tmp, &list, node) {
		ptr = iter->backref;
		rseq_mempool_percpu_free(ptr, stride);
	}
	ok(1, "Free all objects");

	ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_zmalloc(mempool);
	if (!ptr)
		abort();
	ok(1, "Allocate one object");

	rseq_mempool_percpu_free(ptr, stride);
	ok(1, "Free one object");

	/* malloc_init copies init_value into the new allocation. */
	ptr = (struct test_data __rseq_percpu *)
		rseq_mempool_percpu_malloc_init(mempool,
			&init_value, sizeof(struct test_data));
	if (!ptr)
		abort();
	ok(1, "Allocate one initialized object");

	ok(ptr->value[0] == 123 && ptr->value[1] == 456, "Validate initial values");

	rseq_mempool_percpu_free(ptr, stride);
	ok(1, "Free one object");

	ret = rseq_mempool_destroy(mempool);
	ok(ret == 0, "Destroy mempool");
}

139static void test_robust_double_free(struct rseq_mempool *pool,
140 enum rseq_mempool_populate_policy policy __attribute__((unused)))
c15b99f6
OD
141{
142 struct test_data __rseq_percpu *ptr;
143
15da5c27 144 ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_malloc(pool);
c15b99f6 145
15da5c27
MD
146 rseq_mempool_percpu_free(ptr);
147 rseq_mempool_percpu_free(ptr);
c15b99f6
OD
148}
149
fbe55804
MD
/*
 * Write into the pool's free-list bookkeeping range after freeing an
 * object, then destroy the pool. The robust pool's destroy-time checks
 * must detect the corruption and abort (caller expects SIGABRT).
 */
static void test_robust_corrupt_after_free(struct rseq_mempool *pool,
		enum rseq_mempool_populate_policy policy)
{
	struct test_data __rseq_percpu *ptr;
	struct test_data *cpuptr;

	ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_malloc(pool);
	/*
	 * Corrupt free list: For robust pools, the free list is located
	 * after the last cpu memory range for COW_ZERO, and after the init
	 * values memory range for COW_INIT.
	 */
	if (policy == RSEQ_MEMPOOL_POPULATE_COW_ZERO)
		cpuptr = (struct test_data *) rseq_percpu_ptr(ptr, rseq_mempool_get_max_nr_cpus(pool));
	else
		cpuptr = (struct test_data *) rseq_percpu_ptr(ptr, rseq_mempool_get_max_nr_cpus(pool) + 1);

	rseq_mempool_percpu_free(ptr);
	/* Scribble a non-pool value over the freed slot's free-list entry. */
	cpuptr->value[0] = (uintptr_t) test_robust_corrupt_after_free;

	rseq_mempool_destroy(pool);
}

173static void test_robust_memory_leak(struct rseq_mempool *pool,
174 enum rseq_mempool_populate_policy policy __attribute__((unused)))
c15b99f6 175{
15da5c27 176 (void) rseq_mempool_percpu_malloc(pool);
c15b99f6 177
0ba2a93e 178 rseq_mempool_destroy(pool);
c15b99f6
OD
179}
180
fbe55804
MD
/*
 * Corrupt the pool's free list after a free, then allocate again so the
 * allocator walks the damaged list. The robust pool must detect the
 * corruption and abort (caller expects SIGABRT).
 */
static void test_robust_free_list_corruption(struct rseq_mempool *pool,
		enum rseq_mempool_populate_policy policy)
{
	struct test_data __rseq_percpu *ptr;
	struct test_data *cpuptr;

	ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_malloc(pool);
	/*
	 * Corrupt free list: For robust pools, the free list is located
	 * after the last cpu memory range for COW_ZERO, and after the init
	 * values memory range for COW_INIT.
	 */
	if (policy == RSEQ_MEMPOOL_POPULATE_COW_ZERO)
		cpuptr = (struct test_data *) rseq_percpu_ptr(ptr, rseq_mempool_get_max_nr_cpus(pool));
	else
		cpuptr = (struct test_data *) rseq_percpu_ptr(ptr, rseq_mempool_get_max_nr_cpus(pool) + 1);

	rseq_mempool_percpu_free(ptr);

	/* Make the free-list entry point at itself: a bogus next pointer. */
	cpuptr->value[0] = (uintptr_t) cpuptr;

	/* Two allocations to force traversal of the corrupted entry. */
	(void) rseq_mempool_percpu_malloc(pool);
	(void) rseq_mempool_percpu_malloc(pool);
}

206static void test_robust_poison_corruption_malloc(struct rseq_mempool *pool,
207 enum rseq_mempool_populate_policy policy __attribute__((unused)))
5e000535
MD
208{
209 struct test_data __rseq_percpu *ptr;
210 struct test_data *cpuptr;
211
212 ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_malloc(pool);
213 cpuptr = (struct test_data *) rseq_percpu_ptr(ptr, 0);
214
215 rseq_mempool_percpu_free(ptr);
216
fbe55804 217 cpuptr->value[0] = 1;
5e000535
MD
218
219 (void) rseq_mempool_percpu_malloc(pool);
220}
221
fbe55804
MD
222static void test_robust_poison_corruption_destroy(struct rseq_mempool *pool,
223 enum rseq_mempool_populate_policy policy __attribute__((unused)))
5e000535
MD
224{
225 struct test_data __rseq_percpu *ptr;
226 struct test_data *cpuptr;
227
228 ptr = (struct test_data __rseq_percpu *) rseq_mempool_percpu_malloc(pool);
229 cpuptr = (struct test_data *) rseq_percpu_ptr(ptr, 0);
230
231 rseq_mempool_percpu_free(ptr);
232
fbe55804 233 cpuptr->value[0] = 1;
5e000535
MD
234
235 rseq_mempool_destroy(pool);
236}
237
d9050fc5
OD
238static struct rseq_mempool *make_test_pool(enum rseq_mempool_populate_policy policy)
239{
240 struct rseq_mempool_attr *attr;
241 struct rseq_mempool *pool;
242 int ret;
243
244 pool = NULL;
245
246 attr = rseq_mempool_attr_create();
247
248 if (!attr) {
249 goto out;
250 }
251
252 ret = rseq_mempool_attr_set_robust(attr);
253
254 if (0 != ret) {
255 goto err_attr;
256 }
257
258 ret = rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, 1);
259
260 if (0 != ret) {
261 goto err_attr;
262 }
263
264 ret = rseq_mempool_attr_set_populate_policy(attr, policy);
265
266 if (0 != ret) {
267 goto err_attr;
268 }
269
270 pool = rseq_mempool_create("mempool-robust",
271 sizeof(struct test_data), attr);
272err_attr:
273 rseq_mempool_attr_destroy(attr);
274out:
275 return pool;
276
277}
278
fbe55804 279static int run_robust_test(void (*test)(struct rseq_mempool *, enum rseq_mempool_populate_policy),
d9050fc5 280 enum rseq_mempool_populate_policy policy)
c15b99f6
OD
281{
282 pid_t cpid;
283 int status;
d9050fc5 284 struct rseq_mempool *pool;
c15b99f6
OD
285
286 cpid = fork();
287
288 switch (cpid) {
289 case -1:
290 return 0;
291 case 0:
d9050fc5
OD
292 /*
293 * Intentional leak of test pool because some tests might want
294 * to do an explicit destroy on it.
295 */
296 pool = make_test_pool(policy);
297 if (!pool)
298 _exit(EXIT_FAILURE);
fbe55804 299 test(pool, policy);
c15b99f6
OD
300 _exit(EXIT_FAILURE);
301 default:
302 waitpid(cpid, &status, 0);
303 }
304
305 if (WIFSIGNALED(status) &&
306 (SIGABRT == WTERMSIG(status)))
307 return 1;
308
309 return 0;
310}
311
53569d03 312static void run_robust_tests(enum rseq_mempool_populate_policy policy)
c15b99f6 313{
c15b99f6 314
d9050fc5 315 ok(run_robust_test(test_robust_double_free, policy),
c15b99f6
OD
316 "robust-double-free");
317
d9050fc5 318 ok(run_robust_test(test_robust_memory_leak, policy),
c15b99f6
OD
319 "robust-memory-leak");
320
d9050fc5 321 ok(run_robust_test(test_robust_poison_corruption_malloc, policy),
5e000535
MD
322 "robust-poison-corruption-malloc");
323
d9050fc5 324 ok(run_robust_test(test_robust_poison_corruption_destroy, policy),
5e000535
MD
325 "robust-poison-corruption-destroy");
326
d9050fc5 327 ok(run_robust_test(test_robust_corrupt_after_free, policy),
fbe55804
MD
328 "robust-corrupt-after-free");
329
d9050fc5 330 ok(run_robust_test(test_robust_free_list_corruption, policy),
fbe55804 331 "robust-free-list-corruption");
c15b99f6
OD
332}
333
612ee3bf
MD
/*
 * Child-side callback for run_fork_destroy_pool_test(): destroy the
 * pool inherited across fork(). The policy argument is unused.
 */
static void fork_child(struct rseq_mempool *pool,
		enum rseq_mempool_populate_policy policy __attribute__((unused)))
{
	rseq_mempool_destroy(pool);
}

340/*
341 * Test that destroying a mempool works in child after fork.
342 */
343static int run_fork_destroy_pool_test(void (*test)(struct rseq_mempool *, enum rseq_mempool_populate_policy),
344 enum rseq_mempool_populate_policy policy)
345{
346 pid_t cpid;
347 int status;
348 struct rseq_mempool *pool;
349
350 pool = make_test_pool(policy);
351 if (!pool)
352 _exit(EXIT_FAILURE);
353
354 cpid = fork();
355
356 switch (cpid) {
357 case -1:
358 return 0;
359 case 0:
360 test(pool, policy);
361 _exit(EXIT_SUCCESS);
362 default:
363 waitpid(cpid, &status, 0);
364 }
365
366 if (WIFSIGNALED(status))
367 return 0;
368
369 return 1;
370}
371
d273fd4b
MD
372int main(void)
373{
374 size_t len;
dcb59d50 375 unsigned long nr_ranges;
d273fd4b 376
579badcb
MD
377 plan_no_plan();
378
dcb59d50
MD
379 for (nr_ranges = 1; nr_ranges < 32; nr_ranges <<= 1) {
380 /* From page size to 64kB */
381 for (len = rseq_get_page_len(); len < 65536; len <<= 1) {
805d0043
MD
382 test_mempool_fill(RSEQ_MEMPOOL_POPULATE_COW_ZERO, nr_ranges, len);
383 test_mempool_fill(RSEQ_MEMPOOL_POPULATE_COW_INIT, nr_ranges, len);
dcb59d50 384 }
d273fd4b
MD
385 }
386
dcb59d50
MD
387 len = rseq_get_page_len();
388 if (len < 65536)
389 len = 65536;
390 /* From min(page size, 64kB) to 4MB */
53569d03 391 for (; len < 4096 * 1024; len <<= 1) {
805d0043
MD
392 test_mempool_fill(RSEQ_MEMPOOL_POPULATE_COW_ZERO, 1, len);
393 test_mempool_fill(RSEQ_MEMPOOL_POPULATE_COW_INIT, 1, len);
53569d03 394 }
dcb59d50 395
805d0043
MD
396 run_robust_tests(RSEQ_MEMPOOL_POPULATE_COW_ZERO);
397 run_robust_tests(RSEQ_MEMPOOL_POPULATE_COW_INIT);
398 ok(run_fork_destroy_pool_test(fork_child, RSEQ_MEMPOOL_POPULATE_COW_ZERO),
399 "fork destroy pool test populate COW_ZERO");
400 ok(run_fork_destroy_pool_test(fork_child, RSEQ_MEMPOOL_POPULATE_COW_INIT),
401 "fork destroy pool test populate COW_INIT");
c15b99f6 402
d273fd4b
MD
403 exit(exit_status());
404}