1 // SPDX-License-Identifier: MIT
2 // SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * rseq memory pool test.
21 #include <rseq/mempool.h>
22 #include "../src/rseq-utils.h"
27 #if RSEQ_BITS_PER_LONG == 64
28 # define POISON_VALUE 0xABCDABCDABCDABCDULL
30 # define POISON_VALUE 0xABCDABCDUL
/*
 * NOTE(review): extraction appears truncated — the enclosing
 * `struct test_data` declaration head and its other members (e.g. the
 * `value[]` array used below) are not visible in this view.
 */
/* Back-reference to the per-cpu allocation this item belongs to
 * (set via `cpuptr->backref = ptr` in test_mempool_fill()). */
35 struct test_data __rseq_percpu
*backref
;
/* List node chaining allocated items (see list_add()/list_add_tail()
 * usage in test_mempool_fill()). */
36 struct list_head node
;
/*
 * test_mempool_fill: exercise a robust per-cpu memory pool configured
 * with the given populate @policy, @max_nr_ranges and per-cpu @stride:
 * build the pool attributes, create the pool, allocate until full,
 * validate pool contents, free everything, then re-allocate single
 * (zeroed and initialized) objects before destroying the pool.
 *
 * NOTE(review): the extraction of this function is truncated — the
 * allocation loop, several `ok()` checks and most braces are missing
 * from this view; comments below describe only what is visible.
 */
39 static void test_mempool_fill(enum rseq_mempool_populate_policy policy
,
40 unsigned long max_nr_ranges
, size_t stride
)
42 struct test_data __rseq_percpu
*ptr
;
43 struct test_data
*iter
, *tmp
;
44 struct rseq_mempool
*mempool
;
45 struct rseq_mempool_attr
*attr
;
48 int ret
, i
, size_order
;
/* Initializer used below with rseq_mempool_percpu_malloc_init();
 * field values (123, 456) are validated after that allocation. */
49 struct test_data init_value
= {
/* Configure pool attributes: robust checks, per-cpu layout, range
 * cap, poisoning on free, and the requested populate policy. */
58 attr
= rseq_mempool_attr_create();
59 ok(attr
, "Create pool attribute");
60 ret
= rseq_mempool_attr_set_robust(attr
);
61 ok(ret
== 0, "Setting mempool robust attribute");
62 ret
= rseq_mempool_attr_set_percpu(attr
, stride
, CPU_SETSIZE
);
63 ok(ret
== 0, "Setting mempool percpu type");
64 ret
= rseq_mempool_attr_set_max_nr_ranges(attr
, max_nr_ranges
);
65 ok(ret
== 0, "Setting mempool max_nr_ranges=%lu", max_nr_ranges
);
66 ret
= rseq_mempool_attr_set_poison(attr
, POISON_VALUE
);
67 ok(ret
== 0, "Setting mempool poison");
68 ret
= rseq_mempool_attr_set_populate_policy(attr
, policy
);
69 ok(ret
== 0, "Setting mempool populate policy to %s",
70 policy
== RSEQ_MEMPOOL_POPULATE_PRIVATE_NONE
? "NONE" : "ALL");
71 mempool
= rseq_mempool_create("test_data",
72 sizeof(struct test_data
), attr
);
73 ok(mempool
, "Create mempool of size %zu", stride
);
/* Attributes are copied by rseq_mempool_create(); safe to destroy. */
74 rseq_mempool_attr_destroy(attr
);
/*
 * Allocation loop (partially missing from this view): zero-allocate
 * items, link each cpu-0 instance into `list`, and keep a back
 * reference to the per-cpu pointer in each item.
 */
77 struct test_data
*cpuptr
;
79 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_zmalloc(mempool
);
82 /* Link items in cpu 0. */
83 cpuptr
= rseq_percpu_ptr(ptr
, 0, stride
);
84 cpuptr
->backref
= ptr
;
85 /* Randomize items in list. */
87 list_add(&cpuptr
->node
, &list
);
89 list_add_tail(&cpuptr
->node
, &list
);
/* Verify the whole pool space was consumed: object count times
 * rounded-up object size must equal stride * max_nr_ranges. */
93 size_order
= rseq_get_count_order_ulong(sizeof(struct test_data
));
94 ok(count
* (1U << size_order
) == stride
* max_nr_ranges
,
95 "Allocated %" PRIu64
" objects in pool", count
);
/* Check every cpu instance of every allocated item is still zeroed
 * (zmalloc) — any non-zero value[0] indicates pool corruption. */
97 list_for_each_entry(iter
, &list
, node
) {
99 for (i
= 0; i
< CPU_SETSIZE
; i
++) {
100 struct test_data
*cpuptr
= rseq_percpu_ptr(ptr
, i
, stride
);
102 if (cpuptr
->value
[0] != 0)
107 ok(1, "Check for pool content corruption");
/* Free all objects (safe iteration: nodes are removed as we go). */
109 list_for_each_entry_safe(iter
, tmp
, &list
, node
) {
111 rseq_mempool_percpu_free(ptr
, stride
);
113 ok(1, "Free all objects");
/* Single zeroed allocation / free round-trip. */
115 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_zmalloc(mempool
);
118 ok(1, "Allocate one object");
120 rseq_mempool_percpu_free(ptr
, stride
);
121 ok(1, "Free one object");
/* Allocation with explicit initial value, then validate fields. */
123 ptr
= (struct test_data __rseq_percpu
*)
124 rseq_mempool_percpu_malloc_init(mempool
,
125 &init_value
, sizeof(struct test_data
));
128 ok(1, "Allocate one initialized object");
130 ok(ptr
->value
[0] == 123 && ptr
->value
[1] == 456, "Validate initial values");
132 rseq_mempool_percpu_free(ptr
, stride
);
133 ok(1, "Free one object");
/* Robust destroy also validates that no objects leaked. */
135 ret
= rseq_mempool_destroy(mempool
);
136 ok(ret
== 0, "Destroy mempool");
/*
 * test_robust_double_free: free the same per-cpu allocation twice on a
 * robust pool. The robust checks are expected to terminate the process
 * with SIGABRT — run_robust_test() (which runs this in a child process,
 * per its waitpid()/WTERMSIG handling) counts that as a pass.
 */
139 static void test_robust_double_free(struct rseq_mempool
*pool
,
140 enum rseq_mempool_populate_policy policy
__attribute__((unused
)))
142 struct test_data __rseq_percpu
*ptr
;
144 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_malloc(pool
);
146 rseq_mempool_percpu_free(ptr
);
/* Second free of the same pointer: the robust-pool violation. */
147 rseq_mempool_percpu_free(ptr
);
/*
 * test_robust_corrupt_after_free: scribble over the pool's free-list
 * area after freeing an item, then destroy the pool so the robust
 * validation detects the corruption (expected SIGABRT, see
 * run_robust_test()).
 */
150 static void test_robust_corrupt_after_free(struct rseq_mempool
*pool
,
151 enum rseq_mempool_populate_policy policy
)
153 struct test_data __rseq_percpu
*ptr
;
154 struct test_data
*cpuptr
;
156 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_malloc(pool
);
158 * Corrupt free list: For robust pools, the free list is located
159 * after the last cpu memory range for populate all, and after
160 * the init values memory range for populate none.
/* Point cpuptr at the range holding the free list, which depends on
 * the populate policy (see comment fragment above). */
162 if (policy
== RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL
)
163 cpuptr
= (struct test_data
*) rseq_percpu_ptr(ptr
, rseq_mempool_get_max_nr_cpus(pool
));
165 cpuptr
= (struct test_data
*) rseq_percpu_ptr(ptr
, rseq_mempool_get_max_nr_cpus(pool
) + 1);
167 rseq_mempool_percpu_free(ptr
);
/* Overwrite freed/free-list memory with an arbitrary non-poison
 * value (a function address) after the free. */
168 cpuptr
->value
[0] = (uintptr_t) test_robust_corrupt_after_free
;
/* Robust destroy should detect the corruption and abort. */
170 rseq_mempool_destroy(pool
);
/*
 * test_robust_memory_leak: allocate an item and never free it, then
 * destroy the pool. The robust destroy is expected to flag the leaked
 * allocation and abort (SIGABRT counted as pass by run_robust_test()).
 */
173 static void test_robust_memory_leak(struct rseq_mempool
*pool
,
174 enum rseq_mempool_populate_policy policy
__attribute__((unused
)))
/* Intentionally leaked allocation. */
176 (void) rseq_mempool_percpu_malloc(pool
);
178 rseq_mempool_destroy(pool
);
/*
 * test_robust_free_list_corruption: corrupt the free-list linkage after
 * a free (make a free-list entry point to itself), then allocate twice
 * so the allocator walks the corrupted list and the robust checks
 * detect it (expected SIGABRT, see run_robust_test()).
 */
181 static void test_robust_free_list_corruption(struct rseq_mempool
*pool
,
182 enum rseq_mempool_populate_policy policy
)
184 struct test_data __rseq_percpu
*ptr
;
185 struct test_data
*cpuptr
;
187 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_malloc(pool
);
189 * Corrupt free list: For robust pools, the free list is located
190 * after the last cpu memory range for populate all, and after
191 * the init values memory range for populate none.
/* Locate the free-list range; its position depends on the populate
 * policy (see comment fragment above). */
193 if (policy
== RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL
)
194 cpuptr
= (struct test_data
*) rseq_percpu_ptr(ptr
, rseq_mempool_get_max_nr_cpus(pool
));
196 cpuptr
= (struct test_data
*) rseq_percpu_ptr(ptr
, rseq_mempool_get_max_nr_cpus(pool
) + 1);
198 rseq_mempool_percpu_free(ptr
);
/* Make the free-list entry point at itself — a cycle the robust
 * allocator should detect. */
200 cpuptr
->value
[0] = (uintptr_t) cpuptr
;
/* Two allocations: walk the corrupted free list. */
202 (void) rseq_mempool_percpu_malloc(pool
);
203 (void) rseq_mempool_percpu_malloc(pool
);
/*
 * test_robust_poison_corruption_malloc: write into an item after it was
 * freed (and thus poisoned — the pools under test set POISON_VALUE),
 * then allocate again so the poison check on allocation detects the
 * modification (expected SIGABRT, see run_robust_test()).
 */
206 static void test_robust_poison_corruption_malloc(struct rseq_mempool
*pool
,
207 enum rseq_mempool_populate_policy policy
__attribute__((unused
)))
209 struct test_data __rseq_percpu
*ptr
;
210 struct test_data
*cpuptr
;
212 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_malloc(pool
);
/* cpu 0 instance of the allocated item. */
213 cpuptr
= (struct test_data
*) rseq_percpu_ptr(ptr
, 0);
215 rseq_mempool_percpu_free(ptr
);
/* Use-after-free: clobber the poison pattern. */
217 cpuptr
->value
[0] = 1;
/* Re-allocation should validate poison and abort. */
219 (void) rseq_mempool_percpu_malloc(pool
);
/*
 * test_robust_poison_corruption_destroy: same use-after-free poison
 * clobber as test_robust_poison_corruption_malloc, but the violation is
 * expected to be caught by pool destroy rather than by a subsequent
 * allocation (expected SIGABRT, see run_robust_test()).
 */
222 static void test_robust_poison_corruption_destroy(struct rseq_mempool
*pool
,
223 enum rseq_mempool_populate_policy policy
__attribute__((unused
)))
225 struct test_data __rseq_percpu
*ptr
;
226 struct test_data
*cpuptr
;
228 ptr
= (struct test_data __rseq_percpu
*) rseq_mempool_percpu_malloc(pool
);
/* cpu 0 instance of the allocated item. */
229 cpuptr
= (struct test_data
*) rseq_percpu_ptr(ptr
, 0);
231 rseq_mempool_percpu_free(ptr
);
/* Use-after-free: clobber the poison pattern. */
233 cpuptr
->value
[0] = 1;
/* Destroy should validate poison and abort. */
235 rseq_mempool_destroy(pool
);
/*
 * run_robust_test: run one robust-violation @test against @pool with
 * @policy and report success when the test died from SIGABRT (i.e. the
 * robust pool checks fired as expected).
 *
 * NOTE(review): extraction truncated — the fork()/child-exec portion
 * (which sets `cpid` and presumably runs test(pool, policy) in the
 * child) and the return statements are missing from this view; only
 * the parent-side wait and signal check are visible. Confirm against
 * the full source.
 */
238 static int run_robust_test(void (*test
)(struct rseq_mempool
*, enum rseq_mempool_populate_policy
),
239 struct rseq_mempool
*pool
, enum rseq_mempool_populate_policy policy
)
/* Wait for the child running the violation test... */
253 waitpid(cpid
, &status
, 0);
/* ...and treat death-by-SIGABRT as the expected (passing) outcome. */
256 if (WIFSIGNALED(status
) &&
257 (SIGABRT
== WTERMSIG(status
)))
/*
 * run_robust_tests: build a robust per-cpu pool (default stride, a
 * single cpu) with the given populate @policy and run every
 * robust-violation test case against it via run_robust_test(), each
 * expected to abort.
 *
 * NOTE(review): extraction truncated — the `ret` declaration, braces
 * and the `ok()` following pool creation are not visible here.
 */
263 static void run_robust_tests(enum rseq_mempool_populate_policy policy
)
265 struct rseq_mempool_attr
*attr
;
266 struct rseq_mempool
*pool
;
/* Robust, per-cpu (1 cpu) pool attributes with requested policy. */
269 attr
= rseq_mempool_attr_create();
270 ok(attr
, "Create mempool attributes");
272 ret
= rseq_mempool_attr_set_robust(attr
);
273 ok(ret
== 0, "Setting mempool robust attribute");
275 ret
= rseq_mempool_attr_set_percpu(attr
, RSEQ_MEMPOOL_STRIDE
, 1);
276 ok(ret
== 0, "Setting mempool percpu type");
278 ret
= rseq_mempool_attr_set_populate_policy(attr
, policy
);
279 ok(ret
== 0, "Setting mempool populate policy to %s",
280 policy
== RSEQ_MEMPOOL_POPULATE_PRIVATE_NONE
? "PRIVATE_NONE" : "PRIVATE_ALL");
282 pool
= rseq_mempool_create("mempool-robust",
283 sizeof(struct test_data
), attr
);
/* Attributes are copied by rseq_mempool_create(); safe to destroy. */
285 rseq_mempool_attr_destroy(attr
);
/* Each case must terminate with SIGABRT to count as a pass. */
287 ok(run_robust_test(test_robust_double_free
, pool
, policy
),
288 "robust-double-free");
290 ok(run_robust_test(test_robust_memory_leak
, pool
, policy
),
291 "robust-memory-leak");
293 ok(run_robust_test(test_robust_poison_corruption_malloc
, pool
, policy
),
294 "robust-poison-corruption-malloc");
296 ok(run_robust_test(test_robust_poison_corruption_destroy
, pool
, policy
),
297 "robust-poison-corruption-destroy");
299 ok(run_robust_test(test_robust_corrupt_after_free
, pool
, policy
),
300 "robust-corrupt-after-free");
302 ok(run_robust_test(test_robust_free_list_corruption
, pool
, policy
),
303 "robust-free-list-corruption");
305 rseq_mempool_destroy(pool
);
/*
 * NOTE(review): the enclosing function header is not visible in this
 * view — presumably main(); confirm against the full source. Visible
 * body: sweep test_mempool_fill() over a grid of range counts and
 * strides for both populate policies, then run the robust test suite
 * for both policies.
 */
311 unsigned long nr_ranges
;
/* Power-of-two range counts: 1, 2, 4, 8, 16. */
315 for (nr_ranges
= 1; nr_ranges
< 32; nr_ranges
<<= 1) {
316 /* From page size to 64kB */
317 for (len
= rseq_get_page_len(); len
< 65536; len
<<= 1) {
318 test_mempool_fill(RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL
, nr_ranges
, len
);
319 test_mempool_fill(RSEQ_MEMPOOL_POPULATE_PRIVATE_NONE
, nr_ranges
, len
);
/* Restart the stride sweep at page size for the large-stride pass
 * (single range only). */
323 len
= rseq_get_page_len();
326 /* From min(page size, 64kB) to 4MB */
327 for (; len
< 4096 * 1024; len
<<= 1) {
328 test_mempool_fill(RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL
, 1, len
);
329 test_mempool_fill(RSEQ_MEMPOOL_POPULATE_PRIVATE_NONE
, 1, len
);
/* Robust-violation suite, once per populate policy. */
332 run_robust_tests(RSEQ_MEMPOOL_POPULATE_PRIVATE_ALL
);
333 run_robust_tests(RSEQ_MEMPOOL_POPULATE_PRIVATE_NONE
);