/*
 * lttng-ust-urcu.c
 *
 * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <lttng/urcu/static/urcu-ust.h>
#include <lttng/urcu/pointer.h>
#include <urcu/tls-compat.h>

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <lttng/urcu/urcu-ust.h>
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC		\
	sizeof(struct registry_chunk)	\
	+ INIT_NR_THREADS * sizeof(struct lttng_ust_urcu_reader)
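
/*
 * Sizing note: with INIT_NR_THREADS == 8, the initial arena chunk holds
 * the chunk header plus eight reader slots. expand_arena() doubles the
 * mapping size each time the arena runs out of free slots, so capacity
 * grows geometrically from there.
 */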

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

static
int lttng_ust_urcu_refcount;

/*
 * If the headers do not support the membarrier system call, fall back
 * on smp_mb().
 */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY = 0,
	MEMBARRIER_CMD_SHARED = (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
};

static
void __attribute__((constructor)) _lttng_ust_urcu_init(void);
static
void __attribute__((destructor)) lttng_ust_urcu_exit(void);

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int lttng_ust_urcu_has_sys_membarrier;
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held all the way through the completion of the
 * grace period wait. It is sporadically released between iterations
 * on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t lttng_ust_urcu_key;

struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };

/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		abort();
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			abort();
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		abort();
}

static void smp_mb_master(void)
{
	if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			abort();
	} else {
		cmm_smp_mb();
	}
}
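
/*
 * Note: MEMBARRIER_CMD_PRIVATE_EXPEDITED issues a memory barrier on
 * every running thread of the process, which lets the read-side fast
 * path (see <lttng/urcu/static/urcu-ust.h>) pair with this write-side
 * barrier using a simple compiler barrier. Without the system call,
 * both sides fall back on full cmm_smp_mb() barriers.
 */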

/*
 * Always called with rcu_registry_lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
		struct cds_list_head *cur_snap_readers,
		struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct lttng_ust_urcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (lttng_ust_urcu_reader_state(&index->ctr)) {
			case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case LTTNG_UST_URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case LTTNG_UST_URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}

void lttng_ust_urcu_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data structure
	 * pointed to by the new ptr.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr, lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
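
/*
 * Example: typical write-side usage, publishing a new version of a
 * shared structure and reclaiming the old one after a grace period.
 * Illustrative sketch only: "struct mydata", "shared_ptr" and
 * "new_version" are hypothetical names, and the exchange helper is
 * assumed to come from <lttng/urcu/pointer.h>.
 *
 *	struct mydata *old;
 *
 *	old = lttng_ust_rcu_xchg_pointer(&shared_ptr, new_version);
 *	lttng_ust_urcu_synchronize_rcu();
 *	free(old);
 */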

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void lttng_ust_urcu_read_lock(void)
{
	_lttng_ust_urcu_read_lock();
}

void lttng_ust_urcu_read_unlock(void)
{
	_lttng_ust_urcu_read_unlock();
}

int lttng_ust_urcu_read_ongoing(void)
{
	return _lttng_ust_urcu_read_ongoing();
}
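
/*
 * Example: typical read-side usage. This flavor is "bulletproof": the
 * calling thread is lazily registered on its first
 * lttng_ust_urcu_read_lock(), so no prior setup call is required.
 * Illustrative sketch only: "struct mydata", "shared_ptr" and
 * "do_something_with()" are hypothetical names, and the dereference
 * helper is assumed to come from <lttng/urcu/pointer.h>.
 *
 *	struct mydata *p;
 *
 *	lttng_ust_urcu_read_lock();
 *	p = lttng_ust_rcu_dereference(shared_ptr);
 *	if (p)
 *		do_something_with(p);
 *	lttng_ust_urcu_read_unlock();
 */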

/*
 * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized chunk.
 * Else, try expanding the last chunk. If this fails, allocate a new
 * chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct lttng_ust_urcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}

static
struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow expanding once per alloc */
	size_t len = sizeof(struct lttng_ust_urcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}

/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct lttng_ust_urcu_reader);
}

static
struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and mutex locked */
static
void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(lttng_ust_urcu_reader) = NULL;
}

/* Disable signals, take mutex, add to registry */
void lttng_ust_urcu_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal handler concurrently registered our thread
	 * since the check in rcu_read_lock().
	 */
	if (URCU_TLS(lttng_ust_urcu_reader))
		goto end;

	/*
	 * Take care of early registration before lttng_ust_urcu constructor.
	 */
	_lttng_ust_urcu_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

void lttng_ust_urcu_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
		lttng_ust_urcu_register(); /* If not yet registered. */
}
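
/*
 * Note: calling lttng_ust_urcu_register_thread() early (e.g. right after
 * thread creation) is optional; it merely front-loads the registration
 * work (signal masking, registry mutation) that would otherwise happen
 * inside the thread's first read-side critical section.
 */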

/* Disable signals, take mutex, remove from registry */
static
void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	lttng_ust_urcu_exit();
}

/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
{
	lttng_ust_urcu_unregister(rcu_key);
}

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	lttng_ust_urcu_has_sys_membarrier = 1;
}
#endif

static
void lttng_ust_urcu_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				abort();
			available = true;
		}
	}
	lttng_ust_urcu_sys_membarrier_status(available);
}

static
void _lttng_ust_urcu_init(void)
{
	mutex_lock(&init_lock);
	if (!lttng_ust_urcu_refcount++) {
		int ret;

		ret = pthread_key_create(&lttng_ust_urcu_key,
				lttng_ust_urcu_thread_exit_notifier);
		if (ret)
			abort();
		lttng_ust_urcu_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void lttng_ust_urcu_exit(void)
{
	mutex_lock(&init_lock);
	if (!--lttng_ust_urcu_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk, chunk->data_len
					+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(lttng_ust_urcu_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork will make
 * sure fork() does not race with a concurrent thread executing with
 * any of those locks held. This ensures that the registry and data
 * protected by rcu_gp_lock are in a coherent state in the child.
 */
void lttng_ust_urcu_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

void lttng_ust_urcu_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Prune all entries from registry except our own thread. Fits the Linux
 * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void lttng_ust_urcu_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct lttng_ust_urcu_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}

void lttng_ust_urcu_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	lttng_ust_urcu_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
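
/*
 * Example: the fork hooks above are meant to be wired into fork()
 * handling, typically through pthread_atfork(). Illustrative sketch
 * only:
 *
 *	int ret;
 *
 *	ret = pthread_atfork(lttng_ust_urcu_before_fork,
 *			lttng_ust_urcu_after_fork_parent,
 *			lttng_ust_urcu_after_fork_child);
 *	if (ret)
 *		abort();
 */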