4 * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; only
9 * version 2.1 of the License.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
#include <linux/membarrier.h>

/*
 * Wrap the membarrier(2) system call; on kernels/headers without
 * __NR_membarrier, degrade to a constant -ENOSYS so callers can treat
 * it as an unsupported feature.
 */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif
/*
 * Per-thread bookkeeping for the mutex-based fallback path.
 */
struct rseq_thread_state {
	uint32_t fallback_wait_cnt;	/* times this thread waited in rseq_fallback_wait() */
	uint32_t fallback_cnt;		/* times this thread took the fallback lock */
	sigset_t sigmask_saved;		/* mask saved by signal_off_save() while the fallback lock is held */
};
/*
 * Per-thread rseq ABI area shared with the kernel, registered by
 * rseq_register_current_thread().  Declared weak so another
 * translation unit providing its own definition can override it.
 */
__attribute__((weak)) __thread volatile struct rseq __rseq_abi = {
	.u.e.cpu_id = -1,	/* NOTE(review): presumed "unregistered" sentinel; confirm field path against struct rseq in rseq.h */
};
/* Per-thread fallback statistics and saved signal mask. */
static __thread volatile struct rseq_thread_state rseq_thread_state;

/* Set by rseq_init() when sys_membarrier supports MEMBARRIER_CMD_SHARED. */
int rseq_has_sys_membarrier;
/*
 * Invoke the rseq(2) system call: register @rseq_abi for the current
 * thread, or unregister when @rseq_abi is NULL.  Returns the raw
 * syscall result (0 on success, -1 with errno set on failure).
 */
static int sys_rseq(volatile struct rseq *rseq_abi, int flags)
{
	return syscall(__NR_rseq, rseq_abi, flags);
}
/*
 * Register the calling thread's __rseq_abi area with the kernel.
 * Returns 0 on success, -1 on failure (error reported on stderr).
 */
int rseq_register_current_thread(void)
{
	int rc;

	rc = sys_rseq(&__rseq_abi, 0);
	if (rc) {
		fprintf(stderr, "Error: sys_rseq(...) failed(%d): %s\n",
			errno, strerror(errno));
		return -1;
	}
	/* After a successful registration the kernel publishes a valid cpu_id. */
	assert(rseq_current_cpu() >= 0);
	return 0;
}
/*
 * Unregister the calling thread's rseq area (NULL address requests
 * unregistration).  Returns 0 on success, -1 on failure (error
 * reported on stderr).
 */
int rseq_unregister_current_thread(void)
{
	int rc;

	rc = sys_rseq(NULL, 0);
	if (rc) {
		fprintf(stderr, "Error: sys_rseq(...) failed(%d): %s\n",
			errno, strerror(errno));
		return -1;
	}
	return 0;
}
/*
 * Initialize an rseq_lock: set up the fallback mutex and put the lock
 * state in RESTART so restartable critical sections may proceed.
 * Returns 0 on success; on failure returns -1 with errno set to the
 * pthread_mutex_init() error code.
 */
int rseq_init_lock(struct rseq_lock *rlock)
{
	int ret;

	ret = pthread_mutex_init(&rlock->lock, NULL);
	if (ret) {
		errno = ret;	/* pthreads returns the error, it does not set errno */
		return -1;
	}
	rlock->state = RSEQ_LOCK_STATE_RESTART;
	return 0;
}
/*
 * Destroy an rseq_lock's fallback mutex.  Returns 0 on success; on
 * failure returns -1 with errno set to the pthread_mutex_destroy()
 * error code.
 */
int rseq_destroy_lock(struct rseq_lock *rlock)
{
	int ret;

	ret = pthread_mutex_destroy(&rlock->lock);
	if (ret) {
		errno = ret;	/* pthreads returns the error, it does not set errno */
		return -1;
	}
	return 0;
}
108 static void signal_off_save(sigset_t
*oldset
)
114 ret
= pthread_sigmask(SIG_BLOCK
, &set
, oldset
);
119 static void signal_restore(sigset_t oldset
)
123 ret
= pthread_sigmask(SIG_SETMASK
, &oldset
, NULL
);
/*
 * Acquire the fallback mutex with all signals blocked (the saved mask
 * is restored in rseq_fallback_unlock()), then publish the LOCK state
 * so concurrent restartable sections abort to the slow path.
 */
static void rseq_fallback_lock(struct rseq_lock *rlock)
{
	signal_off_save((sigset_t *)&rseq_thread_state.sigmask_saved);
	pthread_mutex_lock(&rlock->lock);
	rseq_thread_state.fallback_cnt++;
	/*
	 * For concurrent threads arriving before we set LOCK:
	 * reading cpu_id after setting the state to LOCK
	 * ensures they restart.
	 */
	ACCESS_ONCE(rlock->state) = RSEQ_LOCK_STATE_LOCK;
	/*
	 * For concurrent threads arriving after we set LOCK:
	 * those will grab the lock, so we are protected by
	 * mutual exclusion.
	 */
}
/*
 * Wait for the current fallback-lock holder to release the lock:
 * acquire and immediately release the mutex (with signals blocked),
 * counting the wait in the per-thread statistics.
 */
void rseq_fallback_wait(struct rseq_lock *rlock)
{
	signal_off_save((sigset_t *)&rseq_thread_state.sigmask_saved);
	pthread_mutex_lock(&rlock->lock);
	rseq_thread_state.fallback_wait_cnt++;
	pthread_mutex_unlock(&rlock->lock);
	signal_restore(rseq_thread_state.sigmask_saved);
}
/*
 * Release the fallback lock taken by rseq_fallback_lock(): move the
 * lock state back to RESTART with the memory ordering required for
 * restartable sections to observe our side effects, then drop the
 * mutex and restore the caller's signal mask.
 */
static void rseq_fallback_unlock(struct rseq_lock *rlock, int cpu_at_start)
{
	/*
	 * Concurrent rseq arriving before we set state back to RESTART
	 * grab the lock. Those arriving after we set state back to
	 * RESTART will perform restartable critical sections. The next
	 * owner of the lock will take take of making sure it prevents
	 * concurrent restartable sequences from completing. We may be
	 * writing from another CPU, so update the state with a store
	 * release semantic to ensure restartable sections will see our
	 * side effect (writing to *p) before they enter their
	 * restartable critical section.
	 *
	 * In cases where we observe that we are on the right CPU after the
	 * critical section, program order ensures that following restartable
	 * critical sections will see our stores, so we don't have to use
	 * store-release or membarrier.
	 *
	 * Use sys_membarrier when available to remove the memory barrier
	 * implied by smp_load_acquire().
	 */
	if (likely(rseq_current_cpu() == cpu_at_start)) {
		/* Same CPU: program order suffices, plain store. */
		ACCESS_ONCE(rlock->state) = RSEQ_LOCK_STATE_RESTART;
	} else {
		if (!has_fast_acquire_release() && rseq_has_sys_membarrier) {
			/* membarrier supplies ordering; abort if it unexpectedly fails. */
			if (membarrier(MEMBARRIER_CMD_SHARED, 0))
				abort();
			ACCESS_ONCE(rlock->state) = RSEQ_LOCK_STATE_RESTART;
		} else {
			/*
			 * Store with release semantic to ensure
			 * restartable sections will see our side effect
			 * (writing to *p) before they enter their
			 * restartable critical section. Matches
			 * smp_load_acquire() in rseq_start().
			 */
			smp_store_release(&rlock->state,
					RSEQ_LOCK_STATE_RESTART);
		}
	}
	pthread_mutex_unlock(&rlock->lock);
	signal_restore(rseq_thread_state.sigmask_saved);
}
/*
 * Query the current CPU number through sched_getcpu() for the
 * fallback path.  A negative result is unrecoverable here, so report
 * it and abort.
 */
int rseq_fallback_current_cpu(void)
{
	int cpu = sched_getcpu();

	if (cpu < 0) {
		perror("sched_getcpu()");
		abort();
	}
	return cpu;
}
/*
 * Enter the fallback path: take the fallback lock and return the CPU
 * number the caller should operate on.
 */
int rseq_fallback_begin(struct rseq_lock *rlock)
{
	rseq_fallback_lock(rlock);
	return rseq_fallback_current_cpu();
}
/*
 * Leave the fallback path: release the fallback lock, passing the CPU
 * obtained from rseq_fallback_begin() so unlock can pick the cheapest
 * ordering strategy.
 */
void rseq_fallback_end(struct rseq_lock *rlock, int cpu)
{
	rseq_fallback_unlock(rlock, cpu);
}
/* Handle non-initialized rseq for this thread. */
void rseq_fallback_noinit(struct rseq_state *rseq_state)
{
	/* Mark the snapshot failed so the caller diverts to the fallback. */
	rseq_state->lock_state = RSEQ_LOCK_STATE_FAIL;
	rseq_state->cpu_id = 0;
}
/* Number of times the calling thread waited on the fallback lock. */
uint32_t rseq_get_fallback_wait_cnt(void)
{
	return rseq_thread_state.fallback_wait_cnt;
}
/* Number of times the calling thread took the fallback lock. */
uint32_t rseq_get_fallback_cnt(void)
{
	return rseq_thread_state.fallback_cnt;
}
/*
 * Library constructor: probe sys_membarrier support once at load time
 * and record whether MEMBARRIER_CMD_SHARED is available, so the
 * unlock path can use it instead of heavier barriers.
 */
void __attribute__((constructor)) rseq_init(void)
{
	int ret;

	ret = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (ret >= 0 && (ret & MEMBARRIER_CMD_SHARED))
		rseq_has_sys_membarrier = 1;
}