Fix: Restartable Sequences: testing: ip-relative x86-64
[deliverable/linux.git] tools/testing/selftests/rseq/rseq-x86.h
/*
 * rseq-x86.h
 *
 * (C) Copyright 2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifdef __x86_64__

#define smp_mb()	__asm__ __volatile__ ("mfence" : : : "memory")
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()

#define smp_load_acquire(p) \
__extension__ ({ \
	__typeof(*p) ____p1 = READ_ONCE(*p); \
	barrier(); \
	____p1; \
})

#define smp_acquire__after_ctrl_dep()	smp_rmb()

#define smp_store_release(p, v) \
do { \
	barrier(); \
	WRITE_ONCE(*p, v); \
} while (0)

#define has_fast_acquire_release()	1
#define has_single_copy_load_64()	1
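
/*
 * Illustrative example (a minimal sketch, not used by the tests):
 * message passing with the acquire/release helpers above. It assumes
 * READ_ONCE() and WRITE_ONCE() are already defined by the includer,
 * as the macros above require.
 */
static inline void rseq_example_publish(int *payload, int *flag, int v)
{
	*payload = v;
	smp_store_release(flag, 1);	/* order the payload before the flag */
}

static inline int rseq_example_consume(int *payload, int *flag)
{
	if (smp_load_acquire(flag))	/* flag load ordered before payload */
		return *payload;
	return -1;			/* not yet published */
}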

/*
 * The __rseq_table section can be used by debuggers to better handle
 * single-stepping through the restartable critical sections. Each
 * 32-byte entry records the start, post-commit and abort addresses of
 * one critical section (see the layout sketch after the macro below).
 */
#define RSEQ_FINISH_ASM(_target_final, _to_write_final, _start_value, \
		_failure, _spec_store, _spec_input, \
		_final_store, _final_input, _extra_clobber, \
		_setup, _teardown, _scratch) \
do { \
	_scratch \
	__asm__ __volatile__ goto ( \
		".pushsection __rseq_table, \"aw\"\n\t" \
		".balign 32\n\t" \
		"3:\n\t" \
		".quad 1f, 2f, 4f, 0x0\n\t" \
		".popsection\n\t" \
		"1:\n\t" \
		_setup \
		RSEQ_INJECT_ASM(1) \
68 "leaq 3b(%%rip), %%rax\n\t" \
69 "movq %%rax, %[rseq_cs]\n\t" \
		RSEQ_INJECT_ASM(2) \
		"cmpl %[start_event_counter], %[current_event_counter]\n\t" \
		"jnz 4f\n\t" \
		RSEQ_INJECT_ASM(3) \
		_spec_store \
		_final_store \
		"2:\n\t" \
		RSEQ_INJECT_ASM(5) \
		"movq $0, %[rseq_cs]\n\t" \
		_teardown \
		".pushsection __rseq_failure, \"a\"\n\t" \
		"4:\n\t" \
		"movq $0, %[rseq_cs]\n\t" \
		_teardown \
		"jmp %l[failure]\n\t" \
		".popsection\n\t" \
		: /* gcc asm goto does not allow outputs */ \
		: [start_event_counter]"r"((_start_value).event_counter), \
		  [current_event_counter]"m"((_start_value).rseqp->u.e.event_counter), \
		  [rseq_cs]"m"((_start_value).rseqp->rseq_cs) \
		  _spec_input \
		  _final_input \
		  RSEQ_INJECT_INPUT \
		: "memory", "cc", "rax" \
		  _extra_clobber \
		  RSEQ_INJECT_CLOBBER \
		: _failure \
	); \
} while (0)
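
/*
 * A hypothetical C view of the 32-byte __rseq_table entry emitted by
 * the .quad directives above. The struct and field names are
 * illustrative (they are not kernel ABI declarations); the fields
 * mirror labels 1:, 2: and 4: in the macro. Assumes <stdint.h> types
 * from the includer, as rseq_scratch below does.
 */
struct rseq_table_entry_sketch {
	uint64_t start_ip;		/* label 1: start of the critical section */
	uint64_t post_commit_ip;	/* label 2: address after the final store */
	uint64_t abort_ip;		/* label 4: failure/restart handler */
	uint64_t padding;		/* the trailing 0x0, pads to 32 bytes */
};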

#define RSEQ_FINISH_FINAL_STORE_ASM() \
		"movq %[to_write_final], %[target_final]\n\t"

/* x86-64 is TSO: a plain store already provides release semantics. */
#define RSEQ_FINISH_FINAL_STORE_RELEASE_ASM() \
		RSEQ_FINISH_FINAL_STORE_ASM()

#define RSEQ_FINISH_FINAL_STORE_INPUT(_target_final, _to_write_final) \
		, [to_write_final]"r"(_to_write_final), \
		[target_final]"m"(*(_target_final))

#define RSEQ_FINISH_SPECULATIVE_STORE_ASM() \
		"movq %[to_write_spec], %[target_spec]\n\t" \
		RSEQ_INJECT_ASM(4)

#define RSEQ_FINISH_SPECULATIVE_STORE_INPUT(_target_spec, _to_write_spec) \
		, [to_write_spec]"r"(_to_write_spec), \
		[target_spec]"m"(*(_target_spec))

/* TODO: implement a faster memcpy. */
#define RSEQ_FINISH_MEMCPY_STORE_ASM() \
		"test %[len_memcpy], %[len_memcpy]\n\t" \
		"jz 333f\n\t" \
		"222:\n\t" \
		"movb (%[to_write_memcpy]), %%al\n\t" \
		"movb %%al, (%[target_memcpy])\n\t" \
		"inc %[to_write_memcpy]\n\t" \
		"inc %[target_memcpy]\n\t" \
		"dec %[len_memcpy]\n\t" \
		"jnz 222b\n\t" \
		"333:\n\t" \
		RSEQ_INJECT_ASM(4)
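
/*
 * For reference, the byte-at-a-time loop above behaves like the C
 * helper below (an illustrative sketch; the real copy must stay
 * inside the asm block so that an abort restarts the whole sequence).
 */
static inline void rseq_example_byte_copy(char *dst, const char *src,
		unsigned long len)
{
	while (len--)	/* mirrors the dec/jnz loop at label 222 */
		*dst++ = *src++;
}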

#define RSEQ_FINISH_MEMCPY_STORE_INPUT(_target_memcpy, _to_write_memcpy, _len_memcpy) \
		, [to_write_memcpy]"r"(_to_write_memcpy), \
		[target_memcpy]"r"(_target_memcpy), \
		[len_memcpy]"r"(_len_memcpy), \
		[rseq_scratch0]"m"(rseq_scratch[0]), \
		[rseq_scratch1]"m"(rseq_scratch[1]), \
		[rseq_scratch2]"m"(rseq_scratch[2])

#define RSEQ_FINISH_MEMCPY_CLOBBER() \
		, "rax"

#define RSEQ_FINISH_MEMCPY_SCRATCH() \
		uint64_t rseq_scratch[3];

/*
 * The memcpy loop modifies the to_write_memcpy, target_memcpy and
 * len_memcpy registers, but gcc assumes input operands are left
 * unchanged. Save them to scratch memory in _setup and restore them
 * in _teardown, which runs on both the success and the failure paths.
 */
#define RSEQ_FINISH_MEMCPY_SETUP() \
		"movq %[to_write_memcpy], %[rseq_scratch0]\n\t" \
		"movq %[target_memcpy], %[rseq_scratch1]\n\t" \
		"movq %[len_memcpy], %[rseq_scratch2]\n\t"

#define RSEQ_FINISH_MEMCPY_TEARDOWN() \
		"movq %[rseq_scratch2], %[len_memcpy]\n\t" \
		"movq %[rseq_scratch1], %[target_memcpy]\n\t" \
		"movq %[rseq_scratch0], %[to_write_memcpy]\n\t"

#elif __i386__

/*
 * Support older 32-bit x86 processors that do not implement the
 * mfence/lfence/sfence instructions: a locked operation on the stack
 * acts as a full memory barrier.
 */
#define smp_mb() \
	__asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")
#define smp_rmb() \
	__asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")
#define smp_wmb() \
	__asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")

#define smp_load_acquire(p) \
__extension__ ({ \
	__typeof(*p) ____p1 = READ_ONCE(*p); \
	smp_mb(); \
	____p1; \
})

#define smp_acquire__after_ctrl_dep()	smp_rmb()

#define smp_store_release(p, v) \
do { \
	smp_mb(); \
	WRITE_ONCE(*p, v); \
} while (0)

#define has_fast_acquire_release()	0
#define has_single_copy_load_64()	0

/*
 * Use eax as scratch register and take memory operands as input to
 * lessen register pressure. This is especially needed when compiling
 * do_rseq_memcpy() at -O0.
 */
#define RSEQ_FINISH_ASM(_target_final, _to_write_final, _start_value, \
		_failure, _spec_store, _spec_input, \
		_final_store, _final_input, _extra_clobber, \
		_setup, _teardown, _scratch) \
do { \
	_scratch \
	__asm__ __volatile__ goto ( \
		".pushsection __rseq_table, \"aw\"\n\t" \
		".balign 32\n\t" \
		"3:\n\t" \
		".long 1f, 0x0, 2f, 0x0, 4f, 0x0, 0x0, 0x0\n\t" \
		".popsection\n\t" \
		"1:\n\t" \
		_setup \
		RSEQ_INJECT_ASM(1) \
		"movl $3b, %[rseq_cs]\n\t" \
		RSEQ_INJECT_ASM(2) \
		"movl %[start_event_counter], %%eax\n\t" \
		"cmpl %%eax, %[current_event_counter]\n\t" \
		"jnz 4f\n\t" \
		RSEQ_INJECT_ASM(3) \
		_spec_store \
		_final_store \
		"2:\n\t" \
		RSEQ_INJECT_ASM(5) \
		"movl $0, %[rseq_cs]\n\t" \
		_teardown \
		".pushsection __rseq_failure, \"a\"\n\t" \
		"4:\n\t" \
		"movl $0, %[rseq_cs]\n\t" \
		_teardown \
		"jmp %l[failure]\n\t" \
		".popsection\n\t" \
		: /* gcc asm goto does not allow outputs */ \
		: [start_event_counter]"m"((_start_value).event_counter), \
		  [current_event_counter]"m"((_start_value).rseqp->u.e.event_counter), \
		  [rseq_cs]"m"((_start_value).rseqp->rseq_cs) \
		  _spec_input \
		  _final_input \
		  RSEQ_INJECT_INPUT \
		: "memory", "cc", "eax" \
		  _extra_clobber \
		  RSEQ_INJECT_CLOBBER \
		: _failure \
	); \
} while (0)
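
/*
 * Note: the ".long 1f, 0x0, 2f, 0x0, 4f, 0x0, 0x0, 0x0" directive
 * above emits each address as a low/high 32-bit pair, so the 32-bit
 * table entry keeps the same 32-byte layout as the 64-bit one.
 */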

#define RSEQ_FINISH_FINAL_STORE_ASM() \
		"movl %[to_write_final], %%eax\n\t" \
		"movl %%eax, %[target_final]\n\t"

#define RSEQ_FINISH_FINAL_STORE_RELEASE_ASM() \
		"lock; addl $0,0(%%esp)\n\t" \
		RSEQ_FINISH_FINAL_STORE_ASM()

#define RSEQ_FINISH_FINAL_STORE_INPUT(_target_final, _to_write_final) \
		, [to_write_final]"m"(_to_write_final), \
		[target_final]"m"(*(_target_final))

#define RSEQ_FINISH_SPECULATIVE_STORE_ASM() \
		"movl %[to_write_spec], %%eax\n\t" \
		"movl %%eax, %[target_spec]\n\t" \
		RSEQ_INJECT_ASM(4)

#define RSEQ_FINISH_SPECULATIVE_STORE_INPUT(_target_spec, _to_write_spec) \
		, [to_write_spec]"m"(_to_write_spec), \
		[target_spec]"m"(*(_target_spec))

/*
 * TODO: implement a faster memcpy.
 * Note: len_memcpy is a memory operand here, so the loop counts down
 * the saved copy in rseq_scratch2 instead of the input itself.
 */
#define RSEQ_FINISH_MEMCPY_STORE_ASM() \
		"movl %[len_memcpy], %%eax\n\t" \
		"test %%eax, %%eax\n\t" \
		"jz 333f\n\t" \
		"222:\n\t" \
		"movb (%[to_write_memcpy]), %%al\n\t" \
		"movb %%al, (%[target_memcpy])\n\t" \
		"inc %[to_write_memcpy]\n\t" \
		"inc %[target_memcpy]\n\t" \
		"decl %[rseq_scratch2]\n\t" \
		"jnz 222b\n\t" \
		"333:\n\t" \
		RSEQ_INJECT_ASM(4)

#define RSEQ_FINISH_MEMCPY_STORE_INPUT(_target_memcpy, _to_write_memcpy, _len_memcpy) \
		, [to_write_memcpy]"r"(_to_write_memcpy), \
		[target_memcpy]"r"(_target_memcpy), \
		[len_memcpy]"m"(_len_memcpy), \
		[rseq_scratch0]"m"(rseq_scratch[0]), \
		[rseq_scratch1]"m"(rseq_scratch[1]), \
		[rseq_scratch2]"m"(rseq_scratch[2])

#define RSEQ_FINISH_MEMCPY_CLOBBER()

#define RSEQ_FINISH_MEMCPY_SCRATCH() \
		uint32_t rseq_scratch[3];

/*
 * Save and restore the to_write_memcpy and target_memcpy pointer
 * registers modified by the memcpy loop. The length lives in memory
 * and is consumed from its rseq_scratch2 copy, so it needs no restore.
 */
#define RSEQ_FINISH_MEMCPY_SETUP() \
		"movl %[to_write_memcpy], %[rseq_scratch0]\n\t" \
		"movl %[target_memcpy], %[rseq_scratch1]\n\t" \
		"movl %[len_memcpy], %%eax\n\t" \
		"movl %%eax, %[rseq_scratch2]\n\t"

#define RSEQ_FINISH_MEMCPY_TEARDOWN() \
		"movl %[rseq_scratch1], %[target_memcpy]\n\t" \
		"movl %[rseq_scratch0], %[to_write_memcpy]\n\t"

#endif