Restartable sequences: don't clear rseq_cs after c.s.
tools/testing/selftests/rseq/rseq-ppc.h
/*
 * rseq-ppc.h
 *
 * (C) Copyright 2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * (C) Copyright 2016 - Boqun Feng <boqun.feng@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define smp_mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define smp_lwsync()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define smp_rmb()	smp_lwsync()
#define smp_wmb()	smp_lwsync()

#define smp_load_acquire(p) \
__extension__ ({ \
	__typeof(*p) ____p1 = READ_ONCE(*p); \
	smp_lwsync(); \
	____p1; \
})

#define smp_acquire__after_ctrl_dep()	smp_lwsync()

#define smp_store_release(p, v) \
do { \
	smp_lwsync(); \
	WRITE_ONCE(*p, v); \
} while (0)

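/*
 * Illustrative sketch (not compiled, names hypothetical): how the
 * acquire/release pair above is meant to be used. The producer's lwsync in
 * smp_store_release() orders the payload store before the flag store; the
 * consumer's lwsync in smp_load_acquire() orders the flag load before the
 * payload load, so a consumer that sees the flag also sees the payload.
 */
#if 0
static int data;
static int ready;

static void producer(void)
{
	data = 42;			/* plain store to the payload */
	smp_store_release(&ready, 1);	/* lwsync, then set the flag */
}

static int consumer(void)
{
	while (!smp_load_acquire(&ready))	/* load the flag, then lwsync */
		;
	return data;			/* guaranteed to observe 42 */
}
#endif
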
#define has_fast_acquire_release()	0

#ifdef __PPC64__
#define has_single_copy_load_64()	1
#else
#define has_single_copy_load_64()	0
#endif

/*
 * The __rseq_table section can be used by debuggers to better handle
 * single-stepping through the restartable critical sections.
 */

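/*
 * Illustrative sketch (not compiled, field names hypothetical): one
 * 32-byte __rseq_table entry as emitted by the ".quad 1f, 2f, 4f, 0x0"
 * directive in RSEQ_FINISH_ASM() below. The authoritative layout is the
 * descriptor the kernel reads through the rseq_cs pointer.
 */
#if 0
struct rseq_table_entry_sketch {
	uint64_t start_ip;		/* label 1: start of the critical section */
	uint64_t post_commit_ip;	/* label 2: first insn after the final store */
	uint64_t abort_ip;		/* label 4: where execution resumes on abort */
	uint64_t padding;		/* rounds the entry up to 32 bytes */
};
#endif
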
#ifdef __PPC64__

#define RSEQ_FINISH_ASM(_target_final, _to_write_final, _start_value, \
		_failure, _spec_store, _spec_input, \
		_final_store, _final_input, _extra_clobber, \
		_setup, _teardown, _scratch) \
	__asm__ __volatile__ goto ( \
		".pushsection __rseq_table, \"aw\"\n\t" \
		".balign 32\n\t" \
		"3:\n\t" \
		".quad 1f, 2f, 4f, 0x0\n\t" \
		".popsection\n\t" \
		"1:\n\t" \
		_setup \
		RSEQ_INJECT_ASM(1) \
		"lis %%r17, (3b)@highest\n\t" \
		"ori %%r17, %%r17, (3b)@higher\n\t" \
		"rldicr %%r17, %%r17, 32, 31\n\t" \
		"oris %%r17, %%r17, (3b)@h\n\t" \
		"ori %%r17, %%r17, (3b)@l\n\t" \
		"std %%r17, 0(%[rseq_cs])\n\t" \
		RSEQ_INJECT_ASM(2) \
		"lwz %%r17, %[current_event_counter]\n\t" \
		"cmpw cr7, %[start_event_counter], %%r17\n\t" \
		"bne- cr7, 4f\n\t" \
		RSEQ_INJECT_ASM(3) \
		_spec_store \
		_final_store \
		"2:\n\t" \
		RSEQ_INJECT_ASM(5) \
		_teardown \
		"b 5f\n\t" \
		"4:\n\t" \
		_teardown \
		"b %l[failure]\n\t" \
		"5:\n\t" \
		: /* gcc asm goto does not allow outputs */ \
		: [start_event_counter]"r"((_start_value).event_counter), \
		  [current_event_counter]"m"((_start_value).rseqp->u.e.event_counter), \
		  [rseq_cs]"b"(&(_start_value).rseqp->rseq_cs) \
		  _spec_input \
		  _final_input \
		  RSEQ_INJECT_INPUT \
		: "r17", "memory", "cc" \
		  _extra_clobber \
		  RSEQ_INJECT_CLOBBER \
		: _failure \
	)

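/*
 * Illustrative sketch (not compiled, helper name hypothetical): what the
 * five-instruction lis/ori/rldicr/oris/ori sequence above computes. PPC64
 * has no 64-bit immediate load, so the link-time address of label 3 is
 * assembled in r17 from its four 16-bit quarters:
 */
#if 0
static uint64_t build_addr_of_3b(uint64_t a)
{
	uint64_t r17;

	r17  = ((a >> 48) & 0xffff) << 16;	/* lis    r17, (3b)@highest */
	r17 |=  (a >> 32) & 0xffff;		/* ori    r17, r17, (3b)@higher */
	r17 <<= 32;				/* rldicr r17, r17, 32, 31 */
	r17 |= ((a >> 16) & 0xffff) << 16;	/* oris   r17, r17, (3b)@h */
	r17 |=   a        & 0xffff;		/* ori    r17, r17, (3b)@l */
	return r17;				/* == a */
}
#endif
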
#define RSEQ_FINISH_FINAL_STORE_ASM() \
		"std %[to_write_final], 0(%[target_final])\n\t"

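/*
 * The lwsync in RSEQ_FINISH_FINAL_STORE_RELEASE_ASM() below orders every
 * prior store of the critical section before the final commit store,
 * giving the commit store-release semantics, analogous to
 * smp_store_release() above.
 */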
#define RSEQ_FINISH_FINAL_STORE_RELEASE_ASM() \
		"lwsync\n\t" \
		RSEQ_FINISH_FINAL_STORE_ASM()

#define RSEQ_FINISH_FINAL_STORE_INPUT(_target_final, _to_write_final) \
		, [to_write_final]"r"(_to_write_final), \
		[target_final]"b"(_target_final)

#define RSEQ_FINISH_SPECULATIVE_STORE_ASM() \
		"std %[to_write_spec], 0(%[target_spec])\n\t" \
		RSEQ_INJECT_ASM(4)

#define RSEQ_FINISH_SPECULATIVE_STORE_INPUT(_target_spec, _to_write_spec) \
		, [to_write_spec]"r"(_to_write_spec), \
		[target_spec]"b"(_target_spec)

/* TODO: implement a faster memcpy. */
#define RSEQ_FINISH_MEMCPY_STORE_ASM() \
		"cmpdi %%r19, 0\n\t" \
		"beq 333f\n\t" \
		"addi %%r20, %%r20, -1\n\t" \
		"addi %%r21, %%r21, -1\n\t" \
		"222:\n\t" \
		"lbzu %%r18, 1(%%r20)\n\t" \
		"stbu %%r18, 1(%%r21)\n\t" \
		"addi %%r19, %%r19, -1\n\t" \
		"cmpdi %%r19, 0\n\t" \
		"bne 222b\n\t" \
		"333:\n\t" \
		RSEQ_INJECT_ASM(4)

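/*
 * Illustrative sketch (not compiled, function name hypothetical): C
 * equivalent of the byte-copy loop above. r20/r21 are pre-decremented
 * because lbzu/stbu are update-form instructions that advance the address
 * before each access.
 */
#if 0
static void rseq_memcpy_sketch(char *dst, const char *src, unsigned long len)
{
	while (len--)			/* cmpdi/addi/bne loop on r19 */
		*dst++ = *src++;	/* lbzu + stbu, one byte at a time */
}
#endif
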
#define RSEQ_FINISH_MEMCPY_STORE_INPUT(_target_memcpy, _to_write_memcpy, _len_memcpy) \
		, [to_write_memcpy]"r"(_to_write_memcpy), \
		[target_memcpy]"r"(_target_memcpy), \
		[len_memcpy]"r"(_len_memcpy)

#define RSEQ_FINISH_MEMCPY_CLOBBER() \
		, "r18", "r19", "r20", "r21"

#define RSEQ_FINISH_MEMCPY_SCRATCH()

/*
 * We copy the inputs into scratch registers, so the input registers
 * themselves are never modified and need not be saved and restored.
 */
#define RSEQ_FINISH_MEMCPY_SETUP() \
		"mr %%r19, %[len_memcpy]\n\t" \
		"mr %%r20, %[to_write_memcpy]\n\t" \
		"mr %%r21, %[target_memcpy]\n\t"

#define RSEQ_FINISH_MEMCPY_TEARDOWN()

#else /* #ifdef __PPC64__ */

#define RSEQ_FINISH_ASM(_target_final, _to_write_final, _start_value, \
		_failure, _spec_store, _spec_input, \
		_final_store, _final_input, _extra_clobber, \
		_setup, _teardown, _scratch) \
	__asm__ __volatile__ goto ( \
		".pushsection __rseq_table, \"aw\"\n\t" \
		".balign 32\n\t" \
		"3:\n\t" \
		/* 32-bit only supported on BE */ \
		".long 0x0, 1f, 0x0, 2f, 0x0, 4f, 0x0, 0x0\n\t" \
		".popsection\n\t" \
		"1:\n\t" \
		_setup \
		RSEQ_INJECT_ASM(1) \
		"lis %%r17, (3b)@ha\n\t" \
		"addi %%r17, %%r17, (3b)@l\n\t" \
		"stw %%r17, 0(%[rseq_cs])\n\t" \
		RSEQ_INJECT_ASM(2) \
		"lwz %%r17, %[current_event_counter]\n\t" \
		"cmpw cr7, %[start_event_counter], %%r17\n\t" \
		"bne- cr7, 4f\n\t" \
		RSEQ_INJECT_ASM(3) \
		_spec_store \
		_final_store \
		"2:\n\t" \
		RSEQ_INJECT_ASM(5) \
		_teardown \
		"b 5f\n\t" \
		"4:\n\t" \
		_teardown \
		"b %l[failure]\n\t" \
		"5:\n\t" \
		: /* gcc asm goto does not allow outputs */ \
		: [start_event_counter]"r"((_start_value).event_counter), \
		  [current_event_counter]"m"((_start_value).rseqp->u.e.event_counter), \
		  [rseq_cs]"b"(&(_start_value).rseqp->rseq_cs) \
		  _spec_input \
		  _final_input \
		  RSEQ_INJECT_INPUT \
		: "r17", "memory", "cc" \
		  _extra_clobber \
		  RSEQ_INJECT_CLOBBER \
		: _failure \
	)

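/*
 * Illustrative sketch (not compiled, field names hypothetical): one table
 * entry in the 32-bit big-endian layout emitted by the ".long" directive
 * above. Each address still occupies a 64-bit slot, so a zero word
 * precedes it as the high half of a big-endian u64, keeping the layout
 * compatible with the 64-bit ABI.
 */
#if 0
struct rseq_table_entry_be32_sketch {
	uint32_t pad0; uint32_t start_ip;	/* .long 0x0, 1f */
	uint32_t pad1; uint32_t post_commit_ip;	/* .long 0x0, 2f */
	uint32_t pad2; uint32_t abort_ip;	/* .long 0x0, 4f */
	uint32_t pad3; uint32_t zero;		/* .long 0x0, 0x0 */
};
#endif
/*
 * Note also that the two-instruction lis/addi sequence above uses (3b)@ha,
 * the "high adjusted" half-word, which compensates for addi sign-extending
 * (3b)@l.
 */
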
#define RSEQ_FINISH_FINAL_STORE_ASM() \
		"stw %[to_write_final], 0(%[target_final])\n\t"

#define RSEQ_FINISH_FINAL_STORE_RELEASE_ASM() \
		"lwsync\n\t" \
		RSEQ_FINISH_FINAL_STORE_ASM()

#define RSEQ_FINISH_FINAL_STORE_INPUT(_target_final, _to_write_final) \
		, [to_write_final]"r"(_to_write_final), \
		[target_final]"b"(_target_final)

#define RSEQ_FINISH_SPECULATIVE_STORE_ASM() \
		"stw %[to_write_spec], 0(%[target_spec])\n\t" \
		RSEQ_INJECT_ASM(4)

#define RSEQ_FINISH_SPECULATIVE_STORE_INPUT(_target_spec, _to_write_spec) \
		, [to_write_spec]"r"(_to_write_spec), \
		[target_spec]"b"(_target_spec)

/* TODO: implement a faster memcpy. */
#define RSEQ_FINISH_MEMCPY_STORE_ASM() \
		"cmpwi %%r19, 0\n\t" \
		"beq 333f\n\t" \
		"addi %%r20, %%r20, -1\n\t" \
		"addi %%r21, %%r21, -1\n\t" \
		"222:\n\t" \
		"lbzu %%r18, 1(%%r20)\n\t" \
		"stbu %%r18, 1(%%r21)\n\t" \
		"addi %%r19, %%r19, -1\n\t" \
		"cmpwi %%r19, 0\n\t" \
		"bne 222b\n\t" \
		"333:\n\t" \
		RSEQ_INJECT_ASM(4)

#define RSEQ_FINISH_MEMCPY_STORE_INPUT(_target_memcpy, _to_write_memcpy, _len_memcpy) \
		, [to_write_memcpy]"r"(_to_write_memcpy), \
		[target_memcpy]"r"(_target_memcpy), \
		[len_memcpy]"r"(_len_memcpy)

#define RSEQ_FINISH_MEMCPY_CLOBBER() \
		, "r18", "r19", "r20", "r21"

#define RSEQ_FINISH_MEMCPY_SCRATCH()

/*
 * We copy the inputs into scratch registers, so the input registers
 * themselves are never modified and need not be saved and restored.
 */
#define RSEQ_FINISH_MEMCPY_SETUP() \
		"mr %%r19, %[len_memcpy]\n\t" \
		"mr %%r20, %[to_write_memcpy]\n\t" \
		"mr %%r21, %[target_memcpy]\n\t"

#define RSEQ_FINISH_MEMCPY_TEARDOWN()

#endif /* #else #ifdef __PPC64__ */