Commit | Line | Data |
---|---|---|
b54c5158 MD |
1 | /* |
2 | * rseq-arm.h | |
3 | * | |
4 | * (C) Copyright 2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
7 | * of this software and associated documentation files (the "Software"), to deal | |
8 | * in the Software without restriction, including without limitation the rights | |
9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
10 | * copies of the Software, and to permit persons to whom the Software is | |
11 | * furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
22 | * SOFTWARE. | |
23 | */ | |
24 | ||
/*
 * Memory barriers for 32-bit ARM.  "dmb" is the ARM data memory
 * barrier instruction; this port maps full, read, and write barriers
 * to the same dmb (no cheaper variant is used here).
 */
#define smp_mb() __asm__ __volatile__ ("dmb" : : : "memory")
#define smp_rmb() __asm__ __volatile__ ("dmb" : : : "memory")
#define smp_wmb() __asm__ __volatile__ ("dmb" : : : "memory")
28 | ||
/*
 * Load-acquire: read *(p) once, then issue a full barrier so the load
 * is ordered before all subsequent memory accesses.  Evaluates to the
 * loaded value.  Relies on READ_ONCE(), defined elsewhere.
 *
 * Fix: parenthesize the macro parameter — the original expanded
 * READ_ONCE(*p), which mis-parses for expression arguments such as
 * smp_load_acquire(base + i) (would dereference only "base").
 */
#define smp_load_acquire(p) \
__extension__ ({ \
	__typeof(*(p)) ____p1 = READ_ONCE(*(p)); \
	smp_mb(); \
	____p1; \
})
35 | ||
36 | #define smp_acquire__after_ctrl_dep() smp_rmb() | |
37 | ||
/*
 * Store-release: full barrier, then write v to *(p), so all prior
 * memory accesses are ordered before the store.  Relies on
 * WRITE_ONCE(), defined elsewhere.
 *
 * Fix: parenthesize the macro parameters — the original expanded
 * WRITE_ONCE(*p, v), which mis-parses for expression arguments such
 * as smp_store_release(base + i, x | y).
 */
#define smp_store_release(p, v) \
do { \
	smp_mb(); \
	WRITE_ONCE(*(p), (v)); \
} while (0)
43 | ||
/*
 * Architecture feature flags consumed by the generic rseq code:
 * no cheap acquire/release primitives on this ARM port; 64-bit loads
 * are reported as single-copy (presumably via ldrd on aligned data —
 * NOTE(review): atomicity assumption not visible here, confirm).
 */
#define has_fast_acquire_release() 0
#define has_single_copy_load_64() 1
46 | ||
/*
 * The __rseq_table section can be used by debuggers to better handle
 * single-stepping through the restartable critical sections.
 *
 * Load the immediate value 0 into register r1 right after the ldr
 * instruction to improve instruction-level parallelism: load the
 * constant while the processor is stalled waiting for the load to
 * complete, which is required by the following comparison and branch.
 */

/*
 * Core rseq commit sequence, emitted as one asm-goto statement.
 * Local-label layout:
 *   1: start of the critical section — _setup, then store the address
 *      of the descriptor at 3: into the per-thread rseq_cs pointer,
 *      load the current event counter, and compare it with the saved
 *      one (%[start_event_counter]); a mismatch branches to 5:.
 *      _spec_store then _final_store perform the caller's stores.
 *   2: post-commit — clear rseq_cs (r1 holds 0), run _teardown, jump
 *      to 4: (fall-through exit).
 *   3: the 32-byte-aligned rseq_cs descriptor words (start ip 1b,
 *      post-commit ip 2b, abort ip 5f), mirroring the copy pushed to
 *      the __rseq_table section above.
 *   5: failure/abort path — reload 0 into r1, clear rseq_cs, run
 *      _teardown, branch to the caller-supplied _failure label.
 * RSEQ_INJECT_ASM()/RSEQ_INJECT_INPUT/RSEQ_INJECT_CLOBBER are test
 * instrumentation hooks defined elsewhere.  r0/r1 are scratch, hence
 * listed in the clobbers.
 */
#define RSEQ_FINISH_ASM(_target_final, _to_write_final, _start_value, \
		_failure, _spec_store, _spec_input, \
		_final_store, _final_input, _extra_clobber, \
		_setup, _teardown, _scratch) \
do { \
	_scratch \
	__asm__ __volatile__ goto ( \
		".pushsection __rseq_table, \"aw\"\n\t" \
		".balign 32\n\t" \
		".word 1f, 0x0, 2f, 0x0, 5f, 0x0, 0x0, 0x0\n\t" \
		".popsection\n\t" \
		"1:\n\t" \
		_setup \
		RSEQ_INJECT_ASM(1) \
		"adr r0, 3f\n\t" \
		"str r0, [%[rseq_cs]]\n\t" \
		RSEQ_INJECT_ASM(2) \
		"ldr r0, %[current_event_counter]\n\t" \
		"mov r1, #0\n\t" \
		"cmp %[start_event_counter], r0\n\t" \
		"bne 5f\n\t" \
		RSEQ_INJECT_ASM(3) \
		_spec_store \
		_final_store \
		"2:\n\t" \
		RSEQ_INJECT_ASM(5) \
		"str r1, [%[rseq_cs]]\n\t" \
		_teardown \
		"b 4f\n\t" \
		".balign 32\n\t" \
		"3:\n\t" \
		".word 1b, 0x0, 2b, 0x0, 5f, 0x0, 0x0, 0x0\n\t" \
		"5:\n\t" \
		"mov r1, #0\n\t" \
		"str r1, [%[rseq_cs]]\n\t" \
		_teardown \
		"b %l[failure]\n\t" \
		"4:\n\t" \
		: /* gcc asm goto does not allow outputs */ \
		: [start_event_counter]"r"((_start_value).event_counter), \
		  [current_event_counter]"m"((_start_value).rseqp->u.e.event_counter), \
		  [rseq_cs]"r"(&(_start_value).rseqp->rseq_cs) \
		  _spec_input \
		  _final_input \
		  RSEQ_INJECT_INPUT \
		: "r0", "r1", "memory", "cc" \
		  _extra_clobber \
		  RSEQ_INJECT_CLOBBER \
		: _failure \
	); \
} while (0)
108 | ||
/* The committing (final) store of the rseq sequence. */
#define RSEQ_FINISH_FINAL_STORE_ASM() \
		"str %[to_write_final], [%[target_final]]\n\t"
111 | ||
/* Release variant: dmb barrier before the committing store. */
#define RSEQ_FINISH_FINAL_STORE_RELEASE_ASM() \
		"dmb\n\t" \
		RSEQ_FINISH_FINAL_STORE_ASM()
115 | ||
/* Input operands consumed by RSEQ_FINISH_FINAL_STORE_ASM(). */
#define RSEQ_FINISH_FINAL_STORE_INPUT(_target_final, _to_write_final) \
		, [to_write_final]"r"(_to_write_final), \
		[target_final]"r"(_target_final)
119 | ||
/*
 * Speculative store performed before the final commit; may be retried
 * if the sequence aborts.
 */
#define RSEQ_FINISH_SPECULATIVE_STORE_ASM() \
		"str %[to_write_spec], [%[target_spec]]\n\t" \
		RSEQ_INJECT_ASM(4)
123 | ||
/* Input operands consumed by RSEQ_FINISH_SPECULATIVE_STORE_ASM(). */
#define RSEQ_FINISH_SPECULATIVE_STORE_INPUT(_target_spec, _to_write_spec) \
		, [to_write_spec]"r"(_to_write_spec), \
		[target_spec]"r"(_target_spec)
127 | ||
/* TODO: implement a faster memcpy. */
/*
 * Byte-at-a-time copy loop (label 222) using r0 as scratch; skips
 * straight to 333 when the length is zero.  It advances the
 * to_write/target operand registers and decrements len in place,
 * which is why RSEQ_FINISH_MEMCPY_SETUP()/TEARDOWN() save and
 * restore those registers around the critical section.
 */
#define RSEQ_FINISH_MEMCPY_STORE_ASM() \
		"cmp %[len_memcpy], #0\n\t" \
		"beq 333f\n\t" \
		"222:\n\t" \
		"ldrb %%r0, [%[to_write_memcpy]]\n\t" \
		"strb %%r0, [%[target_memcpy]]\n\t" \
		"adds %[to_write_memcpy], #1\n\t" \
		"adds %[target_memcpy], #1\n\t" \
		"subs %[len_memcpy], #1\n\t" \
		"bne 222b\n\t" \
		"333:\n\t" \
		RSEQ_INJECT_ASM(4)
141 | ||
/*
 * Input operands for the memcpy variant, including the three scratch
 * memory slots (declared by RSEQ_FINISH_MEMCPY_SCRATCH()) used to
 * save/restore the registers the copy loop mutates.
 */
#define RSEQ_FINISH_MEMCPY_STORE_INPUT(_target_memcpy, _to_write_memcpy, _len_memcpy) \
		, [to_write_memcpy]"r"(_to_write_memcpy), \
		[target_memcpy]"r"(_target_memcpy), \
		[len_memcpy]"r"(_len_memcpy), \
		[rseq_scratch0]"m"(rseq_scratch[0]), \
		[rseq_scratch1]"m"(rseq_scratch[1]), \
		[rseq_scratch2]"m"(rseq_scratch[2])
149 | ||
/*
 * We can use r0: it is already in RSEQ_FINISH_ASM's base clobber
 * list, so the memcpy variant needs no extra clobbers.
 */
#define RSEQ_FINISH_MEMCPY_CLOBBER()
152 | ||
/*
 * Scratch memory referenced by the [rseq_scratchN] operands of
 * RSEQ_FINISH_MEMCPY_STORE_INPUT(); expanded via the _scratch hook at
 * the top of RSEQ_FINISH_ASM's do-block.
 */
#define RSEQ_FINISH_MEMCPY_SCRATCH() \
		uint32_t rseq_scratch[3];
155 | ||
/*
 * We need to save and restore those input registers so they can be
 * modified within the assembly.
 */
#define RSEQ_FINISH_MEMCPY_SETUP() \
		"str %[to_write_memcpy], %[rseq_scratch0]\n\t" \
		"str %[target_memcpy], %[rseq_scratch1]\n\t" \
		"str %[len_memcpy], %[rseq_scratch2]\n\t"
164 | ||
/*
 * Restore the registers saved by RSEQ_FINISH_MEMCPY_SETUP(); runs on
 * both the success and the failure/abort paths (_teardown hook).
 */
#define RSEQ_FINISH_MEMCPY_TEARDOWN() \
		"ldr %[len_memcpy], %[rseq_scratch2]\n\t" \
		"ldr %[target_memcpy], %[rseq_scratch1]\n\t" \
		"ldr %[to_write_memcpy], %[rseq_scratch0]\n\t"