/* SPDX-License-Identifier: MIT */
/* SPDX-FileCopyrightText: 2022 Vincent Chen <vincent.chen@sifive.com> */
/* SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */

/*
 * rseq-riscv.h
 */

/*
 * RSEQ_ASM_*() macro helpers are internal to the librseq headers. Those
 * are not part of the public API.
 */

#ifndef _RSEQ_RSEQ_H
#error "Never use <rseq/arch/riscv.h> directly; include <rseq/rseq.h> instead."
#endif

/*
 * Select the instruction "csrw mhartid, x0" as the RSEQ_SIG. Unlike on
 * other architectures, the RISC-V ebreak instruction has no immediate
 * field that could distinguish an rseq abort from other breakpoint
 * uses, so ebreak is not suitable as RSEQ_SIG. "csrw mhartid, x0"
 * satisfies the RSEQ requirements because it is an uncommon instruction
 * which raises an illegal instruction exception when executed in any
 * privilege mode.
 */
#include <endian.h>

#if defined(__BYTE_ORDER) ? (__BYTE_ORDER == __LITTLE_ENDIAN) : defined(__LITTLE_ENDIAN)
#define RSEQ_SIG	0xf1401073  /* csrw mhartid, x0 */
#else
#error "Currently, rseq only supports little-endian systems."
#endif
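
/*
 * For reference, the RSEQ_SIG encoding breaks down as follows (derived
 * from the RISC-V base ISA encoding; documentation only):
 *
 *	0xf1401073 = csr 0xf14 (mhartid) << 20 | rs1 x0 << 15
 *		   | funct3 001 (CSRRW) << 12  | rd x0 << 7
 *		   | opcode 0x73 (SYSTEM)
 *
 * CSRRW with rd = x0 is the canonical expansion of the "csrw"
 * pseudo-instruction, and mhartid is a read-only CSR, so writing to it
 * traps in every privilege mode.
 */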

/*
 * Instruction selection between 32-bit/64-bit. Used internally in the
 * rseq headers.
 */
#if __riscv_xlen == 64
#define __RSEQ_ASM_REG_SEL(a, b)	a
#elif __riscv_xlen == 32
#define __RSEQ_ASM_REG_SEL(a, b)	b
#endif

#define RSEQ_ASM_REG_L	__RSEQ_ASM_REG_SEL("ld ", "lw ")
#define RSEQ_ASM_REG_S	__RSEQ_ASM_REG_SEL("sd ", "sw ")

/*
 * Refer to the Linux kernel memory model (LKMM) for documentation of
 * the memory barriers.
 */

/* Only used internally in rseq headers. */
#define RSEQ_ASM_RISCV_FENCE(p, s) \
	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
/* CPU memory barrier. */
#define rseq_smp_mb()	RSEQ_ASM_RISCV_FENCE(rw, rw)
/* CPU read memory barrier. */
#define rseq_smp_rmb()	RSEQ_ASM_RISCV_FENCE(r, r)
/* CPU write memory barrier. */
#define rseq_smp_wmb()	RSEQ_ASM_RISCV_FENCE(w, w)

/* Acquire: One-way permeable barrier. */
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
	rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
	RSEQ_ASM_RISCV_FENCE(r, rw); \
	____p1; \
})

/* Acquire barrier after control dependency. */
#define rseq_smp_acquire__after_ctrl_dep()	rseq_smp_rmb()

/* Release: One-way permeable barrier. */
#define rseq_smp_store_release(p, v) \
do { \
	RSEQ_ASM_RISCV_FENCE(rw, w); \
	RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
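
/*
 * Typical acquire/release pairing (sketch only, assuming two threads
 * sharing `data` and `ready`, both initially 0):
 *
 *	// Producer:
 *	data = 42;
 *	rseq_smp_store_release(&ready, 1);
 *
 *	// Consumer:
 *	while (!rseq_smp_load_acquire(&ready))
 *		;
 *	// Here the consumer is guaranteed to observe data == 42: the
 *	// release fence keeps the data store from sinking below the
 *	// flag store, and the acquire fence keeps the data load from
 *	// hoisting above the flag load.
 */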

#define RSEQ_ASM_U64_PTR(x)	".quad " x
#define RSEQ_ASM_U32(x)		".long " x

/* Temporary registers. */
#define RSEQ_ASM_TMP_REG_1	"t6"
#define RSEQ_ASM_TMP_REG_2	"t5"
#define RSEQ_ASM_TMP_REG_3	"t4"
#define RSEQ_ASM_TMP_REG_4	"t3"

/* Common architecture support macros. */
#include "rseq/arch/generic/common.h"

/*
 * Define a critical section abort handler.
 *
 * @label:
 *	Local label to the abort handler.
 * @teardown:
 *	Sequence of instructions to run on abort.
 * @abort_label:
 *	C label to jump to at the end of the sequence.
 */
#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \
	"j 222f\n" \
	".balign 4\n" \
	RSEQ_ASM_U32(__rseq_str(RSEQ_SIG)) "\n" \
	__rseq_str(label) ":\n" \
	teardown \
	"j %l[" __rseq_str(abort_label) "]\n" \
	"222:\n"

/*
 * Store the address of the critical section descriptor @cs_label into
 * the rseq_cs pointer of the registered struct rseq (@rseq_cs), and
 * emit local label @label, which marks the beginning of the critical
 * section.
 */
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
	RSEQ_INJECT_ASM(1) \
	"la " RSEQ_ASM_TMP_REG_1 ", " __rseq_str(cs_label) "\n" \
	RSEQ_ASM_REG_S RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(rseq_cs) "]\n" \
	__rseq_str(label) ":\n"

/* Store @value to address @var. */
#define RSEQ_ASM_OP_STORE(value, var) \
	RSEQ_ASM_REG_S "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n"

/* Jump to local label @label when @var != @expect. */
#define RSEQ_ASM_OP_CBNE(var, expect, label) \
	RSEQ_ASM_REG_L RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n" \
	"bne " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "], " \
	__rseq_str(label) "\n"

/*
 * Jump to local label @label when @var != @expect (32-bit register
 * comparison).
 */
#define RSEQ_ASM_OP_CBNE32(var, expect, label) \
	"lw " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n" \
	"bne " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "], " \
	__rseq_str(label) "\n"

/* Jump to local label @label when @var == @expect. */
#define RSEQ_ASM_OP_CBEQ(var, expect, label) \
	RSEQ_ASM_REG_L RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n" \
	"beq " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "], " \
	__rseq_str(label) "\n"

/* Jump to local label @label when @cpu_id != @current_cpu_id. */
#define RSEQ_ASM_CBNE_CPU_ID(cpu_id, current_cpu_id, label) \
	RSEQ_INJECT_ASM(2) \
	RSEQ_ASM_OP_CBNE32(current_cpu_id, cpu_id, label)

/* Load @var into temporary register. */
#define RSEQ_ASM_OP_R_LOAD(var) \
	RSEQ_ASM_REG_L RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n"

/* Store from temporary register into @var. */
#define RSEQ_ASM_OP_R_STORE(var) \
	RSEQ_ASM_REG_S RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n"

/* Load from address (temporary register + @offset) into temporary register. */
#define RSEQ_ASM_OP_R_LOAD_OFF(offset) \
	"add " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(offset) "], " \
	RSEQ_ASM_TMP_REG_1 "\n" \
	RSEQ_ASM_REG_L RSEQ_ASM_TMP_REG_1 ", (" RSEQ_ASM_TMP_REG_1 ")\n"

/* Add @count to temporary register. */
#define RSEQ_ASM_OP_R_ADD(count) \
	"add " RSEQ_ASM_TMP_REG_1 ", " RSEQ_ASM_TMP_REG_1 \
	", %[" __rseq_str(count) "]\n"

/*
 * End-of-sequence store of @value to address @var. Emit
 * @post_commit_label label after the store instruction.
 */
#define RSEQ_ASM_OP_FINAL_STORE(value, var, post_commit_label) \
	RSEQ_ASM_OP_STORE(value, var) \
	__rseq_str(post_commit_label) ":\n"

/*
 * End-of-sequence store-release of @value to address @var. Emit
 * @post_commit_label label after the store instruction.
 */
#define RSEQ_ASM_OP_FINAL_STORE_RELEASE(value, var, post_commit_label) \
	"fence rw, w\n" \
	RSEQ_ASM_OP_STORE(value, var) \
	__rseq_str(post_commit_label) ":\n"

/*
 * End-of-sequence store of temporary register to address @var. Emit
 * @post_commit_label label after the store instruction.
 */
#define RSEQ_ASM_OP_R_FINAL_STORE(var, post_commit_label) \
	RSEQ_ASM_REG_S RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n" \
	__rseq_str(post_commit_label) ":\n"

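/*
 * Sketch of how these helpers compose into a critical section
 * (illustration only; the real templates are generated from
 * rseq/arch/riscv/bits.h). Label numbering follows the usual rseq
 * convention: 1 = start, 2 = post-commit, 3 = descriptor, 4 = abort.
 *
 *	RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
 *	RSEQ_ASM_CBNE_CPU_ID(cpu_id, current_cpu_id, 4f)
 *	... loads, compares, and side effects ...
 *	RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
 *	RSEQ_ASM_DEFINE_ABORT(4, "", abort)
 */
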
/*
 * Copy @len bytes from @src to @dst. This is an inefficient bytewise
 * copy and could be improved in the future.
 */
#define RSEQ_ASM_OP_R_BYTEWISE_MEMCPY(dst, src, len) \
	"beqz %[" __rseq_str(len) "], 333f\n" \
	"mv " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(len) "]\n" \
	"mv " RSEQ_ASM_TMP_REG_2 ", %[" __rseq_str(src) "]\n" \
	"mv " RSEQ_ASM_TMP_REG_3 ", %[" __rseq_str(dst) "]\n" \
	"222:\n" \
	"lb " RSEQ_ASM_TMP_REG_4 ", 0(" RSEQ_ASM_TMP_REG_2 ")\n" \
	"sb " RSEQ_ASM_TMP_REG_4 ", 0(" RSEQ_ASM_TMP_REG_3 ")\n" \
	"addi " RSEQ_ASM_TMP_REG_1 ", " RSEQ_ASM_TMP_REG_1 ", -1\n" \
	"addi " RSEQ_ASM_TMP_REG_2 ", " RSEQ_ASM_TMP_REG_2 ", 1\n" \
	"addi " RSEQ_ASM_TMP_REG_3 ", " RSEQ_ASM_TMP_REG_3 ", 1\n" \
	"bnez " RSEQ_ASM_TMP_REG_1 ", 222b\n" \
	"333:\n"

/* Per-cpu-id indexing. */

#define RSEQ_TEMPLATE_INDEX_CPU_ID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq/arch/riscv/bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq/arch/riscv/bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_INDEX_CPU_ID

/* Per-mm-cid indexing. */

#define RSEQ_TEMPLATE_INDEX_MM_CID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq/arch/riscv/bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq/arch/riscv/bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_INDEX_MM_CID

/* APIs which are not indexed. */

#define RSEQ_TEMPLATE_INDEX_NONE
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq/arch/riscv/bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED
#undef RSEQ_TEMPLATE_INDEX_NONE
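
/*
 * Note on the template mechanism (documentation only): bits.h is
 * included once per (index, memory-ordering) combination, with the
 * RSEQ_TEMPLATE_* macros selecting which variant of each critical
 * section to generate. This stamps out relaxed and release versions
 * of every operation for per-cpu-id indexing, per-mm-cid indexing,
 * and unindexed use from a single set of template definitions.
 */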