/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif
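
/*
 * Every operation in this file is emitted through ARM64_LSE_ATOMIC_INSN(),
 * which takes two alternatives: an out-of-line call to the LL/SC
 * implementation (via __LL_SC_CALL) and an inline LSE instruction sequence
 * that is patched in at boot on CPUs implementing the ARMv8.1 LSE atomics.
 * Both alternatives must be the same size, which is why the LL/SC call is
 * padded with __nops() wherever the LSE sequence needs more than one
 * instruction.
 */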

#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)
#define ATOMIC_OP(op, asm_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),		\
"	" #asm_op "	%w[i], %[v]\n")					\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS);						\
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP
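
/*
 * For illustration only: ATOMIC_OP(add, stadd) above expands to roughly
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		register int w0 asm ("w0") = i;
 *		register atomic_t *x1 asm ("x1") = v;
 *
 *		asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_CALL(atomic_add),
 *	"	stadd	%w[i], %[v]\n")
 *		: [i] "+r" (w0), [v] "+Q" (v->counter)
 *		: "r" (x1)
 *		: __LL_SC_CLOBBERS);
 *	}
 *
 * i.e. a single STADD instruction when the LSE alternative is selected.
 */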

#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline int atomic_fetch_##op##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(fetch_##op##name),				\
	/* LSE atomics */						\
"	" #asm_op #mb "	%w[i], %w[i], %[v]")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

#define ATOMIC_FETCH_OPS(op, asm_op)					\
	ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)
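
/*
 * The above generates atomic_fetch_<op>{_relaxed,_acquire,_release,}()
 * for each of andnot/or/xor/add. The "a" and "l" mnemonic suffixes pick
 * the acquire/release forms of the LSE load-<op> instructions, so the
 * ordering comes from the instruction itself rather than from separate
 * barriers; only the compiler "memory" clobber differs between variants.
 */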

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
static inline int atomic_add_return##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(add_return##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN
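
/*
 * The LSE instruction set has no direct AND or SUB memory operations, so
 * atomic_and() and atomic_sub() are built from their complements: AND is
 * a clear (stclr) of the inverted mask, and SUB is an add (stadd) of the
 * negated operand. The extra mvn/neg instruction is why the LL/SC call
 * in the alternatives below is padded with __nops(1).
 */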

static inline void atomic_and(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC(and)
	__nops(1),
	/* LSE atomics */
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
static inline int atomic_fetch_and##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(fetch_and##name)					\
	__nops(1),							\
	/* LSE atomics */						\
	"	mvn	%w[i], %w[i]\n"					\
	"	ldclr" #mb "	%w[i], %w[i], %[v]")			\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

static inline void atomic_sub(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC(sub)
	__nops(1),
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
static inline int atomic_sub_return##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(sub_return##name)				\
	__nops(2),							\
	/* LSE atomics */						\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...)				\
static inline int atomic_fetch_sub##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(fetch_sub##name)					\
	__nops(1),							\
	/* LSE atomics */						\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], %w[i], %[v]")			\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_FETCH_OP_SUB(_relaxed,   )
ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC
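
/*
 * The atomic64_t operations below mirror the 32-bit versions above,
 * operating on the full 64-bit x registers (%[i] rather than %w[i]) with
 * the value in x0 and the pointer in x1, as expected by the out-of-line
 * LL/SC routines.
 */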

#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)
#define ATOMIC64_OP(op, asm_op)						\
static inline void atomic64_##op(long i, atomic64_t *v)		\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),	\
"	" #asm_op "	%[i], %[v]\n")					\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS);						\
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline long atomic64_fetch_##op##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(fetch_##op##name),				\
	/* LSE atomics */						\
"	" #asm_op #mb "	%[i], %[i], %[v]")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

#define ATOMIC64_FETCH_OPS(op, asm_op)					\
	ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
static inline long atomic64_add_return##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(add_return##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void atomic64_and(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(and)
	__nops(1),
	/* LSE atomics */
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
static inline long atomic64_fetch_and##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(fetch_and##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	mvn	%[i], %[i]\n"					\
	"	ldclr" #mb "	%[i], %[i], %[v]")			\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void atomic64_sub(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(sub)
	__nops(1),
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
static inline long atomic64_sub_return##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(sub_return##name)				\
	__nops(2),							\
	/* LSE atomics */						\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)				\
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(fetch_sub##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], %[i], %[v]")			\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_FETCH_OP_SUB(_relaxed,   )
ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC64_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB
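
/*
 * atomic64_dec_if_positive() decrements v->counter only when the result
 * stays non-negative and returns the new value; a negative return means
 * no decrement took place. The LSE version is a load + casal retry loop
 * that uses x30 as scratch: x30 is already clobbered on the LL/SC path
 * (it is the link register for the out-of-line call), so the LSE path is
 * free to reuse it.
 */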

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(dec_if_positive)
	__nops(6),
	/* LSE atomics */
	"1:	ldr	x30, %[v]\n"
	"	subs	%[ret], x30, #1\n"
	"	b.lt	2f\n"
	"	casal	x30, %[ret], %[v]\n"
	"	sub	x30, x30, #1\n"
	"	sub	x30, x30, %[ret]\n"
	"	cbnz	x30, 1b\n"
	"2:")
	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
	:
	: __LL_SC_CLOBBERS, "cc", "memory");

	return x0;
}

#undef __LL_SC_ATOMIC64
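
/*
 * cmpxchg is implemented with the LSE CAS instructions. CAS overwrites
 * its compare register with the value loaded from memory, so the "old"
 * value is first staged in w30/x30 and the result copied back out; as
 * above, x30 is usable as scratch because it is in __LL_SC_CLOBBERS.
 */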

#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)

#define __CMPXCHG_CASE(w, sz, name, mb, cl...)				\
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
						  unsigned long old,	\
						  unsigned long new)	\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register unsigned long x1 asm ("x1") = old;			\
	register unsigned long x2 asm ("x2") = new;			\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_CMPXCHG(name)						\
	__nops(2),							\
	/* LSE atomics */						\
	"	mov	" #w "30, %" #w "[old]\n"			\
	"	cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"		\
	"	mov	%" #w "[ret], " #w "30")			\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
	: [old] "r" (x1), [new] "r" (x2)				\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     1,   )
__CMPXCHG_CASE(w, h,     2,   )
__CMPXCHG_CASE(w,  ,     4,   )
__CMPXCHG_CASE(x,  ,     8,   )
__CMPXCHG_CASE(w, b, acq_1,  a, "memory")
__CMPXCHG_CASE(w, h, acq_2,  a, "memory")
__CMPXCHG_CASE(w,  , acq_4,  a, "memory")
__CMPXCHG_CASE(x,  , acq_8,  a, "memory")
__CMPXCHG_CASE(w, b, rel_1,  l, "memory")
__CMPXCHG_CASE(w, h, rel_2,  l, "memory")
__CMPXCHG_CASE(w,  , rel_4,  l, "memory")
__CMPXCHG_CASE(x,  , rel_8,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, al, "memory")
__CMPXCHG_CASE(w, h,  mb_2, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_8, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)
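
/*
 * __cmpxchg_double uses CASP, which atomically compares and swaps a pair
 * of 64-bit values. CASP requires each register pair to be consecutive
 * with the first register even-numbered, hence the explicit x0-x4
 * bindings below. The trailing eor/eor/orr sequence folds the two loaded
 * words into the 0-on-success return value the callers expect.
 */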

#define __CMPXCHG_DBL(name, mb, cl...)					\
static inline long __cmpxchg_double##name(unsigned long old1,		\
					  unsigned long old2,		\
					  unsigned long new1,		\
					  unsigned long new2,		\
					  volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_CMPXCHG_DBL(name)					\
	__nops(3),							\
	/* LSE atomics */						\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]")			\
	: [old1] "+r" (x0), [old2] "+r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */