/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }
/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	ACCESS_ONCE((v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
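
/*
 * Illustrative usage sketch (not part of this header):
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 *
 *	atomic_set(&refs, 2);
 *	BUG_ON(atomic_read(&refs) != 2);
 */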
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	smp_mb();							\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
	smp_mb();							\
	return result;							\
}
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
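
/*
 * __atomic_add_unless() returns the value the counter held before the
 * (possibly skipped) add; <linux/atomic.h> layers atomic_add_unless()
 * and atomic_inc_not_zero() on top of this primitive.
 */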
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
	return val;							\
}
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#endif /* __LINUX_ARM_ARCH__ */
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
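
/*
 * The two ATOMIC_OPS() lines above instantiate atomic_add(),
 * atomic_add_return(), atomic_sub() and atomic_sub_return() from
 * whichever ATOMIC_OP/ATOMIC_OP_RETURN pair was selected.
 */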
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
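
/*
 * Example of a derived helper in use (illustrative only; put_object()
 * is hypothetical):
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		put_object(obj);
 */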
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }
#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}
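
/*
 * With CONFIG_ARM_LPAE the architecture guarantees single-copy
 * atomicity for 64-bit ldrd/strd, so plain loads and stores suffice
 * here; the non-LPAE variants below must use ldrexd/strexd instead.
 */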
static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}
static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}
#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	smp_mb();							\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
	smp_mb();							\
	return result;							\
}
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
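
/*
 * As with the 32-bit ATOMIC_OPS() above, these two invocations
 * instantiate atomic64_add(), atomic64_add_return(), atomic64_sub()
 * and atomic64_sub_return(), carrying between the low and high words
 * via adds/adc and subs/sbc.
 */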
static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
					  long long new)
{
	long long oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd	%1, %H1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"teqeq	%H1, %H4\n"
		"strexdeq %0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}
static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
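
/*
 * atomic64_dec_if_positive() skips the store when the decrement would
 * go negative (the bmi above branches past strexd), so a negative
 * return value means the counter was left unmodified.
 */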
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%Q0, %Q0, %Q6\n"
"	adc	%R0, %R0, %R6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}
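
/*
 * Unlike the 32-bit __atomic_add_unless(), this returns a flag:
 * non-zero if the add was performed, 0 if the counter already
 * equalled @u.
 */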
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __ASM_ARM_ATOMIC_H */