/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }
/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
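/*
 * Illustrative sketch only (not part of this header): a hypothetical
 * driver-private counter using the accessors above.  The structure and
 * function names are made up for the example.
 *
 *	static struct {
 *		atomic_t rx_packets;
 *	} foo_stats = {
 *		.rx_packets = ATOMIC_INIT(0),
 *	};
 *
 *	static void foo_reset_stats(void)
 *	{
 *		atomic_set(&foo_stats.rx_packets, 0);	// plain store; safe per the note above
 *	}
 *
 *	static int foo_read_stats(void)
 *	{
 *		return atomic_read(&foo_stats.rx_packets);
 *	}
 */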
#if __LINUX_ARM_ARCH__ >= 6
/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	/* ldrex/strex retry loop: repeat until the exclusive store succeeds */
	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	/* the _return variants must imply a full memory barrier */
	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
		/* retry only if the conditional store was attempted and failed */
	} while (res);

	smp_mb();

	return oldval;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%3]\n"
"	bic	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}
#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}
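/*
 * Illustrative sketch only: atomic_add_unless() returns non-zero iff it
 * performed the addition, which makes it suitable for "take a reference
 * unless the object is already dead" patterns -- the classic inc-not-zero
 * idiom (cf. atomic64_inc_not_zero() below).  struct foo and foo_get() are
 * hypothetical.
 *
 *	static bool foo_get(struct foo *f)
 *	{
 *		return atomic_add_unless(&f->refcnt, 1, 0);
 *	}
 */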
#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
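/*
 * Illustrative sketch only: the smp_mb__{before,after}_atomic_{dec,inc}()
 * hooks let generic code order plain memory accesses against a neighbouring
 * atomic_inc()/atomic_dec(); here they simply expand to smp_mb().  The
 * structure and fields below are hypothetical.
 *
 *	obj->status = FOO_DONE;
 *	smp_mb__before_atomic_dec();	// make the status store visible first
 *	atomic_dec(&obj->pending);
 */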
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }
static inline u64 atomic64_read(atomic64_t *v)
{
	u64 result;

	/* ldrexd reads the 64-bit counter as a single atomic access */
	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}
static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	/* 64-bit add: adds/adc pair propagates the carry from low to high word */
	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}
static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}
static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}
static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}
static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, #1\n"
"	sbc	%H0, %H0, #0\n"
"	teq	%H0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
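/*
 * Illustrative sketch only: atomic64_dec_if_positive() returns the
 * decremented value; a negative return means the counter was not touched
 * because the decrement would have taken it below zero.  The quota counter
 * and error choice below are hypothetical.
 *
 *	if (atomic64_dec_if_positive(&quota) < 0)
 *		return -EBUSY;	// no quota left; counter was left unchanged
 */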
static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	u64 val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%0, %0, %6\n"
"	adc	%H0, %H0, %H6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
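/*
 * Illustrative sketch only: the 64-bit counters follow the same conventions
 * as the 32-bit ones above.  The byte counter below is hypothetical.
 *
 *	static atomic64_t foo_rx_bytes = ATOMIC64_INIT(0);
 *
 *	static void foo_account_rx(unsigned int len)
 *	{
 *		atomic64_add(len, &foo_rx_bytes);
 *	}
 */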
#else /* !CONFIG_GENERIC_ATOMIC64 */
#include <asm-generic/atomic64.h>
#endif

#include <asm-generic/atomic-long.h>
#endif /* __ASM_ARM_ATOMIC_H */