#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <asm/barrier.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us. We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

#define atomic_read(v)		((v)->counter + 0)
#define atomic64_read(v)	((v)->counter + 0)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))
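
/*
 * Illustrative sketch (not part of the original header; the counter and
 * helper names are hypothetical): counters are declared and initialised
 * with ATOMIC_INIT(), or set at run time with atomic_set(); atomic_read()
 * returns a plain snapshot of the value with no ordering implied.
 *
 *	static atomic_t pending = ATOMIC_INIT(0);
 *
 *	static void reset_pending(void)
 *	{
 *		atomic_set(&pending, 0);
 *	}
 *
 *	static int pending_now(void)
 *	{
 *		return atomic_read(&pending);
 *	}
 */
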
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the result value
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
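
/*
 * Illustrative sketch (not part of the original header): the usual
 * compare-and-swap retry loop built on atomic_cmpxchg().  The helper
 * below is hypothetical; it atomically stores the maximum of the
 * current value and a new sample.
 *
 *	static void atomic_track_max(atomic_t *max, int sample)
 *	{
 *		int old = atomic_read(max);
 *
 *		while (old < sample) {
 *			int seen = atomic_cmpxchg(max, old, sample);
 *			if (seen == old)
 *				break;
 *			old = seen;
 *		}
 *	}
 */
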
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
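
/*
 * Illustrative sketch (not part of the original header): the classic
 * use of atomic_inc_not_zero() is taking a reference to an object whose
 * count may concurrently be dropping to zero.  struct obj and its
 * refcount field are hypothetical.
 *
 *	static int obj_get(struct obj *o)
 *	{
 *		return atomic_inc_not_zero(&o->refcount);
 *	}
 */
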
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
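
/*
 * Illustrative sketch (not part of the original header), pairing the
 * hypothetical obj_get() above: atomic_dec_and_test() drops a reference
 * and reports whether this caller released the last one.
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refcount))
 *			kfree(o);
 *	}
 */
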
#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
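
/*
 * Illustrative sketch (not part of the original header): plain
 * atomic_inc()/atomic_dec() imply no ordering here, so a caller that
 * needs its prior stores visible before the count changes uses the
 * wrappers above.  The obj fields are hypothetical.
 *
 *	obj->state = STATE_DONE;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->users);
 */
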
#include <asm-generic/atomic.h>
#endif /* _ALPHA_ATOMIC_H */