/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us. Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>

#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is for locking interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, LOCKLEVEL
 *    <code>
 *    wsr  a15, PS
 *    rsync
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * may not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */
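
/*
 * On cores configured with the S32C1I option, the functions below do not
 * lock interrupts at all; they retry a load / conditional-store sequence
 * until the store succeeds. A sketch of that loop (register names are
 * illustrative):
 *
 *    1:  l32i    tmp, addr, 0       read the current value
 *        wsr     tmp, scompare1     expected value for the store
 *        <compute new value>
 *        s32c1i  new, addr, 0       store iff *addr == SCOMPARE1
 *        bne     new, tmp, 1b       another writer got in between; retry
 */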

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		(*(volatile int *)&(v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		((v)->counter = (i))

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t * v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       add     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       add     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);
#endif
}

/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       sub     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);
#endif
}

/*
 * We use atomic_{add|sub}_return to define other functions.
 */

static inline int atomic_add_return(int i, atomic_t * v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	/* s32c1i leaves the old value in %0; the trailing add yields old + i */
	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       add     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			"       add     %0, %0, %2\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);

	return result;
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       add     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);

	return vval;
#endif
}

static inline int atomic_sub_return(int i, atomic_t * v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	/* s32c1i leaves the old value in %0; the trailing sub yields old - i */
	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       sub     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			"       sub     %0, %0, %2\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);

	return result;
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);

	return vval;
#endif
}
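
/*
 * Unlike atomic_add()/atomic_sub(), the *_return variants yield the new
 * value; the test macros below are built on that. An illustrative use
 * (the counter name is hypothetical):
 *
 *    if (atomic_add_return(1, &nr_waiters) == 1)
 *        ... we were the first waiter ...
 */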

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1,(v))

/**
 * atomic_inc_return - increment atomic variable and return the new value
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the result.
 */
#define atomic_inc_return(v) atomic_add_return(1,(v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/**
 * atomic_dec_return - decrement atomic variable and return the new value
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the result.
 */
#define atomic_dec_return(v) atomic_sub_return(1,(v))

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
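
/*
 * A typical resource-counting pattern built from these primitives (the
 * function and counter names are hypothetical):
 *
 *    static atomic_t module_refs = ATOMIC_INIT(0);
 *
 *    void module_get(void)
 *    {
 *        atomic_inc(&module_refs);
 *    }
 *
 *    void module_put(void)
 *    {
 *        if (atomic_dec_and_test(&module_refs))
 *            module_cleanup();    // last reference gone
 *    }
 */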

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;	/* cmpxchg failed; retry with the value we saw */
	}
	return c;
}
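
/*
 * The generic atomic_add_unless()/atomic_inc_not_zero() helpers in
 * <linux/atomic.h> are defined in terms of __atomic_add_unless(). An
 * illustrative use (the refcnt field is hypothetical): take a reference
 * only while the count is still non-zero:
 *
 *    if (__atomic_add_unless(&obj->refcnt, 1, 0) != 0)
 *        ... we now hold a reference ...
 */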

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       and     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (~mask), "a" (v)
			: "memory"
			);
#else
	unsigned int all_f = -1;
	unsigned int vval;

	/* %4 is tied to %1, so the xor with all_f computes ~mask for the and */
	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       xor     %1, %4, %3\n"
			"       and     %0, %0, %4\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval), "=a" (mask)
			: "a" (v), "a" (all_f), "1" (mask)
			: "a15", "memory"
			);
#endif
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       or      %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (mask), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       or      %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (mask), "a" (v)
			: "a15", "memory"
			);
#endif
}
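
/*
 * Illustrative use of the mask operations (the flag and structure names
 * are hypothetical):
 *
 *    atomic_set_mask(FLAG_PENDING, &chip->flags);     set a flag bit
 *    atomic_clear_mask(FLAG_PENDING, &chip->flags);   clear it again
 */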

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __KERNEL__ */

#endif /* _XTENSA_ATOMIC_H */