#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_
/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#define ATOMIC_INIT(i)		{ (i) }
/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire barrier
 * on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})
#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})
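/*
 * Note: the fully ordered, _acquire and _release forms of the atomics are
 * not spelled out in this file; the generic wrappers in <linux/atomic.h>
 * build them from the _relaxed primitives below via the two hooks above.
 * Illustrative expansion (not verbatim generated code):
 *
 *	atomic_add_return_acquire(a, v)
 *		-> __atomic_op_acquire(atomic_add_return, a, v)
 *		-> atomic_add_return_relaxed(a, v), then PPC_ACQUIRE_BARRIER
 */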
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}
static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
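/*
 * The read-modify-write operations below are all built on the same
 * load-reserve/store-conditional retry loop: lwarx loads the counter and
 * sets a reservation, the new value is computed in a register, and stwcx.
 * stores it back only if the reservation is still intact; "bne- 1b"
 * restarts the sequence whenever another CPU modified the word in between.
 */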
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)
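/*
 * Instantiate the basic operations.  Each ATOMIC_OPS() use below expands to
 * both atomic_<op>() and atomic_<op>_return_relaxed(); the bitwise ops only
 * get the non-returning form here.
 */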
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)

ATOMIC_OP(and, and)
ATOMIC_OP(or, or)
ATOMIC_OP(xor, xor)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n\
	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
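/*
 * Illustrative cmpxchg retry loop (hypothetical caller, not part of this
 * header): a saturating increment built on atomic_cmpxchg(), which returns
 * the value actually found in the counter:
 *
 *	int old = atomic_read(v);
 *	while (old != INT_MAX) {
 *		int seen = atomic_cmpxchg(v, old, old + 1);
 *		if (seen == old)
 *			break;		update won the race
 *		old = seen;		lost the race, retry with new value
 *	}
 */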
/*
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n\
	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
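/*
 * Illustrative use (hypothetical caller, not part of this header): take a
 * reference only while the count has not already dropped to zero:
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;	old value was zero, object is going away
 *
 * The generic atomic_add_unless() helper is layered on this primitive.
 */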
/*
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
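/*
 * Illustrative use (hypothetical caller, not part of this header): consume
 * one slot only if one is available, never driving the count below zero:
 *
 *	if (atomic_dec_if_positive(&pool->free_slots) < 0)
 *		return -EBUSY;	count was already zero, nothing was taken
 */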
#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }
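/*
 * The 64-bit atomics mirror the 32-bit operations above, but are built on
 * ld/std and the ldarx/stdcx. reservation pair, with doubleword compares
 * (cmpd/cmpdi) where a comparison is needed.
 */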
static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}
static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

ATOMIC64_OP(and, and)
ATOMIC64_OP(or, or)
ATOMIC64_OP(xor, xor)
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/*
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was performed (@v was not @u), and zero
 * otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n\
	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}
/*
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */