powerpc: atomic: Implement atomic{,64}_*_return_* variants
[deliverable/linux.git] arch/powerpc/include/asm/atomic.h
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with a
 * "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})

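/*
 * Illustrative sketch only, not part of this header: the generic
 * linux/atomic.h layer of this era is expected to build the ordered
 * variants from the _relaxed implementations, picking up the two
 * overrides above.  Roughly, and assuming the generic
 * __atomic_op_fence wrapper for the fully-ordered form:
 */
#if 0
#define atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#define atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
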
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

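/*
 * Note on the asm templates above: the GCC operand modifiers %U1/%X1
 * (and %U0/%X0) let the compiler pick the update or indexed form of
 * the load/store when the addressing mode calls for it, so
 * atomic_read()/atomic_set() compile to a single ordinary lwz/stw --
 * plain volatile accesses, not reservation-protected sequences.
 */
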
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

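/*
 * For reference, ATOMIC_OP(add, add) expands to (roughly) the function
 * below: a load-reserve/store-conditional loop that retries until the
 * read-modify-write completes atomically.  Shown under #if 0 purely as
 * an illustration.
 */
#if 0
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n"	/* load and reserve */
"	add	%0,%2,%0\n"			/* t = a + t */
	PPC405_ERR77(0,%3)			/* PPC405 erratum #77 workaround */
"	stwcx.	%0,0,%3\n"			/* store iff still reserved */
"	bne-	1b\n"				/* reservation lost: retry */
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
#endif
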
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)

ATOMIC_OP(and, and)
ATOMIC_OP(or, or)
ATOMIC_OP(xor, xor)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

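/*
 * Hypothetical usage sketch ("credit" is an invented name):
 * atomic_add_negative() simply reports the sign of the fully-ordered
 * add-and-return, e.g.
 *
 *	atomic_t credit = ATOMIC_INIT(1);
 *
 *	if (atomic_add_negative(-2, &credit))
 *		;	// credit went below zero (here: -1)
 */
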
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

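/*
 * Note: addic also updates the carry bit, which lives in the XER
 * register -- hence the "xer" clobber on the inc/dec variants here,
 * while the ATOMIC_OP templates above clobber only "cc".
 */
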
static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}

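/*
 * Sketch, not part of this header: the generic linux/atomic.h of this
 * era is expected to wrap __atomic_add_unless() roughly as below, so
 * callers get a boolean-style atomic_add_unless():
 */
#if 0
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	/* Non-zero iff the old value differed from u, i.e. we added. */
	return __atomic_add_unless(v, a, u) != u;
}
#endif
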
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))

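/*
 * Hypothetical usage sketch (struct and names invented for
 * illustration): atomic_inc_not_zero() is the classic "take a
 * reference only while the object is still live" primitive:
 */
#if 0
struct obj {
	atomic_t refcnt;
};

static inline bool obj_tryget(struct obj *o)
{
	/* Fails exactly when the last reference is already gone. */
	return atomic_inc_not_zero(&o->refcnt) != 0;
}
#endif
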
#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive

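/*
 * Hypothetical usage sketch (invented name): because the return value
 * is the old value minus one, a caller can treat *v as a pool of
 * tokens and grab one only if any remain:
 */
#if 0
static inline bool take_token(atomic_t *pool)
{
	/* >= 0 means the old value was > 0 and we decremented it. */
	return atomic_dec_if_positive(pool) >= 0;
}
#endif
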
#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

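/*
 * Note: unlike the 32-bit ATOMIC_OP above, the 64-bit template has no
 * PPC405_ERR77() workaround -- the PPC405 is a 32-bit core, so these
 * ldarx/stdcx. sequences can never run on an affected part.
 */
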
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
ATOMIC64_OP(and, and)
ATOMIC64_OP(or, or)
ATOMIC64_OP(xor, xor)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

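/*
 * Note: the 64-bit variant tests the decremented value directly with
 * the record form "addic." (which sets CR0 as well as the carry),
 * where the 32-bit version above needs a separate cmpwi before the
 * addi.
 */
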
#define atomic64_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new)		(xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was done, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

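/*
 * Note: unlike the 32-bit __atomic_add_unless() above, which returns
 * the old value and leaves the comparison to the generic wrapper, the
 * 64-bit variant compares internally and returns whether the add
 * actually happened.
 */
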
/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */