#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})

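/*
 * Illustrative sketch (not part of this header): the generic atomic code
 * in include/linux/atomic.h is expected to build the acquire/release
 * variants from the _relaxed ones via the helpers above, roughly as:
 *
 *	#define atomic_inc_return_acquire(...)				\
 *		__atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
 *
 * i.e. the relaxed op followed by an isync/lwsync acquire barrier.
 */
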
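/*
 * Note (added for clarity, our reading of the constraints): the
 * "%U1%X1" / "%U0%X0" modifiers below let GCC pick the update ("u")
 * and indexed ("x") forms of lwz/stw as the addressing mode of the
 * "m" operand requires, and the volatile asm keeps the compiler from
 * caching or tearing the access.
 */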
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

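/*
 * How the loop above works (added for clarity): lwarx loads the word
 * and sets a reservation on it; stwcx. stores only if the reservation
 * is still held, setting CR0 accordingly, so "bne- 1b" retries the
 * whole sequence if another CPU touched the line in between.
 * PPC405_ERR77() expands to a workaround (a dcbt ahead of the stwcx.)
 * for erratum #77 on PPC405 cores, and to nothing elsewhere.
 */
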
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)

ATOMIC_OP(and, and)
ATOMIC_OP(or, or)
ATOMIC_OP(xor, xor)

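/*
 * Illustrative expansion (not part of this header): ATOMIC_OPS(add, add)
 * above generates, via the two macros,
 *
 *	static __inline__ void atomic_add(int a, atomic_t *v);
 *	static inline int atomic_add_return_relaxed(int a, atomic_t *v);
 *
 * both as lwarx/add/stwcx. retry loops. "subf" is used for the sub
 * variants because it computes rt = rb - ra, matching the operand
 * order the macros emit.
 */
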
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
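
/*
 * Illustrative use (not part of this header): a typical lock-free
 * update built on atomic_cmpxchg(), here clamping a counter at some
 * hypothetical ceiling:
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(&v);
 *		new = min(old + 1, ceiling);
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */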

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
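
/*
 * Sketch of the expected caller (not part of this header): the generic
 * atomic_add_unless() in include/linux/atomic.h builds on this as
 * roughly
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 *
 * so it returns whether the add actually happened.
 */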

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
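
/*
 * Illustrative use (not part of this header): the classic lookup
 * pattern, taking a reference only if the object is still live
 * (obj->refcount is hypothetical):
 *
 *	if (atomic_inc_not_zero(&obj->refcount))
 *		return obj;	// got a reference
 *	return NULL;		// already on its way to being freed
 */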

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable v was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
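
/*
 * Illustrative use (not part of this header): decrement a count that
 * must not go below zero, e.g. a semaphore-like trylock (sem->count
 * is hypothetical):
 *
 *	if (atomic_dec_if_positive(&sem->count) < 0)
 *		return -EAGAIN;	// was already 0, not decremented
 */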

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)
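
/*
 * Note (added for clarity): unlike the 32-bit macros, these carry no
 * PPC405_ERR77() workaround, since the PPC405 is a 32-bit core and
 * never executes the 64-bit ldarx/stdcx. paths.
 */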

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
ATOMIC64_OP(and, and)
ATOMIC64_OP(or, or)
ATOMIC64_OP(xor, xor)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable v was not decremented.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add happened, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

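/*
 * Note (added for clarity): unlike 32-bit __atomic_add_unless(), which
 * returns the old value and is wrapped by generic code, this 64-bit
 * version already returns the "did the add happen" boolean itself:
 * the trailing "subf" undoes the add in %0, so t == u exactly when the
 * store was skipped.
 */
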
/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */