arch/powerpc/include/asm/atomic.h
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})
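
/*
 * The generic layer in include/linux/atomic.h composes these helpers with
 * the _relaxed operations defined below to build the acquire/release
 * variants, roughly along the lines of (illustrative sketch only):
 *
 *	#define atomic_add_return_acquire(...) \
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 */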
32
33 static __inline__ int atomic_read(const atomic_t *v)
34 {
35 int t;
36
37 __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
38
39 return t;
40 }
41
42 static __inline__ void atomic_set(atomic_t *v, int i)
43 {
44 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
45 }
46
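/*
 * Note: the %U and %X operand modifiers above let gcc emit the update
 * ("lwzu"/"stwu") or indexed ("lwzx"/"stwx") forms of the load/store when
 * it chooses such an addressing mode for the memory operand.
 *
 * The op macros below use the usual PowerPC load-reserve/store-conditional
 * (lwarx/stwcx.) retry loop: if the reservation is lost before the stwcx.
 * succeeds, "bne- 1b" reloads and retries.  PPC405_ERR77() expands to a
 * workaround instruction on 405 parts affected by erratum #77 and to
 * nothing on other CPUs.
 */
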
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)

ATOMIC_OP(and, and)
ATOMIC_OP(or, or)
ATOMIC_OP(xor, xor)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
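
/*
 * The expansions above provide atomic_add(), atomic_sub(), atomic_and(),
 * atomic_or() and atomic_xor(), plus atomic_add_return_relaxed() and
 * atomic_sub_return_relaxed().  Defining the *_return_relaxed names lets
 * the generic code in include/linux/atomic.h generate the fully ordered
 * and acquire/release return variants from the relaxed ones.
 */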

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
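
/*
 * Note: the inc/dec variants use "addic", which updates XER[CA]; that is
 * why "xer" appears in their clobber lists, unlike the add/sub ops above.
 */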

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)\
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
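
/*
 * Illustrative only: a typical compare-and-swap retry loop built on
 * atomic_cmpxchg(), updating v to some function of its old value
 * (transform() is a hypothetical helper, not part of this header):
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(v);
 *		new = transform(old);
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */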

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
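
/*
 * Note: the generic atomic_add_unless() in include/linux/atomic.h is
 * typically a thin wrapper over this, roughly
 *	atomic_add_unless(v, a, u) := (__atomic_add_unless((v), (a), (u)) != (u))
 * so callers get a boolean "was the add performed" result.
 */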

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
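
/*
 * Illustrative use only: take a reference in a lookup path only while the
 * object is still live (obj/refcnt are hypothetical names):
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	(object is already on its way out)
 */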

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
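
/*
 * Illustrative use only, e.g. a trylock-style "grab one if any are free"
 * on a counting resource (free_slots is a hypothetical atomic_t):
 *
 *	if (atomic_dec_if_positive(&free_slots) < 0)
 *		...nothing was available and the counter was left untouched...
 */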

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
ATOMIC64_OP(and, and)
ATOMIC64_OP(or, or)
ATOMIC64_OP(xor, xor)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
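
/*
 * As on the 32-bit side, only the _relaxed return variants are provided
 * here and include/linux/atomic.h derives the ordered and acquire/release
 * forms from them.  The 64-bit op macros carry no PPC405_ERR77() because
 * the 405 erratum only concerns 32-bit parts, which never build this
 * __powerpc64__ section.
 */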

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
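
/*
 * Note: unlike the 32-bit atomic_dec_if_positive() above, this relies on
 * the record form "addic." to set CR0 for the blt- test instead of a
 * separate compare; because addic. also updates XER[CA], "xer" appears in
 * the clobber list here but not in the 32-bit version.
 */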

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was performed, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
475 "1: ldarx %0,0,%1 # __atomic_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}
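
/*
 * Note: unlike the 32-bit __atomic_add_unless() above, which hands back
 * the old value, this returns a boolean saying whether the add happened,
 * so it matches the generic atomic64_add_unless() interface directly.
 */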

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */