#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>

#define ATOMIC_INIT(i)		{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))

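/*
 * Usage sketch (editorial example, not part of the original header): an
 * atomic_t is declared and initialised with ATOMIC_INIT, or set at run
 * time with atomic_set(); atomic_read() returns the current value.
 * "nr_events" is a hypothetical variable name.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_events, 0);
 *	printk("events so far: %d\n", atomic_read(&nr_events));
 *
 * The operations below are built on the lwarx/stwcx. (load-reserve /
 * store-conditional) retry loop: lwarx loads the counter and sets a
 * reservation, the new value is computed in a register, and stwcx.
 * stores it back only if the reservation still holds; otherwise bne-
 * retries.  PPC405_ERR77() inserts a dcbt before the stwcx. when the
 * PPC405 erratum 77 workaround is configured and expands to nothing
 * otherwise.  LWSYNC_ON_SMP and ISYNC_ON_SMP expand to lwsync/isync
 * barriers on SMP builds, so the value-returning variants also order
 * surrounding memory accesses; the plain atomic_add/sub/inc/dec
 * variants impose no ordering.
 */
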
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
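/*
 * Usage sketch (editorial example): atomic_add_return() returns the new
 * value, so it can hand out increasing values without a lock.
 * "next_id" and alloc_id() are hypothetical.
 *
 *	static atomic_t next_id = ATOMIC_INIT(0);
 *
 *	int alloc_id(void)
 *	{
 *		return atomic_add_return(1, &next_id);
 *	}
 *
 * atomic_inc_return(&next_id), defined further down, is equivalent to
 * the call above.
 */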

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
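/*
 * Usage sketch (editorial example): atomic_cmpxchg() lets callers build
 * atomic read-modify-write operations this header does not provide.  A
 * hypothetical "clamp to at least new" helper could look like:
 *
 *	static int atomic_max(atomic_t *v, int new)
 *	{
 *		int old;
 *
 *		do {
 *			old = atomic_read(v);
 *			if (old >= new)
 *				return old;
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *		return new;
 *	}
 *
 * atomic_xchg() unconditionally stores the new value and returns the
 * previous one.
 */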

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%1)
192 " stwcx. %0,0,%1 \n\
193 bne- 1b \n"
194 ISYNC_ON_SMP
195 " subf %0,%2,%0 \n\
196 2:"
197 : "=&r" (t)
198 : "r" (&v->counter), "r" (a), "r" (u)
199 : "cc", "memory");
200
201 return t != u;
202 }
203
204 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
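/*
 * Usage sketch (editorial example): atomic_inc_not_zero() is the usual
 * way to take a reference on an object that is freed when its count
 * drops to zero, without ever reviving a count that has already hit
 * zero.  "struct foo", its refcount field and foo_tryget() are
 * hypothetical.
 *
 *	static struct foo *foo_tryget(struct foo *f)
 *	{
 *		return atomic_inc_not_zero(&f->refcount) ? f : NULL;
 *	}
 */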

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
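/*
 * Usage sketch (editorial example): atomic_dec_and_test() pairs with the
 * tryget above; exactly one caller sees the count reach zero and frees
 * the object.  foo_put() and foo_free() are hypothetical.
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcount))
 *			foo_free(f);
 *	}
 */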

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
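/*
 * Usage sketch (editorial example): atomic_dec_if_positive() can consume
 * one unit of a resource only while some remain, e.g. a credit counter.
 * "credits" is a hypothetical atomic_t holding the free credit count.
 *
 *	if (atomic_dec_if_positive(&credits) < 0)
 *		return -EBUSY;		// was already 0, nothing taken
 *	// one credit consumed; give it back later with atomic_inc()
 */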

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()
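/*
 * atomic_inc()/atomic_dec() above imply no memory barrier, so on powerpc
 * these helpers must expand to a real smp_mb().  Usage sketch (editorial
 * example; "obj", its fields and DONE are hypothetical): make a store
 * visible before the pending count is dropped.
 *
 *	obj->status = DONE;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */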

#ifdef __powerpc64__

typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

static __inline__ void atomic64_add(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_add\n\
	add	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_add_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
	add	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_sub\n\
	subf	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
	subf	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_inc_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%1 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_dec_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
436 "1: ldarx %0,0,%1 # atomic_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	ISYNC_ON_SMP
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#endif /* __powerpc64__ */

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */