#ifndef _ASM_X86_ATOMIC_64_H
#define _ASM_X86_ATOMIC_64_H

#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* atomic_t should be a 32-bit signed type */

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct {
	int counter;
} atomic_t;

#define ATOMIC_INIT(i) { (i) }

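/*
 * Illustrative usage sketch (not part of the original header): a
 * file-scope counter initialized at build time. The variable name is
 * hypothetical.
 */
static atomic_t example_nr_events = ATOMIC_INIT(0);
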
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v) ((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) (((v)->counter) = (i))

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "=m" (v->counter)
		     : "ir" (i), "m" (v->counter));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "=m" (v->counter)
		     : "ir" (i), "m" (v->counter));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
		     : "=m" (v->counter), "=qm" (c)
		     : "ir" (i), "m" (v->counter) : "memory");
	return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static inline void atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static inline void atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "decl %0; sete %1"
		     : "=m" (v->counter), "=qm" (c)
		     : "m" (v->counter) : "memory");
	return c != 0;
}
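
/*
 * Illustrative sketch (not part of the original header): the classic
 * release side of refcounting, where whoever drops the count to zero
 * is responsible for freeing the object. The function and parameter
 * names here are hypothetical.
 */
static inline void example_put_ref(atomic_t *refcount, void (*release)(void))
{
	if (atomic_dec_and_test(refcount))
		release();	/* we dropped the final reference */
}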

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "incl %0; sete %1"
		     : "=m" (v->counter), "=qm" (c)
		     : "m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
		     : "=m" (v->counter), "=qm" (c)
		     : "ir" (i), "m" (v->counter) : "memory");
	return c;
}

/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the new value of @v (@i + @v).
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;
	/* xadd hands back the old value of v->counter in "i" */
	asm volatile(LOCK_PREFIX "xaddl %0, %1"
		     : "+r" (i), "+m" (v->counter)
		     : : "memory");
	return i + __i;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}

#define atomic_inc_return(v)  (atomic_add_return(1, v))
#define atomic_dec_return(v)  (atomic_sub_return(1, v))
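
/*
 * Illustrative sketch (not part of the original header): a lock-free
 * sequence-number generator built on atomic_inc_return(). Uniqueness
 * only holds until the 32-bit counter wraps. The names are
 * hypothetical.
 */
static inline int example_next_seq(atomic_t *seq)
{
	return atomic_inc_return(seq);	/* returns the post-increment value */
}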

/* A 64-bit atomic type */

typedef struct {
	long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

/**
 * atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
#define atomic64_read(v) ((v)->counter)

/**
 * atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic64_set(v, i) (((v)->counter) = (i))

/**
 * atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}

/**
 * atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic64_sub(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}

/**
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic64_sub_and_test(long i, atomic64_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
		     : "=m" (v->counter), "=qm" (c)
		     : "er" (i), "m" (v->counter) : "memory");
	return c;
}

/**
 * atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static inline void atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static inline void atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int atomic64_dec_and_test(atomic64_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "decq %0; sete %1"
		     : "=m" (v->counter), "=qm" (c)
		     : "m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic64_inc_and_test(atomic64_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "incq %0; sete %1"
		     : "=m" (v->counter), "=qm" (c)
		     : "m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int atomic64_add_negative(long i, atomic64_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
		     : "=m" (v->counter), "=qm" (c)
		     : "er" (i), "m" (v->counter) : "memory");
	return c;
}

/**
 * atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns the new value of @v (@i + @v).
 */
static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long __i = i;
	/* xadd hands back the old value of v->counter in "i" */
	asm volatile(LOCK_PREFIX "xaddq %0, %1"
		     : "+r" (i), "+m" (v->counter)
		     : : "memory");
	return i + __i;
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	return atomic64_add_return(-i, v);
}

#define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
#define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))

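/*
 * Illustrative sketch (not part of the original header): the usual
 * compare-and-swap retry loop, here used for a saturating add that
 * never pushes a counter above a caller-supplied cap. The function
 * name and parameters are hypothetical.
 */
static inline int example_add_capped(atomic_t *v, int a, int cap)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (c + a > cap)
			return 0;	/* would exceed the cap; give up */
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			return 1;	/* our update won the race */
		c = old;		/* lost a race; retry with the fresh value */
	}
}
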
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
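
/*
 * Illustrative sketch (not part of the original header): the lookup
 * side of refcounting, taking a reference only while the object is
 * still live. The function and parameter names are hypothetical.
 */
static inline int example_tryget_ref(atomic_t *refcount)
{
	/* fails (returns 0) once the count has already dropped to zero */
	return atomic_inc_not_zero(refcount);
}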

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

/**
 * atomic_inc_short - increment of a short integer
 * @v: pointer to type short int
 *
 * Atomically adds 1 to @v.
 * Returns the new value of @v.
 */
static inline short int atomic_inc_short(short int *v)
{
	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
	return *v;	/* note: re-read separately, not part of the locked op */
}

/**
 * atomic_or_long - OR a value into a long integer
 * @v1: pointer to type unsigned long
 * @v2: value to OR into *@v1
 *
 * Atomically ORs @v2 into *@v1. Does not return a value.
 */
static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
{
	asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr)					\
	asm volatile(LOCK_PREFIX "andl %0,%1"				\
		     : : "r" (~(mask)), "m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr)					\
	asm volatile(LOCK_PREFIX "orl %0,%1"				\
		     : : "r" ((unsigned)(mask)), "m" (*(addr))		\
		     : "memory")

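/*
 * Illustrative sketch (not part of the original header): driving the
 * x86-specific mask helpers on a 32-bit flag word. The flag value and
 * names are hypothetical.
 */
#define EXAMPLE_FLAG_BUSY 0x1

static inline void example_mark_busy(unsigned int *flags)
{
	atomic_set_mask(EXAMPLE_FLAG_BUSY, flags);	/* set the busy bit */
}

static inline void example_clear_busy(unsigned int *flags)
{
	atomic_clear_mask(EXAMPLE_FLAG_BUSY, flags);	/* clear the busy bit */
}
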
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

#include <asm-generic/atomic.h>
#endif /* _ASM_X86_ATOMIC_64_H */