arch/i386/lib/dec_and_lock.c
/*
 * x86 version of "atomic_dec_and_lock()" using
 * the atomic "cmpxchg" instruction.
 *
 * (For CPUs lacking cmpxchg, we use the slow
 * generic version, and this one never even gets
 * compiled.)
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/atomic.h>

int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	int counter;
	int newcount;

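	/*
	 * Fast path: as long as the decremented value stays non-zero,
	 * do the decrement with a bare "lock cmpxchg" and never touch
	 * the spinlock.
	 */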
repeat:
	counter = atomic_read(atomic);
	newcount = counter-1;

	if (!newcount)
		goto slow_path;

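	/*
	 * cmpxchgl: if %eax (the value we read above) still matches
	 * atomic->counter, store newcount there; otherwise %eax is
	 * reloaded with the current counter value.
	 */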
	asm volatile("lock; cmpxchgl %1,%2"
		:"=a" (newcount)
		:"r" (newcount), "m" (atomic->counter), "0" (counter));

	/* If the above failed, "eax" will have changed */
	if (newcount != counter)
		goto repeat;
	return 0;

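	/*
	 * Slow path: the count is about to reach zero, so take the lock
	 * before the final decrement.  Return 1 with the lock still held
	 * only if the count really did drop to zero.
	 */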
slow_path:
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);
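
Callers normally reach this routine through the atomic_dec_and_lock() wrapper declared in <linux/spinlock.h>, which returns 1 with the lock held only when the reference count actually dropped to zero. A minimal sketch of the usual calling pattern follows; struct foo, foo_lock, and put_foo() are hypothetical names used only for illustration and are not part of this file:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

/* Hypothetical object protected by a refcount and a list lock. */
struct foo {
	atomic_t		refcount;
	struct list_head	list;
};

static spinlock_t foo_lock = SPIN_LOCK_UNLOCKED;

static void put_foo(struct foo *f)
{
	/* Returns 1 with foo_lock held iff the refcount hit zero. */
	if (atomic_dec_and_lock(&f->refcount, &foo_lock)) {
		list_del(&f->list);	/* unlink while still locked */
		spin_unlock(&foo_lock);
		kfree(f);		/* final release, no other users left */
	}
}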