lib/dec_and_lock.c
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

/*
 * This is an architecture-neutral, but slow,
 * implementation of the notion of "decrement
 * a reference count, and return locked if it
 * decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
 *		spin_lock(&lock);
 *		return 1;
 *	}
 *	return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic".
 *
 * This slow version gets the spinlock unconditionally,
 * and releases it if it isn't needed. Architectures
 * are encouraged to come up with better approaches,
 * this is trivially done efficiently using a load-locked
 * store-conditional approach, for example.
 */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);
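
A minimal usage sketch, not part of the file above, showing the "put" pattern this helper exists for. The struct my_obj, my_obj_put() and my_list_lock names are hypothetical; the sketch assumes a 2.6-era kernel where callers reach _atomic_dec_and_lock() through the atomic_dec_and_lock() wrapper. The point is that the thread dropping the last reference ends up holding the list lock, so the unlink and free cannot race with a concurrent lookup that takes the same lock and bumps the count back up.

/*
 * Hypothetical example, not part of lib/dec_and_lock.c: a refcounted
 * object kept on a global list protected by my_list_lock.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

struct my_obj {
	atomic_t refcount;
	struct list_head list;
};

static spinlock_t my_list_lock = SPIN_LOCK_UNLOCKED;

void my_obj_put(struct my_obj *obj)
{
	/*
	 * atomic_dec_and_lock() returns 1 with my_list_lock held only
	 * when the count actually reached zero; otherwise it returns 0
	 * with the lock not held, and we must not touch the object.
	 */
	if (atomic_dec_and_lock(&obj->refcount, &my_list_lock)) {
		list_del(&obj->list);
		spin_unlock(&my_list_lock);
		kfree(obj);
	}
}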