[PATCH] spinlock consolidation
[deliverable/linux.git] include/asm-i386/spinlock.h
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/config.h>
#include <linux/compiler.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * Simple spin lock operations. There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness guarantees: fairness has a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
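
/*
 * Lock byte values, as a sketch of the protocol implied by the code
 * below (not new semantics):
 *   slock == 1   unlocked
 *   slock <= 0   locked; each waiter's failed "lock ; decb" drives the
 *                value further negative until the holder stores 1 again
 */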

#define __raw_spin_is_locked(x) \
		(*(volatile signed char *)(&(x)->slock) <= 0)

#define __raw_spin_lock_string \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"jns 3f\n" \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpb $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	"3:\n\t"

#define __raw_spin_lock_string_flags \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"jns 4f\n\t" \
	"2:\t" \
	"testl $0x200, %1\n\t" \
	"jz 3f\n\t" \
	"sti\n\t" \
	"3:\t" \
	"rep;nop\n\t" \
	"cmpb $0, %0\n\t" \
	"jle 3b\n\t" \
	"cli\n\t" \
	"jmp 1b\n" \
	"4:\n\t"

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		__raw_spin_lock_string
		:"=m" (lock->slock) : : "memory");
}
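
/*
 * For reference, a minimal C-level sketch of what the asm string above
 * implements. Illustration only, never compiled; __sync_fetch_and_sub is
 * a portable stand-in for the atomic "lock ; decb" and postdates this
 * code.
 */
#if 0
static inline void __spin_lock_c_sketch(raw_spinlock_t *lock)
{
	signed char *byte = (signed char *)&lock->slock;

	/* atomic decrement: an old value > 0 means we took it from 1 to 0 */
	while (__sync_fetch_and_sub(byte, 1) <= 0) {
		/* contended: spin read-only until the byte goes positive */
		while (*(volatile signed char *)byte <= 0)
			cpu_relax();	/* "rep;nop" */
	}
}
#endif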

static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__(
		__raw_spin_lock_string_flags
		:"=m" (lock->slock) : "r" (flags) : "memory");
}
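
/*
 * The flags variant tests bit 9 (0x200, the EFLAGS IF bit) of the
 * caller's saved flags: if interrupts were enabled before the lock
 * attempt, they are re-enabled with "sti" while spinning and disabled
 * again with "cli" before retrying the decrement. A C-level sketch of
 * the same control flow (illustration only, never compiled):
 */
#if 0
static inline void __spin_lock_flags_c_sketch(raw_spinlock_t *lock,
					      unsigned long flags)
{
	signed char *byte = (signed char *)&lock->slock;

	while (__sync_fetch_and_sub(byte, 1) <= 0) {
		if (flags & 0x200)		/* caller had IF set */
			local_irq_enable();	/* "sti" */
		while (*(volatile signed char *)byte <= 0)
			cpu_relax();
		local_irq_disable();		/* "cli" before retry */
	}
}
#endif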

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	char oldval;
	__asm__ __volatile__(
		"xchgb %b0,%1"
		:"=q" (oldval), "=m" (lock->slock)
		:"0" (0) : "memory");
	return oldval > 0;
}
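
/*
 * trylock atomically exchanges 0 into the lock byte and reports success
 * iff the old value was positive, i.e. the lock was free. A hypothetical
 * caller sketch (my_lock is an assumed example name):
 */
#if 0
	if (__raw_spin_trylock(&my_lock)) {
		/* ... critical section ... */
		__raw_spin_unlock(&my_lock);
	} else {
		/* lock was held; back off or do other work */
	}
#endif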

/*
 * __raw_spin_unlock is based on writing $1 to the low byte. This works
 * because IA-32 does not reorder a store with earlier stores, so the
 * plain "movb" cannot become visible before the critical section's
 * writes; it acts as a release. The exceptions are PPro SMP (errata 66
 * and 92) and CONFIG_X86_OOSTORE, where we use "xchgb" instead, whose
 * implicit lock prefix provides the required ordering.
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

#define __raw_spin_unlock_string \
	"movb $1,%0" \
		:"=m" (lock->slock) : : "memory"


static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		__raw_spin_unlock_string
	);
}

#else

#define __raw_spin_unlock_string \
	"xchgb %b0, %1" \
		:"=q" (oldval), "=m" (lock->slock) \
		:"0" (oldval) : "memory"

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	char oldval = 1;

	__asm__ __volatile__(
		__raw_spin_unlock_string
	);
}

#endif

#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks: any writer needs to take an
 * irq-safe write-lock, but readers can take non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores. See
 * semaphore.h for details. -ben
 *
 * the helpers are in arch/i386/kernel/semaphore.c
 */
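
/*
 * Counter states, as implied by the operations below (on i386,
 * RW_LOCK_BIAS is 0x01000000):
 *   lock == RW_LOCK_BIAS       unlocked
 *   lock == RW_LOCK_BIAS - N   held by N readers
 *   lock == 0                  held by one writer
 *   lock < 0 (sign bit set)    contended: a writer holds or is waiting
 *                              for the lock while others attempt entry
 */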

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(x)		((int)(x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__build_read_lock(rw, "__read_lock_failed");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__build_write_lock(rw, "__write_lock_failed");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);		/* optimistically claim a reader slot */
	if (atomic_read(count) >= 0)
		return 1;		/* no writer present or pending */
	atomic_inc(count);		/* undo: a writer holds or wants it */
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	/* claiming the whole bias succeeds only if the lock was idle */
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);	/* undo the attempt */
	return 0;
}
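
/*
 * Hypothetical caller sketch (my_rwlock is an assumed example name):
 */
#if 0
	if (__raw_write_trylock(&my_rwlock)) {
		/* exclusive access: no readers, no other writer */
		__raw_write_unlock(&my_rwlock);
	}
#endif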

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
				 : "=m" (rw->lock) : : "memory");
}

#endif /* __ASM_SPINLOCK_H */