x86: rename .i assembler includes to .h
include/asm-x86/atomic_32.h
#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

#include <linux/compiler.h>
#include <asm/processor.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { int counter; } atomic_t;

#define ATOMIC_INIT(i) { (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v) ((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) (((v)->counter) = (i))

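/*
 * Usage sketch (illustrative only, not part of the original header; the
 * names are hypothetical): declaring, initializing, and resetting an
 * atomic counter. Note that atomic_read()/atomic_set() are single loads
 * and stores, with no implicit memory barrier.
 */
static atomic_t example_nr_events = ATOMIC_INIT(0);

static __inline__ void example_reset_events(void)
{
        if (atomic_read(&example_nr_events) != 0)       /* racy peek is fine here */
                atomic_set(&example_nr_events, 0);
}
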
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
        __asm__ __volatile__(
                LOCK_PREFIX "addl %1,%0"
                :"+m" (v->counter)
                :"ir" (i));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
        __asm__ __volatile__(
                LOCK_PREFIX "subl %1,%0"
                :"+m" (v->counter)
                :"ir" (i));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK_PREFIX "subl %2,%0; sete %1"
                :"+m" (v->counter), "=qm" (c)
                :"ir" (i) : "memory");
        return c;
}
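
/*
 * Usage sketch (illustrative only; the struct and names are hypothetical):
 * dropping @nr references with a single locked subtract and freeing the
 * object when the count reaches zero.
 */
struct example_obj {
        atomic_t refs;
};

static __inline__ void example_put_many(struct example_obj *obj, int nr,
                                        void (*release)(struct example_obj *))
{
        if (atomic_sub_and_test(nr, &obj->refs))
                release(obj);   /* we dropped the last reference */
}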

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK_PREFIX "incl %0"
                :"+m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK_PREFIX "decl %0"
                :"+m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK_PREFIX "decl %0; sete %1"
                :"+m" (v->counter), "=qm" (c)
                : : "memory");
        return c != 0;
}
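
/*
 * Usage sketch (illustrative only; names are hypothetical): the classic
 * put() pattern built on atomic_dec_and_test(). Exactly one caller sees
 * the transition to zero, so exactly one caller runs the release hook.
 */
static __inline__ void example_put(struct example_obj *obj,
                                   void (*release)(struct example_obj *))
{
        if (atomic_dec_and_test(&obj->refs))
                release(obj);   /* count hit zero: we were the last user */
}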

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK_PREFIX "incl %0; sete %1"
                :"+m" (v->counter), "=qm" (c)
                : : "memory");
        return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK_PREFIX "addl %2,%0; sets %1"
                :"+m" (v->counter), "=qm" (c)
                :"ir" (i) : "memory");
        return c;
}

/**
 * atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
        int __i;
#ifdef CONFIG_M386
        unsigned long flags;
        if (unlikely(boot_cpu_data.x86 <= 3))
                goto no_xadd;
#endif
        /* Modern 486+ processor */
        __i = i;
        __asm__ __volatile__(
                LOCK_PREFIX "xaddl %0, %1"
                :"+r" (i), "+m" (v->counter)
                : : "memory");
        return i + __i;

#ifdef CONFIG_M386
no_xadd:        /* Legacy 386 processor */
        local_irq_save(flags);
        __i = atomic_read(v);
        atomic_set(v, i + __i);
        local_irq_restore(flags);
        return i + __i;
#endif
}
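
/*
 * Note on the xadd path above: "xaddl %0,%1" leaves the *old* value of
 * v->counter in %0, so "i + __i" reconstructs old value + addend, i.e.
 * the post-operation value. Usage sketch (illustrative only; the name is
 * hypothetical): handing out unique, monotonically increasing tickets.
 */
static __inline__ int example_next_ticket(atomic_t *seq)
{
        return atomic_add_return(1, seq);       /* value after the increment */
}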

/**
 * atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns @v - @i.
 */
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
        return atomic_add_return(-i, v);
}

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
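
/*
 * Note (added for clarity): atomic_cmpxchg() returns the value @v held
 * before the operation, so the swap succeeded iff the return value equals
 * @old. atomic_add_unless() below shows the resulting retry loop.
 */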

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
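
/*
 * Usage sketch (illustrative only; names are hypothetical): taking a
 * reference during a lookup only if the object is still live. Once the
 * count has dropped to zero the object is being torn down and must not
 * be resurrected.
 */
static __inline__ int example_get_live(struct example_obj *obj)
{
        return atomic_inc_not_zero(&obj->refs); /* non-zero on success */
}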

#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
        __asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
        : : "r" (~(mask)), "m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr) \
        __asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
        : : "r" (mask), "m" (*(addr)) : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

#include <asm-generic/atomic.h>
#endif /* __ARCH_I386_ATOMIC__ */