#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that plain C can't guarantee for us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible, since they are much slower
 * than regular operations.
 */


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))

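/*
 * Usage sketch (illustration only, not part of the original header;
 * "users" is a hypothetical counter):
 *
 *	static atomic_t users = ATOMIC_INIT(0);
 *
 *	atomic_set(&users, 5);
 *	n = atomic_read(&users);	(plain, unordered load of 5)
 */
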
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

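/*
 * For exposition (not in the original source): each load-locked/
 * store-conditional loop below behaves roughly like this C sketch,
 * where load_locked() and store_conditional() are hypothetical
 * stand-ins for the ldl_l/stl_c (or ldq_l/stq_c) instructions:
 *
 *	do {
 *		temp = load_locked(&v->counter);
 *		temp = temp OP i;
 *	} while (!store_conditional(&v->counter, temp));
 *
 * The rarely taken retry branch is placed in .subsection 2, keeping
 * it off the hot path so the forward branch predicts not-taken.
 */
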
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}									\

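/*
 * Illustration (not in the original header): ATOMIC_OP(add, addl)
 * expands to atomic_add(), whose loop body becomes
 *
 *	1:	ldl_l	%0,%1		(load-locked v->counter)
 *		addl	%0,%2,%0	(temp = temp + i)
 *		stl_c	%0,%1		(store-conditional; %0 = success)
 *		beq	%0,2f		(retry via .subsection 2 on failure)
 */
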
#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

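/*
 * Note (added commentary): atomic_##op##_return() returns the *new*
 * value. The #asm_op instruction appears twice: once to compute the
 * returned result (%2) and once to compute the value for stl_c in
 * temp (%0), because stl_c overwrites %0 with its success flag. The
 * surrounding smp_mb() pair makes the operation fully ordered.
 */
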
#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

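/*
 * Note (added commentary): unlike atomic_##op##_return(),
 * atomic_fetch_##op() returns the *old* value: ldl_l loads directly
 * into result (%2), and only the scratch register temp (%0) receives
 * the updated value handed to stl_c.
 */
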
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}									\

#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, asm_op)					\
static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

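/*
 * Illustration (not in the original header): the two instantiations
 * above generate, for "add":
 *
 *	atomic_add(), atomic_add_return(), atomic_fetch_add()   (addl)
 *	atomic64_add(), atomic64_add_return(), atomic64_fetch_add() (addq)
 *
 * and the analogous "sub" family using subl/subq.
 */
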
#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#define atomic_fetch_or atomic_fetch_or

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)						\
	ATOMIC_OP(op, asm)						\
	ATOMIC_FETCH_OP(op, asm)					\
	ATOMIC64_OP(op, asm)						\
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

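/*
 * Note (added commentary): "bic" (bit clear) and "bis" (bit set) are
 * the Alpha mnemonics implementing andnot and or, respectively. The
 * redefined ATOMIC_OPS() deliberately omits the *_return variants for
 * the bitwise family; only the void and fetch_* forms are generated.
 */
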
#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

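/*
 * Usage sketch (illustration only; transform() is a hypothetical
 * update function): the classic cmpxchg retry loop for an arbitrary
 * read-modify-write on an atomic_t "v":
 *
 *	do {
 *		old = atomic_read(&v);
 *		new = transform(old);
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 *
 * atomic_cmpxchg() returns the value it observed in v; equality with
 * old means the swap succeeded.
 */
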
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}

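/*
 * Usage sketch (illustration only; "obj->refs" is a hypothetical
 * field): the generic atomic_add_unless() is built on this helper,
 * e.g. taking a reference only if the count is not already zero:
 *
 *	if (__atomic_add_unless(&obj->refs, 1, 0) != 0)
 *		... got a reference; the old count was nonzero ...
 */
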
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[tmp],%[mem]\n"
	"	cmpeq	%[tmp],%[u],%[c]\n"
	"	addq	%[tmp],%[a],%[tmp]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [tmp] "=&r"(tmp), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return !c;
}

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}

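/*
 * Usage sketch (illustration only; "budget" is a hypothetical
 * counter): taking one unit from a countdown that must not go
 * negative. A return value >= 0 means the decrement happened;
 * a negative return means the counter was already <= 0 and was
 * left unchanged:
 *
 *	if (atomic64_dec_if_positive(&budget) >= 0)
 *		... one unit consumed ...
 */
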
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v)		atomic_sub_return(1,(v))
#define atomic64_dec_return(v)		atomic64_sub_return(1,(v))

#define atomic_inc_return(v)		atomic_add_return(1,(v))
#define atomic64_inc_return(v)		atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v)			atomic_add(1,(v))
#define atomic64_inc(v)			atomic64_add(1,(v))

#define atomic_dec(v)			atomic_sub(1,(v))
#define atomic64_dec(v)			atomic64_sub(1,(v))

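/*
 * Usage sketch (illustration only; "obj" and "refcnt" are
 * hypothetical names): the classic reference-release idiom built on
 * the derived helpers above:
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		kfree(obj);	(last reference dropped)
 */
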
#endif /* _ALPHA_ATOMIC_H */