/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	ACCESS_ONCE((v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

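/*
 * For instance, ATOMIC_OP(add, +=, add) generates a void atomic_add(int i,
 * atomic_t *v): ldrex loads v->counter and marks it exclusive, the add
 * applies the operand, and strex succeeds (writes 0 to %1) only if nothing
 * disturbed the exclusive monitor in between; otherwise teq/bne retry from
 * label 1.
 */
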
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	smp_mb();							\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	smp_mb();							\
									\
	return result;							\
}
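
/*
 * Unlike the void ops above, the *_return variants are full barriers: the
 * smp_mb() before and after the exclusive loop orders the atomic update
 * against accesses on both sides, which is what the kernel's atomic API
 * requires of value-returning atomics.
 */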

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}
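
/*
 * atomic_cmpxchg() stores 'new' only when the current value equals 'old' and
 * always returns the value it observed, so callers typically retry in a loop.
 * Illustrative sketch only (not part of this header) of an increment-if-nonzero
 * built on it:
 *
 *	int old = atomic_read(v);
 *
 *	while (old != 0) {
 *		int seen = atomic_cmpxchg(v, old, old + 1);
 *		if (seen == old)
 *			break;		// update won
 *		old = seen;		// lost the race, retry with the new value
 *	}
 */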

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
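
/*
 * __atomic_add_unless() returns the value it found before any addition; the
 * generic atomic_add_unless() wrapper in <linux/atomic.h> turns that into a
 * boolean by comparing the result against 'u'. Note that the trailing
 * smp_mb() is skipped when no store was performed.
 */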

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
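
/*
 * The two ATOMIC_OPS() expansions above are where atomic_add(), atomic_sub(),
 * atomic_add_return() and atomic_sub_return() are actually defined: on
 * ARMv6+ from the ldrex/strex templates, otherwise from the IRQ-disabling
 * fallbacks.
 */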

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
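
/*
 * Illustrative sketch only (hypothetical code, not part of this header): the
 * classic use of these helpers is reference counting, e.g. a put path such as
 *
 *	struct foo {
 *		atomic_t refcnt;	// initialised with ATOMIC_INIT(1)
 *	};
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcnt))
 *			kfree(f);	// last reference dropped
 *	}
 *
 * atomic_dec_and_test() is built on atomic_sub_return() above, so it carries
 * the full-barrier semantics that releasing the last reference needs.
 */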

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
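
/*
 * With LPAE the architecture guarantees that a doubleword ldrd/strd to a
 * naturally aligned location is single-copy atomic, so the 64-bit read and
 * set above need no exclusive monitor at all.
 */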
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
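
/*
 * Without LPAE a plain strd is not guaranteed to be atomic, so atomic64_set()
 * must spin on ldrexd/strexd until the doubleword store completes as a single
 * exclusive transaction; atomic64_read() likewise uses ldrexd to obtain an
 * atomic snapshot of both words.
 */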
#endif

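/*
 * In the 64-bit templates below, the GCC operand modifiers %Q and %R select
 * the low and high 32-bit halves of a 64-bit operand, while %H names the
 * second register of the pair as required by ldrexd/strexd.
 */
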
#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	smp_mb();							\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	smp_mb();							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
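
/*
 * The two opcode arguments split the 64-bit arithmetic across the register
 * pair: adds/subs operate on the low words and set the carry flag, and
 * adc/sbc fold that carry (or borrow) into the high words.
 */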

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
					  long long new)
{
	long long oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
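
/*
 * atomic64_dec_if_positive() returns the decremented value but only writes it
 * back when the result is not negative; if the counter was already zero or
 * below, the store is skipped (the bmi branch) and the caller sees a negative
 * return value.
 */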

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%Q0, %Q0, %Q6\n"
"	adc	%R0, %R0, %R6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}
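
/*
 * atomic64_add_unless() returns non-zero only when the addition was actually
 * performed, i.e. when the old value differed from 'u';
 * atomic64_inc_not_zero() below is simply this with a = 1 and u = 0.
 */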

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif