/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)
#define ATOMIC_INIT(i)	{ (i) }

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

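/*
 * ATOMIC_OP() below is a template: each instantiation generates one
 * atomic_##op() as a LLOCK/SCOND retry loop. As a rough sketch (not
 * literal compiler output), ATOMIC_OP(add, +=, add) expands to:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %[val], [%[ctr]]	\n"
 *		"	add     %[val], %[val], %[i]	\n"
 *		"	scond   %[val], [%[ctr]]	\n"
 *		"	bnz     1b			\n"
 *		: [val] "=&r" (val)
 *		: [ctr] "r" (&v->counter), [i] "ir" (i)
 *		: "cc");
 *	}
 *
 * i.e. load-locked, modify, store-conditional, retry until the store
 * succeeds.
 */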
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

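/*
 * The _return variants differ from the void ops in two ways: they
 * return the *new* value of the counter, and they are fully ordered,
 * hence the smp_mb() on both sides. E.g. if v holds 5,
 * atomic_add_return(2, &v) sets v to 7 and returns 7.
 */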
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r"	(orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}

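/*
 * The fetch variants instead return the *old* value: if v holds 5,
 * atomic_fetch_add(2, &v) sets v to 7 but returns 5. As in the other
 * LLSC loops, the bnz retries until the scond succeeds.
 */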
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}
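/*
 * Concretely: an emulated atomic_add() is lock; load; add; store;
 * unlock. A plain store of the counter done outside that lock could
 * slip in between the load and the store and be silently overwritten,
 * which is why even this single-insn store takes the lock.
 */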

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#else	/* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

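/*
 * On EZNPS, the .di flavour of LD/ST above bypasses the data cache,
 * and the RMW ops below are custom "CTOP" instructions emitted as raw
 * .word encodings operating on fixed registers (r2 = data, r3 =
 * address), hence r2/r3 in the clobber lists.
 */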
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}
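/*
 * The CTOP instruction leaves the old value of the counter in r2 (as
 * relied upon by atomic_fetch_##op() below), so "mov %0, r2" copies it
 * back into temp, and "temp c_op i" then recomputes the new value
 * locally to provide the _return semantics.
 */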

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
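/*
 * Classic cmpxchg loop: re-read the counter after every failed
 * compare-and-swap until either the swap succeeds or the counter
 * equals @u. E.g. with v == 0, __atomic_add_unless(&v, 1, 0) returns 0
 * and leaves v untouched, which is what lets atomic_inc_not_zero()
 * below implement "take a reference only if the object is still live".
 */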

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */

typedef struct {
	aligned_u64 counter;
} atomic64_t;
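/* aligned_u64 provides the natural 8-byte alignment LLOCKD/SCONDD require */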

#define ATOMIC64_INIT(a) { (a) }

static inline long long atomic64_read(const atomic64_t *v)
{
	unsigned long long val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, long long a)
{
	/*
	 * This could have been a simple assignment in "C" but would need
	 * explicit volatile. Otherwise gcc optimizers could elide the store
	 * which borked atomic64 self-test
	 * In the inline asm version, memory clobber needed for exact same
	 * reason, to tell gcc about the store.
	 *
	 * This however is not needed for sibling atomic64_add() etc since both
	 * load/store are explicitly done in inline asm. As long as API is used
	 * for each access, gcc has no way to optimize away any load/store
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}
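/*
 * In the templates below, the %L and %H operand modifiers select the
 * low and high 32-bit halves of a 64-bit register pair. A 64-bit add
 * is thus a carry chain: add.f on the low words sets the carry flag,
 * adc folds it into the high words (likewise sub.f/sbc for subtract).
 */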
401
402#define ATOMIC64_OP(op, op1, op2) \
403static inline void atomic64_##op(long long a, atomic64_t *v) \
404{ \
405 unsigned long long val; \
406 \
407 __asm__ __volatile__( \
408 "1: \n" \
409 " llockd %0, [%1] \n" \
410 " " #op1 " %L0, %L0, %L2 \n" \
411 " " #op2 " %H0, %H0, %H2 \n" \
412 " scondd %0, [%1] \n" \
413 " bnz 1b \n" \
414 : "=&r"(val) \
415 : "r"(&v->counter), "ir"(a) \
416 : "cc"); \
417} \
418
#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long atomic64_##op##_return(long long a, atomic64_t *v)	\
{									\
	unsigned long long val;						\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd  %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: [val] "=&r"(val)						\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
{									\
	unsigned long long val, orig;					\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd  %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return orig;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

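/*
 * As with any cmpxchg, callers of atomic64_cmpxchg() detect success by
 * comparing the returned value against 'expected'; the two brne's bail
 * out to 2: as soon as either half of the current value differs.
 */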
static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * if (v != u) { v += a; ret = 1} else {ret = 0}
 * Returns 1 iff @v was not @u (i.e. if add actually happened)
 */
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	int op_done;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	mov	%1, 1		\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"	mov	%1, 0		\n"
	"2:				\n"
	"	add.f   %L0, %L0, %L3	\n"
	"	adc     %H0, %H0, %H3	\n"
	"	scondd  %0, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(val), "=&r" (op_done)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return op_done;
}

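/*
 * Example: atomic64_inc_not_zero() below is atomic64_add_unless(v, 1, 0),
 * i.e. "increment unless already zero", returning 1 only when the
 * increment actually happened.
 */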
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif