/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>
#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}
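/*
 * Illustrative sketch (pseudo-C, not the real expansion; llock()/scond()
 * stand in for the inline-asm instructions): ATOMIC_OP(add, +=, add)
 * generates an atomic_add() that retries the llock/scond pair until the
 * store-conditional succeeds, i.e. until nobody else wrote the counter
 * in between:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *		do {
 *			val = llock(&v->counter);	// locked load
 *			val += i;			// asm_op
 *		} while (!scond(val, &v->counter));	// failed -> bnz 1b
 *	}
 */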
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}
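/*
 * Note the two return flavours generated above: atomic_##op##_return() hands
 * back the updated value (val, after asm_op was applied), whereas
 * atomic_fetch_##op() hands back the value observed before the update
 * (orig, as read by llock).
 */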
#else	/* !CONFIG_ARC_HAS_LLSC */
#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#endif
/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}
#endif /* !CONFIG_ARC_HAS_LLSC */
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
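/*
 * Each ATOMIC_OPS() line above expands to three helpers, e.g. for "add":
 * atomic_add(), atomic_add_return() and atomic_fetch_add().
 */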
#define atomic_andnot atomic_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
#else /* CONFIG_ARC_PLAT_EZNPS */
static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}
static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return temp;							\
}
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
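/* Only the atomic-add CTOP is used here; sub is derived by adding the negated operand */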
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
#endif /* CONFIG_ARC_PLAT_EZNPS */
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
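/*
 * Usage sketch (hypothetical caller): take a reference only while the object
 * is still live, i.e. only if the count was not already zero:
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) != 0)
 *		... got the reference ...
 *
 * which is roughly what atomic_inc_not_zero() below amounts to.
 */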
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)	{ (i) }
#include <asm-generic/atomic64.h>