#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 * Copyright 1999,2009 IBM Corp.
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
 *
 */

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }
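
/*
 * Editor's note: ATOMIC_INIT() is for static initialization, e.g. (with a
 * hypothetical variable name):
 *
 *	static atomic_t pending = ATOMIC_INIT(0);
 *
 * For an atomic_t that is already live, use atomic_set() instead.
 */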

#define __CS_LOOP(ptr, op_val, op_string) ({				\
	int old_val, new_val;						\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
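
/*
 * Editor's note: the resource-counting use mentioned in the header comment,
 * sketched with hypothetical names on the add/sub helper macros above;
 * guarded out so it is never compiled.
 */
#if 0
struct obj {
	atomic_t refcount;
};

static void obj_get(struct obj *o)
{
	atomic_inc(&o->refcount);		/* one more owner */
}

static int obj_put(struct obj *o)
{
	/* true only for the thread that drops the final reference */
	return atomic_dec_and_test(&o->refcount);
}
#endif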

static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, mask, "or");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}
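
/*
 * Editor's note: atomic_cmpxchg() returns the value actually found in
 * v->counter; the store happened iff that equals 'old'. A hypothetical
 * capped-increment built on it (guarded out, never compiled):
 */
#if 0
static int add_capped(atomic_t *v, int cap)
{
	int c = atomic_read(v);

	while (c < cap) {
		int old = atomic_cmpxchg(v, c, c + 1);
		if (old == c)		/* swap succeeded */
			return 1;
		c = old;		/* somebody else won: re-examine */
	}
	return 0;			/* already at the cap */
}
#endif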

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
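
/*
 * Editor's note: __atomic_add_unless() returns the counter value it observed
 * before any update, so callers compare it against 'u'; the generic wrappers
 * in <linux/atomic.h> are built on exactly that, e.g.
 *
 *	atomic_add_unless(v, a, u)  ==  (__atomic_add_unless(v, a, u) != u)
 */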

#undef __CS_LOOP

#define ATOMIC64_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	long long old_val, new_val;					\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic64_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "agr");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "sgr");
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

#undef __CSG_LOOP

#else /* CONFIG_64BIT */
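
/*
 * Editor's note: 31-bit s390 has no 64-bit csg instruction, so the helpers
 * below use cds (compare double and swap) on an even/odd register pair; the
 * register_pair union from <asm/types.h> (pulled in via <linux/types.h>)
 * gives the inline asm an 8-byte operand that the "d" constraints map onto
 * such a pair. Everything past xchg/cmpxchg is then a plain C retry loop.
 */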

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp));
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,%1\n"
		"0:	cds	%0,%2,%1\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%2,%1"
		: "+&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old - i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & ~mask;	/* match the 64-bit "ngr" semantics */
	} while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
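
/*
 * Editor's note: atomic64_dec_if_positive() only decrements while the result
 * stays >= 0 and returns that result, so a negative return means the counter
 * was left untouched. Hypothetical use (guarded out, never compiled):
 */
#if 0
static int take_token(atomic64_t *tokens)
{
	return atomic64_dec_if_positive(tokens) >= 0;	/* got one? */
}
#endif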

#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
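
/*
 * Editor's note: these are the generic hooks callers use to ask for full
 * ordering around an atomic_inc()/atomic_dec(); this header maps them all
 * to smp_mb(). A typical caller (sketch):
 *
 *	smp_mb__before_atomic_dec();	(order prior accesses vs. the dec)
 *	atomic_dec(&obj->refcount);
 */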

#endif /* __ARCH_S390_ATOMIC__ */