/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <asm/atomic.h>.
 */
#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <arch/spr_def.h>
/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
#define atomic_set(v, i) ((v)->counter = (i))
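/*
 * atomic_set() above needs no special instruction: an aligned 32-bit
 * store is naturally atomic on this platform.
 */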
/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	int val;
	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
	smp_mb();  /* barrier for proper semantics */
	val = __insn_cmpexch4((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}
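/*
 * Note: the TILE-Gx cmpexch instruction takes its comparand from the
 * CMPEXCH_VALUE special-purpose register rather than from a normal
 * operand, which is why atomic_cmpxchg() loads "o" with __insn_mtspr()
 * before issuing the exchange.
 */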
static inline int atomic_xchg(atomic_t *v, int n)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_exch4((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}
static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}
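/*
 * atomic_add() issues no barriers: it does not return a value, so the
 * barrier rule described at the top of this file does not apply.
 */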
static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}
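/*
 * The trailing smp_mb() can be relaxed to a compiler barrier() here:
 * computing "+ i" depends on the value the fetchadd loads, so the
 * memory operation must already have completed before the sum is
 * available.
 */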
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = atomic_cmpxchg(v, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}
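/*
 * Illustrative use (the "obj->refcnt" field is hypothetical):
 * atomic_add_unless() returns non-zero if it performed the add, which
 * makes it the natural building block for "take a reference only while
 * the object is still live":
 *
 *	if (atomic_add_unless(&obj->refcnt, 1, 0)) {
 *		... the object cannot be freed under us ...
 *	}
 *
 * atomic64_inc_not_zero() below is defined in exactly this way.
 */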
/* Now the true 64-bit operations. */
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v, i)	((v)->counter = (i))
static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
	val = __insn_cmpexch((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}
static inline long atomic64_xchg(atomic64_t *v, long n)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_exch((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}
static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}
static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = atomic64_cmpxchg(v, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}
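/*
 * The remaining 64-bit operations are derived from the primitives above;
 * subtraction, for instance, is simply addition of the negated operand.
 */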
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
/* Atomic dec and inc don't implement barrier, so provide them if needed. */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
#define xchg(ptr, x)							\
	((typeof(*(ptr)))						\
	 ((sizeof(*(ptr)) == sizeof(atomic_t)) ?			\
	  atomic_xchg((atomic_t *)(ptr), (long)(x)) :			\
	  (sizeof(*(ptr)) == sizeof(atomic_long_t)) ?			\
	  atomic_long_xchg((atomic_long_t *)(ptr), (long)(x)) :	\
	  __xchg_called_with_bad_pointer()))
#define cmpxchg(ptr, o, n)						\
	((typeof(*(ptr)))						\
	 ((sizeof(*(ptr)) == sizeof(atomic_t)) ?			\
	  atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) :	\
	  (sizeof(*(ptr)) == sizeof(atomic_long_t)) ?			\
	  atomic_long_cmpxchg((atomic_long_t *)(ptr), (long)(o), (long)(n)) : \
	  __cmpxchg_called_with_bad_pointer()))
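/*
 * __xchg_called_with_bad_pointer() and __cmpxchg_called_with_bad_pointer()
 * are deliberately never defined, so an xchg()/cmpxchg() on an object that
 * is neither 4 nor 8 bytes wide fails at link time instead of silently
 * misbehaving.
 */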
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */