include/asm-alpha/atomic.h
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */


/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us.  We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

#define atomic_read(v)		((v)->counter + 0)
#define atomic64_read(v)	((v)->counter + 0)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))
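/*
 * Usage sketch (editorial addition, not part of the original header;
 * example_reset is a hypothetical helper): ATOMIC_INIT covers static
 * initialization, atomic_set covers runtime (re)initialization, and
 * atomic_read is a plain load.  None of the three implies a memory
 * barrier.
 */
static __inline__ void example_reset(atomic_t *v)
{
	atomic_set(v, 0);	/* plain store of zero to the counter */
}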

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"		/* load-locked the 32-bit counter */
	"	addl %0,%2,%0\n"	/* add the increment */
	"	stl_c %0,%1\n"		/* store-conditional; %0 becomes 0 on failure */
	"	beq %0,2f\n"		/* failed: retry via the out-of-line stub */
	".subsection 2\n"
	"2:	br 1b\n"		/* out of line, so fall-through is the common case */
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}


/*
 * Same as above, but return the result value.  These are wrapped in
 * smp_mb() on both sides, so the returned value is fully ordered.
 */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"	/* result = old + i */
	"	addl %0,%3,%0\n"	/* temp = old + i, stored back below */
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
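/*
 * Usage sketch (editorial addition; example_next_id is a hypothetical
 * helper, not part of this header): because atomic_add_return hands back
 * the post-increment value, it can issue unique, monotonically increasing
 * tokens without a lock.
 */
static __inline__ long example_next_id(atomic_t *ctr)
{
	return atomic_add_return(1, ctr);	/* new value, fully ordered */
}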

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
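/*
 * Usage sketch (editorial addition; example_get is a hypothetical helper):
 * take a reference on an object only while its count is still non-zero,
 * the usual pattern for lookups that race with the final release.
 * Returns non-zero on success.
 */
static __inline__ int example_get(atomic_t *refcount)
{
	/* fails once the count has already dropped to zero */
	return atomic_inc_not_zero(refcount);
}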

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v)		atomic_sub_return(1,(v))
#define atomic64_dec_return(v)		atomic64_sub_return(1,(v))

#define atomic_inc_return(v)		atomic_add_return(1,(v))
#define atomic64_inc_return(v)		atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v)			atomic_add(1,(v))
#define atomic64_inc(v)			atomic64_add(1,(v))

#define atomic_dec(v)			atomic_sub(1,(v))
#define atomic64_dec(v)			atomic64_sub(1,(v))

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic.h>
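/*
 * Usage sketch (editorial addition; example_put and example_release are
 * hypothetical): drop a reference and run the release callback only on
 * the final put.  atomic_dec_and_test is fully ordered, so all stores
 * made while the reference was held are visible before the release runs.
 */
static __inline__ void example_put(atomic_t *refcount, void (*example_release)(void))
{
	if (atomic_dec_and_test(refcount))	/* true only for the last reference */
		example_release();
}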
#endif /* _ALPHA_ATOMIC_H */