/* atomic.h: atomic operation emulation for FR-V
 *
 * For an explanation of how atomic ops work in this arch, see:
 *   Documentation/fujitsu/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/config.h>
#include <linux/types.h>
#include <asm/spr-regs.h>

#ifdef CONFIG_SMP
#error not SMP safe
#endif

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 *
 * We do not have SMP systems, so we don't have to deal with that.
 */

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

typedef struct {
	int counter;
} atomic_t;

#define ATOMIC_INIT(i)		{ (i) }
#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

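/*
 * Minimal usage sketch (illustrative only, not part of this header);
 * 'nusers' and do_something() are hypothetical names:
 *
 *	static atomic_t nusers = ATOMIC_INIT(0);
 *
 *	atomic_set(&nusers, 5);
 *	if (atomic_read(&nusers) > 0)
 *		do_something();
 */
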
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long val;

	asm("0: \n"
	    " orcc gr0,gr0,gr0,icc3 \n"		/* set ICC3.Z */
	    " ckeq icc3,cc7 \n"
	    " ld.p %M0,%1 \n"			/* LD.P/ORCR must be atomic */
	    " orcr cc7,cc7,cc3 \n"		/* set CC3 to true */
	    " add%I2 %1,%2,%1 \n"
	    " cst.p %1,%M0 ,cc3,#1 \n"
	    " corcc gr29,gr29,gr0 ,cc3,#1 \n"	/* clear ICC3.Z if store happens */
	    " beq icc3,#0,0b \n"
	    : "+U"(v->counter), "=&r"(val)
	    : "NPr"(i)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return val;
}
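
/*
 * Illustrative C rendering of the loop above (not part of the original
 * header): the LD.P/ORCR pairing described in
 * Documentation/fujitsu/frv/atomic-ops.txt makes the load and the
 * conditional store behave as one atomic unit, so the sequence is in
 * effect:
 *
 *	do {
 *		val = v->counter + i;		// load, then add
 *	} while (!store_unless_interfered(&v->counter, val));
 *	return val;				// the new value
 *
 * where store_unless_interfered() is a hypothetical helper standing in
 * for the CST.P/CORCC conditional store and the BEQ retry.
 */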

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long val;

	asm("0: \n"
	    " orcc gr0,gr0,gr0,icc3 \n"		/* set ICC3.Z */
	    " ckeq icc3,cc7 \n"
	    " ld.p %M0,%1 \n"			/* LD.P/ORCR must be atomic */
	    " orcr cc7,cc7,cc3 \n"		/* set CC3 to true */
	    " sub%I2 %1,%2,%1 \n"
	    " cst.p %1,%M0 ,cc3,#1 \n"
	    " corcc gr29,gr29,gr0 ,cc3,#1 \n"	/* clear ICC3.Z if store happens */
	    " beq icc3,#0,0b \n"
	    : "+U"(v->counter), "=&r"(val)
	    : "NPr"(i)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return val;
}

#else

extern int atomic_add_return(int i, atomic_t *v);
extern int atomic_sub_return(int i, atomic_t *v);

#endif

static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
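
/*
 * Usage sketch (illustrative, not part of this header): a simple
 * reference count built on the operations above; 'refcount' and
 * release_resource() are hypothetical names.
 *
 *	static atomic_t refcount = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcount);			// take a reference
 *	if (atomic_dec_and_test(&refcount))	// drop it; true when count hits 0
 *		release_resource();
 */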

#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
static inline
unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
{
	unsigned long old, tmp;

	asm volatile(
	    "0: \n"
	    " orcc gr0,gr0,gr0,icc3 \n"		/* set ICC3.Z */
	    " ckeq icc3,cc7 \n"
	    " ld.p %M0,%1 \n"			/* LD.P/ORCR are atomic */
	    " orcr cc7,cc7,cc3 \n"		/* set CC3 to true */
	    " and%I3 %1,%3,%2 \n"
	    " cst.p %2,%M0 ,cc3,#1 \n"		/* if store happens... */
	    " corcc gr29,gr29,gr0 ,cc3,#1 \n"	/* ... clear ICC3.Z */
	    " beq icc3,#0,0b \n"
	    : "+U"(*v), "=&r"(old), "=r"(tmp)
	    : "NPr"(~mask)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return old;
}

static inline
unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
{
	unsigned long old, tmp;

	asm volatile(
	    "0: \n"
	    " orcc gr0,gr0,gr0,icc3 \n"		/* set ICC3.Z */
	    " ckeq icc3,cc7 \n"
	    " ld.p %M0,%1 \n"			/* LD.P/ORCR are atomic */
	    " orcr cc7,cc7,cc3 \n"		/* set CC3 to true */
	    " or%I3 %1,%3,%2 \n"
	    " cst.p %2,%M0 ,cc3,#1 \n"		/* if store happens... */
	    " corcc gr29,gr29,gr0 ,cc3,#1 \n"	/* ... clear ICC3.Z */
	    " beq icc3,#0,0b \n"
	    : "+U"(*v), "=&r"(old), "=r"(tmp)
	    : "NPr"(mask)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return old;
}

static inline
unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
{
	unsigned long old, tmp;

	asm volatile(
	    "0: \n"
	    " orcc gr0,gr0,gr0,icc3 \n"		/* set ICC3.Z */
	    " ckeq icc3,cc7 \n"
	    " ld.p %M0,%1 \n"			/* LD.P/ORCR are atomic */
	    " orcr cc7,cc7,cc3 \n"		/* set CC3 to true */
	    " xor%I3 %1,%3,%2 \n"
	    " cst.p %2,%M0 ,cc3,#1 \n"		/* if store happens... */
	    " corcc gr29,gr29,gr0 ,cc3,#1 \n"	/* ... clear ICC3.Z */
	    " beq icc3,#0,0b \n"
	    : "+U"(*v), "=&r"(old), "=r"(tmp)
	    : "NPr"(mask)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return old;
}

#else

extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);

#endif

#define atomic_clear_mask(mask, v)	atomic_test_and_ANDNOT_mask((mask), (v))
#define atomic_set_mask(mask, v)	atomic_test_and_OR_mask((mask), (v))
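
/*
 * Usage sketch (illustrative, not part of this header): the mask
 * operations atomically modify a word of flag bits and return its
 * previous contents; 'flags' and the bit values are hypothetical.
 *
 *	static volatile unsigned long flags;
 *
 *	atomic_set_mask(0x0001UL, &flags);	// set bit 0
 *	atomic_clear_mask(0x0001UL, &flags);	// clear bit 0 again
 *	if (atomic_test_and_OR_mask(0x0002UL, &flags) & 0x0002UL)
 *		already_set();			// bit 1 was set beforehand
 */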

/*****************************************************************************/
/*
 * exchange value with memory
 */
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define xchg(ptr, x) \
({ \
	__typeof__(ptr) __xg_ptr = (ptr); \
	__typeof__(*(ptr)) __xg_orig; \
 \
	switch (sizeof(__xg_orig)) { \
	case 1: \
		asm volatile( \
			"0: \n" \
			" orcc gr0,gr0,gr0,icc3 \n" \
			" ckeq icc3,cc7 \n" \
			" ldub.p %M0,%1 \n" \
			" orcr cc7,cc7,cc3 \n" \
			" cstb.p %2,%M0 ,cc3,#1 \n" \
			" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
			" beq icc3,#0,0b \n" \
			: "+U"(*__xg_ptr), "=&r"(__xg_orig) \
			: "r"(x) \
			: "memory", "cc7", "cc3", "icc3" \
			); \
		break; \
 \
	case 2: \
		asm volatile( \
			"0: \n" \
			" orcc gr0,gr0,gr0,icc3 \n" \
			" ckeq icc3,cc7 \n" \
			" lduh.p %M0,%1 \n" \
			" orcr cc7,cc7,cc3 \n" \
			" csth.p %2,%M0 ,cc3,#1 \n" \
			" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
			" beq icc3,#0,0b \n" \
			: "+U"(*__xg_ptr), "=&r"(__xg_orig) \
			: "r"(x) \
			: "memory", "cc7", "cc3", "icc3" \
			); \
		break; \
 \
	case 4: \
		asm volatile( \
			"0: \n" \
			" orcc gr0,gr0,gr0,icc3 \n" \
			" ckeq icc3,cc7 \n" \
			" ld.p %M0,%1 \n" \
			" orcr cc7,cc7,cc3 \n" \
			" cst.p %2,%M0 ,cc3,#1 \n" \
			" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
			" beq icc3,#0,0b \n" \
			: "+U"(*__xg_ptr), "=&r"(__xg_orig) \
			: "r"(x) \
			: "memory", "cc7", "cc3", "icc3" \
			); \
		break; \
 \
	default: \
		__xg_orig = 0; \
		asm volatile("break"); \
		break; \
	} \
 \
	__xg_orig; \
})

#else

extern uint8_t  __xchg_8 (uint8_t i,  volatile void *v);
extern uint16_t __xchg_16(uint16_t i, volatile void *v);
extern uint32_t __xchg_32(uint32_t i, volatile void *v);

#define xchg(ptr, x) \
({ \
	__typeof__(ptr) __xg_ptr = (ptr); \
	__typeof__(*(ptr)) __xg_orig; \
 \
	switch (sizeof(__xg_orig)) { \
	case 1: __xg_orig = (__typeof__(*(ptr))) __xchg_8 ((uint8_t)  x, __xg_ptr); break; \
	case 2: __xg_orig = (__typeof__(*(ptr))) __xchg_16((uint16_t) x, __xg_ptr); break; \
	case 4: __xg_orig = (__typeof__(*(ptr))) __xchg_32((uint32_t) x, __xg_ptr); break; \
	default: \
		__xg_orig = 0; \
		asm volatile("break"); \
		break; \
	} \
	__xg_orig; \
})

#endif

#define tas(ptr) (xchg((ptr), 1))

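/*
 * Usage sketch (illustrative, not part of this header): xchg() swaps a
 * new value into memory and returns the old contents in one atomic
 * step, and tas() is the classic test-and-set built on it; 'lock' is a
 * hypothetical variable.
 *
 *	static volatile int lock;
 *
 *	while (tas(&lock))	// old value 0 means we acquired it
 *		;		// otherwise spin until it is released
 *	...
 *	lock = 0;		// release
 */
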
/*****************************************************************************/
/*
 * compare and conditionally exchange value with memory
 * - if (*ptr == test) then orig = *ptr; *ptr = new;
 * - if (*ptr != test) then orig = *ptr; *ptr is left unchanged
 */
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define cmpxchg(ptr, test, new) \
({ \
	__typeof__(ptr) __xg_ptr = (ptr); \
	__typeof__(*(ptr)) __xg_orig, __xg_tmp; \
	__typeof__(*(ptr)) __xg_test = (test); \
	__typeof__(*(ptr)) __xg_new = (new); \
 \
	switch (sizeof(__xg_orig)) { \
	case 1: \
		asm volatile( \
			"0: \n" \
			" orcc gr0,gr0,gr0,icc3 \n" \
			" ckeq icc3,cc7 \n" \
			" ldub.p %M0,%1 \n" \
			" orcr cc7,cc7,cc3 \n" \
			" sub%I4 %1,%4,%2 \n" \
			" sllcc %2,#24,gr0,icc0 \n" \
			" bne icc0,#0,1f \n" \
			" cstb.p %3,%M0 ,cc3,#1 \n" \
			" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
			" beq icc3,#0,0b \n" \
			"1: \n" \
			: "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
			: "r"(__xg_new), "NPr"(__xg_test) \
			: "memory", "cc7", "cc3", "icc3", "icc0" \
			); \
		break; \
 \
	case 2: \
		asm volatile( \
			"0: \n" \
			" orcc gr0,gr0,gr0,icc3 \n" \
			" ckeq icc3,cc7 \n" \
			" lduh.p %M0,%1 \n" \
			" orcr cc7,cc7,cc3 \n" \
			" sub%I4 %1,%4,%2 \n" \
			" sllcc %2,#16,gr0,icc0 \n" \
			" bne icc0,#0,1f \n" \
			" csth.p %3,%M0 ,cc3,#1 \n" \
			" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
			" beq icc3,#0,0b \n" \
			"1: \n" \
			: "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
			: "r"(__xg_new), "NPr"(__xg_test) \
			: "memory", "cc7", "cc3", "icc3", "icc0" \
			); \
		break; \
 \
	case 4: \
		asm volatile( \
			"0: \n" \
			" orcc gr0,gr0,gr0,icc3 \n" \
			" ckeq icc3,cc7 \n" \
			" ld.p %M0,%1 \n" \
			" orcr cc7,cc7,cc3 \n" \
			" sub%I4cc %1,%4,%2,icc0 \n" \
			" bne icc0,#0,1f \n" \
			" cst.p %3,%M0 ,cc3,#1 \n" \
			" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
			" beq icc3,#0,0b \n" \
			"1: \n" \
			: "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
			: "r"(__xg_new), "NPr"(__xg_test) \
			: "memory", "cc7", "cc3", "icc3", "icc0" \
			); \
		break; \
 \
	default: \
		__xg_orig = 0; \
		asm volatile("break"); \
		break; \
	} \
 \
	__xg_orig; \
})

#else

extern uint8_t  __cmpxchg_8 (uint8_t *v,  uint8_t test,  uint8_t new);
extern uint16_t __cmpxchg_16(uint16_t *v, uint16_t test, uint16_t new);
extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);

#define cmpxchg(ptr, test, new) \
({ \
	__typeof__(ptr) __xg_ptr = (ptr); \
	__typeof__(*(ptr)) __xg_orig; \
	__typeof__(*(ptr)) __xg_test = (test); \
	__typeof__(*(ptr)) __xg_new = (new); \
 \
	switch (sizeof(__xg_orig)) { \
	case 1: __xg_orig = __cmpxchg_8 (__xg_ptr, __xg_test, __xg_new); break; \
	case 2: __xg_orig = __cmpxchg_16(__xg_ptr, __xg_test, __xg_new); break; \
	case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
	default: \
		__xg_orig = 0; \
		asm volatile("break"); \
		break; \
	} \
 \
	__xg_orig; \
})

#endif

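/*
 * Usage sketch (illustrative, not part of this header): the typical
 * compare-and-swap retry loop built on cmpxchg(); 'shared' is a
 * hypothetical variable.
 *
 *	static unsigned long shared;
 *	unsigned long old, new;
 *
 *	do {
 *		old = shared;
 *		new = old * 2;
 *	} while (cmpxchg(&shared, old, new) != old);
 */
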
#endif /* _ASM_ATOMIC_H */