#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 *  include/asm-s390/atomic.h
 *
 *  S390 version
 *    Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow,
 *               Arnd Bergmann (arndb@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 * S390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */

typedef struct {
	volatile int counter;
} __attribute__ ((aligned (4))) atomic_t;
#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

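/*
 * __CS_LOOP builds a compare-and-swap retry loop: the counter is
 * loaded, the operation is applied to a copy, and CS stores the
 * result only if the counter is still unchanged.  If another CPU
 * modified it in the meantime, CS reloads the current value and
 * the loop branches back (jl 0b) to retry.
 */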
#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	__asm__ __volatile__("   l     %0,0(%3)\n"			\
			     "0: lr    %1,%0\n"				\
			     op_string "   %1,%4\n"			\
			     "   cs    %0,%1,0(%3)\n"			\
			     "   jl    0b"				\
			     : "=&d" (old_val), "=&d" (new_val),	\
			       "=m" (((atomic_t *)(ptr))->counter)	\
			     : "a" (ptr), "d" (op_val),			\
			       "m" (((atomic_t *)(ptr))->counter)	\
			     : "cc", "memory" );			\
	new_val;							\
})
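/*
 * A naturally aligned 32-bit load or store is atomic on s390, so
 * atomic_read() and atomic_set() compile to plain accesses of the
 * volatile counter.
 */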
#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))

static __inline__ void atomic_add(int i, atomic_t * v)
{
	__CS_LOOP(v, i, "ar");
}
static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "ar");
}
static __inline__ int atomic_add_negative(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "ar") < 0;
}
static __inline__ void atomic_sub(int i, atomic_t * v)
{
	__CS_LOOP(v, i, "sr");
}
static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "sr");
}
static __inline__ void atomic_inc(volatile atomic_t * v)
{
	__CS_LOOP(v, 1, "ar");
}
static __inline__ int atomic_inc_return(volatile atomic_t * v)
{
	return __CS_LOOP(v, 1, "ar");
}

static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
{
	return __CS_LOOP(v, 1, "ar") == 0;
}
static __inline__ void atomic_dec(volatile atomic_t * v)
{
	__CS_LOOP(v, 1, "sr");
}
static __inline__ int atomic_dec_return(volatile atomic_t * v)
{
	return __CS_LOOP(v, 1, "sr");
}
static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
{
	return __CS_LOOP(v, 1, "sr") == 0;
}
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
{
	__CS_LOOP(v, ~mask, "nr");
}
static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
{
	__CS_LOOP(v, mask, "or");
}
#undef __CS_LOOP
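
/*
 * Usage sketch (hypothetical caller, not part of this header): a
 * lock-free reference count shared between process and interrupt
 * context:
 *
 *	static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcnt);			take another reference
 *	if (atomic_dec_and_test(&refcnt))	drop it; on the last
 *		kfree(obj);			put, free the object
 */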

#ifdef __s390x__
typedef struct {
	volatile long long counter;
} __attribute__ ((aligned (8))) atomic64_t;
#define ATOMIC64_INIT(i)  { (i) }

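/*
 * 64-bit variant of __CS_LOOP: LG/LGR/CSG operate on the full
 * doubleword, so the same retry scheme covers atomic64_t.
 */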
#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	__asm__ __volatile__("   lg    %0,0(%3)\n"			\
			     "0: lgr   %1,%0\n"				\
			     op_string "   %1,%4\n"			\
			     "   csg   %0,%1,0(%3)\n"			\
			     "   jl    0b"				\
			     : "=&d" (old_val), "=&d" (new_val),	\
			       "=m" (((atomic64_t *)(ptr))->counter)	\
			     : "a" (ptr), "d" (op_val),			\
			       "m" (((atomic64_t *)(ptr))->counter)	\
			     : "cc", "memory" );			\
	new_val;							\
})
#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

static __inline__ void atomic64_add(long long i, atomic64_t * v)
{
	__CSG_LOOP(v, i, "agr");
}
static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
{
	return __CSG_LOOP(v, i, "agr");
}
static __inline__ long long atomic64_add_negative(long long i, atomic64_t * v)
{
	return __CSG_LOOP(v, i, "agr") < 0;
}
static __inline__ void atomic64_sub(long long i, atomic64_t * v)
{
	__CSG_LOOP(v, i, "sgr");
}
static __inline__ void atomic64_inc(volatile atomic64_t * v)
{
	__CSG_LOOP(v, 1, "agr");
}
static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
{
	return __CSG_LOOP(v, 1, "agr");
}
static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
{
	return __CSG_LOOP(v, 1, "agr") == 0;
}
static __inline__ void atomic64_dec(volatile atomic64_t * v)
{
	__CSG_LOOP(v, 1, "sgr");
}
static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
{
	return __CSG_LOOP(v, 1, "sgr");
}
static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
{
	return __CSG_LOOP(v, 1, "sgr") == 0;
}
static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}
static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#undef __CSG_LOOP
#endif /* __s390x__ */

/*
 * Returns 0 if expected_oldval matched the value in *v (the swap was
 * performed), 1 if it did not (the swap failed).
 *
 * This is non-portable, use bitops or spinlocks instead!
 */
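/*
 * Implementation note: CS leaves its result in the condition code;
 * IPM inserts that code into the register and SRL 28 shifts it into
 * the low bits, so the return value is exactly the cc: 0 when the
 * swap was performed, 1 when it was not.
 */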
static __inline__ int
atomic_compare_and_swap(int expected_oldval, int new_val, atomic_t *v)
{
	int retval;

	__asm__ __volatile__(
		"  lr   %0,%3\n"
		"  cs   %0,%4,0(%2)\n"
		"  ipm  %0\n"
		"  srl  %0,28\n"
		"0:"
		: "=&d" (retval), "=m" (v->counter)
		: "a" (v), "d" (expected_oldval), "d" (new_val),
		  "m" (v->counter) : "cc", "memory" );
	return retval;
}

static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	__asm__ __volatile__("  cs   %0,%3,0(%2)\n"
			     : "+d" (old), "=m" (v->counter)
			     : "a" (v), "d" (new), "m" (v->counter)
			     : "cc", "memory" );
	return old;
}

#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

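/*
 * atomic_add_unless() loops on atomic_cmpxchg() until either the add
 * succeeds against an unchanged counter or the counter reaches (u);
 * it returns non-zero iff the add was performed.  Hypothetical caller
 * code (not part of this header):
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	object is already being torn down
 */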
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */