arch/arm64/include/asm/cmpxchg.h
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline unsigned long __xchg_case_##name(unsigned long x,	\
					       volatile void *ptr)	\
{									\
	unsigned long ret, tmp;						\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ld" #acq "xr" #sz "\t%" #w "0, %2\n"			\
	"	st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n"		\
	"	cbnz	%w1, 1b\n"					\
	"	" #mb,							\
	/* LSE atomics */						\
	"	swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n"	\
		__nops(3)						\
	"	" #nop_lse)						\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr)	\
	: "r" (x)							\
	: cl);								\
									\
	return ret;							\
}

__XCHG_CASE(w, b,     1,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     2,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     4,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_1,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_2,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_4,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_1,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_2,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_4,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_1, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_2, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_4, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE
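
/*
 * Illustrative sketch, not part of this header: roughly what
 * __XCHG_CASE(w,  ,  mb_4, dmb ish, nop,  , a, l, "memory") pastes
 * together for the full-barrier 32-bit case.  ARM64_LSE_ATOMIC_INSN
 * selects between the two sequences (via the alternatives framework
 * when LSE support is built in), so the same function body covers both
 * classes of CPU:
 *
 *	static inline unsigned long __xchg_case_mb_4(unsigned long x,
 *						     volatile void *ptr)
 *	{
 *		unsigned long ret, tmp;
 *
 *		asm volatile(ARM64_LSE_ATOMIC_INSN(
 *		// LL/SC: exclusive load, store-release, retry, then DMB
 *		"	prfm	pstl1strm, %2\n"
 *		"1:	ldxr	%w0, %2\n"
 *		"	stlxr	%w1, %w3, %2\n"
 *		"	cbnz	%w1, 1b\n"
 *		"	dmb ish",
 *		// LSE: one acquire+release swap instruction plus padding nops
 *		"	swpal	%w3, %w0, %2\n"
 *			__nops(3)
 *		"	nop")
 *		: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr)
 *		: "r" (x)
 *		: "memory");
 *
 *		return ret;
 *	}
 */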

#define __XCHG_GEN(sfx)						\
static inline unsigned long __xchg##sfx(unsigned long x,	\
					volatile void *ptr,	\
					int size)		\
{								\
	switch (size) {						\
	case 1:							\
		return __xchg_case##sfx##_1(x, ptr);		\
	case 2:							\
		return __xchg_case##sfx##_2(x, ptr);		\
	case 4:							\
		return __xchg_case##sfx##_4(x, ptr);		\
	case 8:							\
		return __xchg_case##sfx##_8(x, ptr);		\
	default:						\
		BUILD_BUG();					\
	}							\
								\
	unreachable();						\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr)));	\
	__ret;								\
})

/* xchg */
#define xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
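
/*
 * Illustrative usage, not part of this header; the variable and values
 * are made up.  sizeof(*(ptr)) picks the 1/2/4/8-byte case at compile
 * time (for an unsigned int this resolves to __xchg_case_mb_4() and
 * friends above), and the suffix picks the ordering: xchg() is fully
 * ordered, xchg_acquire()/xchg_release() are one-way barriers, and
 * xchg_relaxed() guarantees atomicity only:
 *
 *	static unsigned int state;
 *
 *	static void example(void)
 *	{
 *		unsigned int old;
 *
 *		old = xchg(&state, 1);		// fully ordered swap
 *		old = xchg_relaxed(&state, 2);	// atomic, no barriers
 *		old = xchg_acquire(&state, 3);	// orders later accesses
 *		old = xchg_release(&state, 4);	// orders earlier accesses
 *	}
 */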

#define __CMPXCHG_GEN(sfx)					\
static inline unsigned long __cmpxchg##sfx(volatile void *ptr,	\
					   unsigned long old,	\
					   unsigned long new,	\
					   int size)		\
{								\
	switch (size) {						\
	case 1:							\
		return __cmpxchg_case##sfx##_1(ptr, (u8)old, new);	\
	case 2:							\
		return __cmpxchg_case##sfx##_2(ptr, (u16)old, new);	\
	case 4:							\
		return __cmpxchg_case##sfx##_4(ptr, old, new);	\
	case 8:							\
		return __cmpxchg_case##sfx##_8(ptr, old, new);	\
	default:						\
		BUILD_BUG();					\
	}							\
								\
	unreachable();						\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

/* cmpxchg */
#define cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define cmpxchg_local		cmpxchg_relaxed
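
/*
 * Illustrative usage, not part of this header; the variable and helper
 * are made up.  cmpxchg() returns the value it found at the location,
 * so success is detected by comparing against the expected old value;
 * the classic read-modify-write retry loop looks like this:
 *
 *	static unsigned long counter;
 *
 *	static void add_one(void)
 *	{
 *		unsigned long old, new;
 *
 *		do {
 *			old = READ_ONCE(counter);
 *			new = old + 1;
 *		} while (cmpxchg(&counter, old, new) != old);
 *	}
 *
 * cmpxchg_relaxed()/_acquire()/_release() provide the same operation
 * with weaker ordering, and cmpxchg_local() is simply the relaxed form.
 */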

/* cmpxchg64 */
#define cmpxchg64_relaxed	cmpxchg_relaxed
#define cmpxchg64_acquire	cmpxchg_acquire
#define cmpxchg64_release	cmpxchg_release
#define cmpxchg64		cmpxchg
#define cmpxchg64_local		cmpxchg_local

/* cmpxchg_double */
#define system_has_cmpxchg_double()	1

#define __cmpxchg_double_check(ptr1, ptr2) \
({ \
	if (sizeof(*(ptr1)) != 8) \
		BUILD_BUG(); \
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
})

#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
				     (unsigned long)(n1), (unsigned long)(n2), \
				     ptr1); \
	__ret; \
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
				  (unsigned long)(n1), (unsigned long)(n2), \
				  ptr1); \
	__ret; \
})
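
/*
 * Illustrative usage, not part of this header; the structure is made up.
 * cmpxchg_double() atomically compares and swaps two adjacent 64-bit
 * words (adjacency and word size are checked by __cmpxchg_double_check(),
 * and the pair should be 16-byte aligned) and returns 1 on success,
 * 0 on failure:
 *
 *	struct pair {
 *		unsigned long lo;
 *		unsigned long hi;
 *	} __aligned(16);
 *
 *	static struct pair p;
 *
 *	static bool update_pair(unsigned long old_lo, unsigned long old_hi,
 *				unsigned long new_lo, unsigned long new_hi)
 *	{
 *		return cmpxchg_double(&p.lo, &p.hi,
 *				      old_lo, old_hi,
 *				      new_lo, new_hi);
 *	}
 */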

/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();					\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();					\
	__ret;							\
})

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable();						\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable();						\
	__ret;								\
})
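
/*
 * Illustrative usage, not part of this header; the per-CPU variable is
 * made up.  The preempt_disable()/preempt_enable() pair in
 * _protect_cmpxchg_local() keeps the task on one CPU while its per-CPU
 * slot is updated, so cmpxchg_local() (the relaxed form) is enough,
 * because the slot is only modified by its owning CPU:
 *
 *	DEFINE_PER_CPU(unsigned long, my_counter);
 *
 *	static void bump_if_zero(void)
 *	{
 *		this_cpu_cmpxchg(my_counter, 0, 1);
 *	}
 */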

#define __CMPWAIT_CASE(w, sz, name)					\
static inline void __cmpwait_case_##name(volatile void *ptr,		\
					 unsigned long val)		\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	ldxr" #sz "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
	"1:"								\
	: [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr)		\
	: [val] "r" (val));						\
}

__CMPWAIT_CASE(w, b, 1);
__CMPWAIT_CASE(w, h, 2);
__CMPWAIT_CASE(w,  , 4);
__CMPWAIT_CASE( ,  , 8);

#undef __CMPWAIT_CASE

#define __CMPWAIT_GEN(sfx)						\
static inline void __cmpwait##sfx(volatile void *ptr,			\
				  unsigned long val,			\
				  int size)				\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpwait_case##sfx##_1(ptr, (u8)val);		\
	case 2:								\
		return __cmpwait_case##sfx##_2(ptr, (u16)val);		\
	case 4:								\
		return __cmpwait_case##sfx##_4(ptr, val);		\
	case 8:								\
		return __cmpwait_case##sfx##_8(ptr, val);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
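
/*
 * For context (illustrative, not part of this header; the variable and
 * condition are made up): __cmpwait_relaxed() arms the exclusive monitor
 * for the location and executes WFE, letting the CPU doze until the value
 * is written by another observer or some other event fires.  It is the
 * building block for smp_cond_load_acquire()-style polling, which is
 * structured roughly like this:
 *
 *	static unsigned int flag;
 *
 *	static void wait_for_flag(void)
 *	{
 *		unsigned int val;
 *
 *		for (;;) {
 *			val = smp_load_acquire(&flag);
 *			if (val != 0)
 *				break;
 *			__cmpwait_relaxed(&flag, val);
 *		}
 *	}
 */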

#endif	/* __ASM_CMPXCHG_H */