#ifndef _ASM_POWERPC_CMPXCHG_H_
#define _ASM_POWERPC_CMPXCHG_H_

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <linux/bug.h>
/*
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */
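
/*
 * For illustration only (hypothetical caller, not part of this header):
 * a word-sized field is exchanged with xchg_local(), defined below, which
 * returns the previous value but adds no memory barriers; it guarantees
 * only that the exchange itself is performed atomically.
 *
 *	u32 token = 0;
 *	u32 prev;
 *
 *	prev = xchg_local(&token, 1);
 *	if (prev == 0) {
 *		// this caller is the one that flipped the token 0 -> 1
 *	}
 */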

static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2 \n"
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u32_relaxed(u32 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"
"	stwcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_relaxed(u64 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2\n"
"	stdcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}
#endif

static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg");
	return x;
}

static __always_inline unsigned long
__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_relaxed(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
	return x;
}

#define xchg_local(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_local((ptr),			\
			(unsigned long)_x_, sizeof(*(ptr)));		\
})

#define xchg_relaxed(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_relaxed((ptr),			\
			(unsigned long)_x_, sizeof(*(ptr)));		\
})

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
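
/*
 * For illustration only (hypothetical helper, not part of this header):
 * the classic read/compute/retry loop built on the cmpxchg() macro defined
 * below, which surrounds a successful update with full barriers.
 *
 *	static inline void bounded_inc(u32 *ctr, u32 limit)
 *	{
 *		u32 old, new;
 *
 *		do {
 *			old = READ_ONCE(*ctr);
 *			if (old >= limit)
 *				return;
 *			new = old + 1;
 *		} while (cmpxchg(ctr, old, new) != old);
 *	}
 */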

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n\
	stwcx.	%4,0,%2\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n\
	stwcx.	%4,0,%2\n\
	bne-	1b\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_relaxed\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

/*
 * The cmpxchg family provides no ordering guarantee when the compare
 * part fails, so we can avoid superfluous barriers by implementing
 * cmpxchg() and cmpxchg_acquire() in assembly. We don't do the same
 * for cmpxchg_release(), because that would put a barrier in the
 * middle of the ll/sc loop, which is probably a bad idea; for example,
 * it might make the conditional store more likely to fail.
 */
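
/*
 * For illustration only (hypothetical lock, not part of this header):
 * a trylock sketch showing where the acquire ordering matters. Only a
 * successful compare-and-swap has to order the critical section after
 * the lock-word update; a failed attempt never enters the critical
 * section, so it needs no barrier, which is what the assembly variants
 * below exploit.
 *
 *	static inline bool my_trylock(u32 *lock)
 *	{
 *		// 0 = unlocked, 1 = locked
 *		return cmpxchg_acquire(lock, 0, 1) == 0;
 *	}
 */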

static __always_inline unsigned long
__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_acquire\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_relaxed\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_acquire\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
	return old;
}

static __always_inline unsigned long
__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_relaxed(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_relaxed(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
	return old;
}

static __always_inline unsigned long
__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_acquire(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_acquire(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})

#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})

#define cmpxchg_relaxed(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})

#define cmpxchg_acquire(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})

#ifdef CONFIG_PPC64
#define cmpxchg64(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
})
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#define cmpxchg64_relaxed(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_relaxed((ptr), (o), (n));				\
})
#define cmpxchg64_acquire(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_acquire((ptr), (o), (n));				\
})
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CMPXCHG_H_ */