locking/barriers, arch/arm64: Implement LDXR+WFE based smp_cond_load_acquire()
arch/arm64/include/asm/barrier.h
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__
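/*
 * Hint instructions: Send Event, Wait For Event and Wait For Interrupt.
 * WFE/WFI park the CPU in a low-power state until an event or interrupt
 * arrives; SEV wakes any CPUs waiting in WFE.
 */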
#define sev()		asm volatile("sev" : : : "memory")
#define wfe()		asm volatile("wfe" : : : "memory")
#define wfi()		asm volatile("wfi" : : : "memory")
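/*
 * ISB flushes the instruction pipeline; DMB orders memory accesses and
 * DSB additionally waits for them to complete. DMB and DSB take a
 * shareability-domain/access-type option (e.g. sy, ish, ishst, oshld).
 */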
#define isb()		asm volatile("isb" : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
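/*
 * Mandatory barriers: full-system data synchronization barriers, strong
 * enough to order accesses against devices as well as other CPUs.
 */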
#define mb()		dsb(sy)
#define rmb()		dsb(ld)
#define wmb()		dsb(st)
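/*
 * Barriers for ordering CPU accesses to coherent DMA memory:
 * outer-shareable DMBs, restricted to loads (oshld) or stores (oshst).
 */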
#define dma_rmb()	dmb(oshld)
#define dma_wmb()	dmb(oshst)
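/*
 * SMP barriers: DMBs limited to the inner-shareable domain, which is
 * sufficient (and cheaper than a DSB) for CPU-to-CPU ordering.
 */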
#define __smp_mb()	dmb(ish)
#define __smp_rmb()	dmb(ishld)
#define __smp_wmb()	dmb(ishst)
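/*
 * Store-release: the STLR{B,H} family guarantees that all prior memory
 * accesses are observed before the store itself, sized by the operand.
 */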
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("stlrb %w1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	case 2:								\
		asm volatile ("stlrh %w1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	case 4:								\
		asm volatile ("stlr %w1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	case 8:								\
		asm volatile ("stlr %1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	}								\
} while (0)
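/*
 * Load-acquire: the LDAR{B,H} family guarantees the load is observed
 * before any subsequent memory access. The union lets the asm write the
 * result through a correctly-sized integer lvalue while the macro still
 * evaluates to a value of the pointed-to type.
 */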
#define __smp_load_acquire(p)						\
({									\
	union { typeof(*p) __val; char __c[1]; } __u;			\
	compiletime_assert_atomic_type(*p);				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("ldarb %w0, %1"				\
			: "=r" (*(__u8 *)__u.__c)			\
			: "Q" (*p) : "memory");				\
		break;							\
	case 2:								\
		asm volatile ("ldarh %w0, %1"				\
			: "=r" (*(__u16 *)__u.__c)			\
			: "Q" (*p) : "memory");				\
		break;							\
	case 4:								\
		asm volatile ("ldar %w0, %1"				\
			: "=r" (*(__u32 *)__u.__c)			\
			: "Q" (*p) : "memory");				\
		break;							\
	case 8:								\
		asm volatile ("ldar %0, %1"				\
			: "=r" (*(__u64 *)__u.__c)			\
			: "Q" (*p) : "memory");				\
		break;							\
	}								\
	__u.__val;							\
})
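/*
 * smp_cond_load_acquire(): spin until cond_expr is true, re-reading *ptr
 * with acquire semantics on each iteration; cond_expr may refer to the
 * current value as VAL. Per the commit above, rather than busy-waiting,
 * __cmpwait_relaxed() uses LDXR to arm the exclusive monitor and WFE to
 * sleep until *ptr is written (or an event/interrupt wakes the CPU).
 */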
#define smp_cond_load_acquire(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	typeof(*ptr) VAL;						\
	for (;;) {							\
		VAL = smp_load_acquire(__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	VAL;								\
})
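/*
 * Illustrative usage only (not part of this header): a hypothetical
 * caller waiting for another CPU to clear a flag, with acquire
 * semantics on the final load:
 *
 *	smp_cond_load_acquire(&flag, VAL == 0);
 */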
#include <asm-generic/barrier.h>

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_BARRIER_H */