/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

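/*
 * A hedged sketch of the ticket encoding (assuming TICKET_SHIFT is 16, as
 * set up in <asm/spinlock_types.h>): the lock is one 32-bit word holding
 * two 16-bit halves,
 *
 *	[ next (bits 31:16) | owner (bits 15:0) ]
 *
 * "next" is the ticket handed to the next arriving CPU and "owner" is the
 * ticket currently being served, so next == owner means unlocked. Locking
 * increments "next"; unlocking increments "owner".
 */
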
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n"
	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}
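
/*
 * For illustration only, the acquisition above is roughly equivalent to
 * the following C (a sketch: the real code must stay in asm so that the
 * waiter can sleep in WFE and be woken via the exclusive monitor):
 *
 *	u16 ticket = __atomic_fetch_add(&lock->next, 1, __ATOMIC_RELAXED);
 *	while (__atomic_load_n(&lock->owner, __ATOMIC_ACQUIRE) != ticket)
 *		cpu_relax();
 */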

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(
"	prfm	pstl1strm, %2\n"
"1:	ldaxr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 2f\n"
"	add	%w0, %w0, %3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b\n"
"2:"
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}
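
/*
 * Illustrative C for the trylock above, assuming a little-endian layout
 * with "owner" in the low half-word (sketch only; note the asm retries a
 * failed store-exclusive via the branch back to 1b, whereas this single
 * compare-exchange simply reports failure):
 *
 *	u32 old = __atomic_load_n((u32 *)lock, __ATOMIC_ACQUIRE);
 *	if ((u16)old != (u16)(old >> 16))
 *		return 0;	// next != owner: already locked
 *	return __atomic_compare_exchange_n((u32 *)lock, &old,
 *			old + (1u << TICKET_SHIFT), false,
 *			__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 */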

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/*
	 * Store owner + 1 with release semantics into the owner half-word,
	 * publishing the critical section to the next ticket holder.
	 */
	asm volatile(
"	stlrh	%w1, %0\n"
	: "=Q" (lock->owner)
	: "r" (lock->owner + 1)
	: "memory");
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = ACCESS_ONCE(*lock);
	return lockval.owner != lockval.next;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = ACCESS_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
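
/*
 * Worked example: owner == 5 and next == 5 means unlocked; one holder
 * makes next == 6 (locked, uncontended); a queued waiter makes next == 7,
 * and next - owner == 2 > 1 reports contention.
 */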

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "cc", "memory");
}
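
/*
 * The sevl/wfe pair above is the usual arm64 wait-loop idiom: sevl sets
 * the local event register so the first wfe falls straight through to the
 * load, and on later iterations the CPU sleeps in wfe until the exclusive
 * monitor is cleared by another CPU writing the lock word. Ignoring the
 * event machinery, the loop is roughly (sketch only):
 *
 *	while (cmpxchg(&rw->lock, 0, 0x80000000) != 0)
 *		cpu_relax();
 */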

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(
	"	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1f\n"
	"	stxr	%w0, %w2, %1\n"
	"1:\n"
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "cc", "memory");

	return !tmp;
}
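
/*
 * Note on the trylock above: tmp ends up as either the loaded lock value
 * (non-zero means the lock was held) or the stxr status flag (0 on
 * success), so "!tmp" is true only when this CPU installed the write bit.
 * A failed stxr is not retried; a racy failure is acceptable for trylock.
 */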

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(
	"	stlr	%w1, %0\n"
	: "=Q" (rw->lock) : "r" (0) : "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative (bit 31 set), a writer already holds the lock.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 2b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}
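
/*
 * Illustrative C for the reader-count loop above (sketch only; the asm
 * additionally parks the CPU in wfe while it waits):
 *
 *	u32 val;
 *	do {
 *		while (((val = ACCESS_ONCE(rw->lock)) + 1) & 0x80000000)
 *			cpu_relax();
 *	} while (cmpxchg(&rw->lock, val, val + 1) != val);
 *
 * Note the test is on val + 1, as in the asm: it both backs off while a
 * writer holds bit 31 and stops the reader count overflowing into it.
 */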

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2 = 1;

	asm volatile(
	"	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1f\n"
	"	stxr	%w1, %w0, %2\n"
	"1:\n"
	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	return !tmp2;
}
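
/*
 * Note on the trylock above: tmp2 starts at 1 and is only written by the
 * stxr (0 on success), so "!tmp2" is true exactly when the incremented
 * reader count was committed. Both a writer holding bit 31 and a failed
 * store-exclusive leave tmp2 non-zero, and neither case is retried.
 */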

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */