arch/arm64/include/asm/spinlock.h
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
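/*
 * These are ticket locks: the 32-bit lock word is split into two 16-bit
 * halves, "owner" (the ticket currently being served) and "next" (the next
 * ticket to hand out), with TICKET_SHIFT selecting the "next" half (see
 * asm/spinlock_types.h). A rough C-level sketch of the protocol, for
 * illustration only; the real implementations are the asm blocks below:
 *
 *	lock:	my_ticket = fetch_and_add(&lock->next, 1);	// take a ticket
 *		while (READ_ONCE(lock->owner) != my_ticket)
 *			wfe();					// wait our turn
 *	unlock:	smp_store_release(&lock->owner, lock->owner + 1);
 */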
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        unsigned int tmp;
        arch_spinlock_t lockval;
        u32 owner;

        /*
         * Ensure prior spin_lock operations to other locks have completed
         * on this CPU before we test whether "lock" is locked.
         */
        smp_mb();
        owner = READ_ONCE(lock->owner) << 16;

        asm volatile(
"       sevl\n"
"1:     wfe\n"
"2:     ldaxr   %w0, %2\n"
        /* Is the lock free? */
"       eor     %w1, %w0, %w0, ror #16\n"
"       cbz     %w1, 3f\n"
        /* Lock taken -- has there been a subsequent unlock->lock transition? */
"       eor     %w1, %w3, %w0, lsl #16\n"
"       cbz     %w1, 1b\n"
        /*
         * The owner has been updated, so there was an unlock->lock
         * transition that we missed. That means we can rely on the
         * store-release of the unlock operation paired with the
         * load-acquire of the lock operation to publish any of our
         * previous stores to the new lock owner and therefore don't
         * need to bother with the writeback below.
         */
"       b       4f\n"
"3:\n"
        /*
         * Serialise against any concurrent lockers by writing back the
         * unlocked lock value
         */
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
"       stxr    %w1, %w0, %2\n"
        __nops(2),
        /* LSE atomics */
"       mov     %w1, %w0\n"
"       cas     %w0, %w0, %2\n"
"       eor     %w1, %w1, %w0\n")
        /* Somebody else wrote to the lock; go back to 2 and reload the value */
75 " cbnz %w1, 2b\n"
76 "4:"
77 : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
78 : "r" (owner)
79 : "memory");
80 }
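/*
 * Roughly, the assembly above is equivalent to the following C-level sketch
 * (for illustration only, not a drop-in replacement):
 *
 *	for (;;) {
 *		lockval = load_acquire(lock);
 *		if (lockval.owner == lockval.next)
 *			break;			// free: write it back below
 *		if (lockval.owner != owner)
 *			return;			// saw an unlock->lock transition
 *		wfe();
 *	}
 *	store(lock, lockval);			// serialise against new lockers,
 *						// retried if another CPU raced
 */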

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int tmp;
        arch_spinlock_t lockval, newval;

        asm volatile(
        /* Atomically increment the next ticket. */
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
"       prfm    pstl1strm, %3\n"
"1:     ldaxr   %w0, %3\n"
"       add     %w1, %w0, %w5\n"
"       stxr    %w2, %w1, %3\n"
"       cbnz    %w2, 1b\n",
        /* LSE atomics */
"       mov     %w2, %w5\n"
"       ldadda  %w2, %w0, %3\n"
        __nops(3)
        )

        /* Did we get the lock? */
"       eor     %w1, %w0, %w0, ror #16\n"
"       cbz     %w1, 3f\n"
        /*
         * No: spin on the owner. Send a local event to avoid missing an
         * unlock before the exclusive load.
         */
"       sevl\n"
"2:     wfe\n"
"       ldaxrh  %w2, %4\n"
"       eor     %w1, %w2, %w0, lsr #16\n"
"       cbnz    %w1, 2b\n"
        /* We got the lock. Critical section starts here. */
"3:"
        : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
        : "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
        : "memory");
}
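/*
 * In C-like pseudocode (an illustrative sketch only), the fast and slow
 * paths above are:
 *
 *	lockval = atomic_fetch_add(1 << TICKET_SHIFT, lock);	// grab a ticket
 *	if (lockval.owner == lockval.next)
 *		return;						// uncontended
 *	while (load_acquire(&lock->owner) != lockval.next)
 *		wfe();						// spin until served
 */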

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int tmp;
        arch_spinlock_t lockval;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
"       prfm    pstl1strm, %2\n"
"1:     ldaxr   %w0, %2\n"
"       eor     %w1, %w0, %w0, ror #16\n"
"       cbnz    %w1, 2f\n"
"       add     %w0, %w0, %3\n"
"       stxr    %w1, %w0, %2\n"
"       cbnz    %w1, 1b\n"
"2:",
        /* LSE atomics */
"       ldr     %w0, %2\n"
"       eor     %w1, %w0, %w0, ror #16\n"
"       cbnz    %w1, 1f\n"
"       add     %w1, %w0, %3\n"
"       casa    %w0, %w1, %2\n"
"       and     %w1, %w1, #0xffff\n"
"       eor     %w1, %w1, %w0, lsr #16\n"
"1:")
        : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
        : "I" (1 << TICKET_SHIFT)
        : "memory");

        return !tmp;
}
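/*
 * trylock, sketched in C (illustration only): succeed only if the lock is
 * currently free, i.e. the next ticket equals the one being served.
 *
 *	old = READ_ONCE(*lock);
 *	if (old.owner != old.next)
 *		return 0;				// held by someone else
 *	new = old;
 *	new.next++;
 *	return cmpxchg_acquire(lock, old, new) == old;	// claim our ticket
 */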

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
"       ldrh    %w1, %0\n"
"       add     %w1, %w1, #1\n"
"       stlrh   %w1, %0",
        /* LSE atomics */
"       mov     %w1, #1\n"
"       staddlh %w1, %0\n"
        __nops(1))
        : "=Q" (lock->owner), "=&r" (tmp)
        :
        : "memory");
}
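/*
 * Unlock is simply "lock->owner++" with release semantics; the waiter whose
 * ticket matches the new owner value proceeds. A one-line C sketch
 * (illustrative): smp_store_release(&lock->owner, lock->owner + 1);
 */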

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        smp_mb(); /* See arch_spin_unlock_wait */
        return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
        arch_spinlock_t lockval = READ_ONCE(*lock);
        return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended  arch_spin_is_contended

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int tmp;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
"       sevl\n"
"1:     wfe\n"
"2:     ldaxr   %w0, %1\n"
"       cbnz    %w0, 1b\n"
"       stxr    %w0, %w2, %1\n"
"       cbnz    %w0, 2b\n"
        __nops(1),
        /* LSE atomics */
"1:     mov     %w0, wzr\n"
"2:     casa    %w0, %w2, %1\n"
"       cbz     %w0, 3f\n"
"       ldxr    %w0, %1\n"
"       cbz     %w0, 2b\n"
"       wfe\n"
"       b       1b\n"
"3:")
        : "=&r" (tmp), "+Q" (rw->lock)
        : "r" (0x80000000)
        : "memory");
}
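/*
 * In C-like terms (sketch only): a writer claims the whole word by
 * installing bit 31, but only when the word reads 0 (no readers, no writer):
 *
 *	while (cmpxchg_acquire(&rw->lock, 0, 0x80000000) != 0)
 *		wfe();				// wait until the word drops to 0
 */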

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int tmp;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
"1:     ldaxr   %w0, %1\n"
"       cbnz    %w0, 2f\n"
"       stxr    %w0, %w2, %1\n"
"       cbnz    %w0, 1b\n"
"2:",
        /* LSE atomics */
"       mov     %w0, wzr\n"
"       casa    %w0, %w2, %1\n"
        __nops(2))
        : "=&r" (tmp), "+Q" (rw->lock)
        : "r" (0x80000000)
        : "memory");

        return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        asm volatile(ARM64_LSE_ATOMIC_INSN(
        "       stlr    wzr, %0",
        "       swpl    wzr, wzr, %0")
        : "=Q" (rw->lock) :: "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)          ((x)->lock == 0)

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative, the lock is already held.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
 * and LSE implementations may exhibit different behaviour (although this
 * will have no effect on lockdep).
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int tmp, tmp2;

        asm volatile(
"       sevl\n"
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
"1:     wfe\n"
"2:     ldaxr   %w0, %2\n"
"       add     %w0, %w0, #1\n"
"       tbnz    %w0, #31, 1b\n"
"       stxr    %w1, %w0, %2\n"
"       cbnz    %w1, 2b\n"
        __nops(1),
        /* LSE atomics */
"1:     wfe\n"
"2:     ldxr    %w0, %2\n"
"       adds    %w1, %w0, #1\n"
"       tbnz    %w1, #31, 1b\n"
"       casa    %w0, %w1, %2\n"
"       sbc     %w0, %w1, %w0\n"
"       cbnz    %w0, 2b")
        : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
        :
        : "cc", "memory");
}
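/*
 * Readers count in the low 31 bits; bit 31 set means a writer holds the
 * lock. A C-level sketch of the loop above (illustration only):
 *
 *	do {
 *		old = READ_ONCE(rw->lock);
 *		while (old & 0x80000000) {	// writer present, wait
 *			wfe();
 *			old = READ_ONCE(rw->lock);
 *		}
 *	} while (cmpxchg_acquire(&rw->lock, old, old + 1) != old);
 */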

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int tmp, tmp2;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
"1:     ldxr    %w0, %2\n"
"       sub     %w0, %w0, #1\n"
"       stlxr   %w1, %w0, %2\n"
"       cbnz    %w1, 1b",
        /* LSE atomics */
"       movn    %w0, #0\n"
"       staddl  %w0, %2\n"
        __nops(2))
        : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
        :
        : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int tmp, tmp2;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
"       mov     %w1, #1\n"
"1:     ldaxr   %w0, %2\n"
"       add     %w0, %w0, #1\n"
"       tbnz    %w0, #31, 2f\n"
"       stxr    %w1, %w0, %2\n"
"       cbnz    %w1, 1b\n"
"2:",
        /* LSE atomics */
"       ldr     %w0, %2\n"
"       adds    %w1, %w0, #1\n"
"       tbnz    %w1, #31, 1f\n"
"       casa    %w0, %w1, %2\n"
"       sbc     %w1, %w1, %w0\n"
        __nops(1)
"1:")
        : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
        :
        : "cc", "memory");

        return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)           ((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

/*
 * Accesses appearing in program order before a spin_lock() operation
 * can be reordered with accesses inside the critical section, by virtue
 * of arch_spin_lock being constructed using acquire semantics.
 *
 * In cases where this is problematic (e.g. try_to_wake_up), an
 * smp_mb__before_spinlock() can restore the required ordering.
 */
#define smp_mb__before_spinlock()       smp_mb()
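/*
 * Illustrative usage (a sketch modelled on the try_to_wake_up() case; the
 * names below are examples, not definitions from this file):
 *
 *	WRITE_ONCE(wake_condition, true);	// must be visible before ...
 *	smp_mb__before_spinlock();
 *	raw_spin_lock(&wait_lock);		// ... loads inside this section
 *	if (READ_ONCE(sleeper_needs_wakeup))
 *		do_wakeup();
 *	raw_spin_unlock(&wait_lock);
 */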

#endif /* __ASM_SPINLOCK_H */