Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
f65e4fa8 | 6 | * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org) |
1da177e4 LT |
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
8 | */ | |
9 | #ifndef _ASM_SPINLOCK_H | |
10 | #define _ASM_SPINLOCK_H | |
11 | ||
2a31b033 RB |
12 | #include <linux/compiler.h> |
13 | ||
0004a9df | 14 | #include <asm/barrier.h> |
b0984c43 | 15 | #include <asm/compiler.h> |
1da177e4 LT |
16 | #include <asm/war.h> |
17 | ||
18 | /* | |
19 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | |
2a31b033 | 20 | * |
70342287 | 21 | * Simple spin lock operations. There are two variants, one clears IRQ's |
2a31b033 RB |
22 | * on the local processor, one does not. |
23 | * | |
24 | * These are fair FIFO ticket locks | |
25 | * | |
26 | * (the type definitions are in asm/spinlock_types.h) | |
1da177e4 LT |
27 | */ |
28 | ||
1da177e4 LT |
29 | |
30 | /* | |
2a31b033 RB |
31 | * Ticket locks are conceptually two parts, one indicating the current head of |
32 | * the queue, and the other indicating the current tail. The lock is acquired | |
33 | * by atomically noting the tail and incrementing it by one (thus adding | |
34 | * ourself to the queue and noting our position), then waiting until the head | |
35 | * becomes equal to the the initial value of the tail. | |
1da177e4 LT |
36 | */ |
37 | ||
0199c4e6 | 38 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) |
2a31b033 | 39 | { |
500c2e1f | 40 | u32 counters = ACCESS_ONCE(lock->lock); |
2a31b033 | 41 | |
500c2e1f | 42 | return ((counters >> 16) ^ counters) & 0xffff; |
2a31b033 RB |
43 | } |
44 | ||
5fac4f7a PB |
45 | static inline int arch_spin_value_unlocked(arch_spinlock_t lock) |
46 | { | |
47 | return lock.h.serving_now == lock.h.ticket; | |
48 | } | |
49 | ||
/* IRQ flags play no role in this implementation; take the lock as usual. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/* Busy-wait (with cpu_relax()) until the lock is observed unlocked. */
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }
2a31b033 | 53 | |
0199c4e6 | 54 | static inline int arch_spin_is_contended(arch_spinlock_t *lock) |
2a31b033 | 55 | { |
500c2e1f | 56 | u32 counters = ACCESS_ONCE(lock->lock); |
2a31b033 | 57 | |
500c2e1f | 58 | return (((counters >> 16) - counters) & 0xffff) > 1; |
2a31b033 | 59 | } |
0199c4e6 | 60 | #define arch_spin_is_contended arch_spin_is_contended |
2a31b033 | 61 | |
/*
 * Acquire the ticket lock.
 *
 * Atomically takes a ticket (the upper 16 bits of lock->lock) with an
 * ll/sc sequence, then spins until serving_now (the lower 16 bits)
 * reaches that ticket.  The slow path lives in .subsection 2: waiters
 * back off for (distance-to-our-ticket << 5) loop iterations before
 * re-reading serving_now, so far-away waiters poll less often.
 *
 * The R10000_LLSC_WAR variant retries a failed sc with the
 * branch-likely form (beqzl), as required by the R10000 ll/sc errata
 * workaround; the generic variant uses plain beqz and folds the srl
 * into the delay slot.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int tmp;
	int inc = 0x10000;	/* adds 1 to the ticket (upper) half-word */

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 1b			\n"
		"	 srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();	/* acquire barrier: order the critical section after the lock */
}
139 | ||
/*
 * Release the ticket lock by advancing serving_now, which hands the
 * lock to the next queued ticket.  Only the lock holder writes this
 * half-word, so a plain 16-bit store suffices — no ll/sc needed.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int serving_now = lock->h.serving_now + 1;
	wmb();				/* order critical-section stores before the release */
	lock->h.serving_now = (u16)serving_now;
	nudge_writes();			/* push the store out so spinning waiters see it promptly */
}
147 | ||
/*
 * Try to acquire the ticket lock without spinning.
 *
 * Loads the lock word with ll, splits it into the ticket tail (upper
 * 16 bits) and now_serving (lower 16 bits); if they differ the lock is
 * held and 0 is returned from the out-of-line 3: path.  Otherwise the
 * tail is bumped with sc (retrying only on sc failure) and 1 is
 * returned.  R10000_LLSC_WAR selects the branch-likely (beqzl) retry
 * form required by the R10000 ll/sc errata workaround.
 *
 * Returns non-zero (1) on success, 0 if the lock was busy.
 */
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;
	int inc = 0x10000;	/* adds 1 to the ticket (upper) half-word */

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();	/* acquire barrier on the success path */

	return tmp;
}
207 | ||
208 | /* | |
209 | * Read-write spinlocks, allowing multiple readers but only one writer. | |
210 | * | |
211 | * NOTE! it is quite common to have readers in interrupts but no interrupt | |
212 | * writers. For those circumstances we can "mix" irq-safe locks - any writer | |
213 | * needs to get a irq-safe write-lock, but readers can get non-irqsafe | |
214 | * read-locks. | |
215 | */ | |
216 | ||
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * Bit 31 of the lock word marks a writer; the remaining bits count
 * readers.  A non-negative value therefore means a read lock could be
 * taken.
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * A writer needs the whole word to be zero: no readers and no writer.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)
/*
 * Acquire a read lock: spin while the lock word is negative (bit 31
 * set means write-locked), then atomically bump the reader count with
 * ll/sc.  The R10000_LLSC_WAR variant retries a failed sc in-asm with
 * beqzl; the generic variant retries from C on a failed sc (tmp == 0).
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_lock	\n"
			"	bltz	%1, 1b				\n"
			"	 addu	%1, 1				\n"
			"2:	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));	/* sc failed — retry the whole ll/sc */
	}

	smp_llsc_mb();	/* acquire barrier after taking the read lock */
}
261 | ||
/*
 * Release a read lock: atomically decrement the reader count with
 * ll/sc.  The release barrier is issued before the ll/sc sequence so
 * critical-section accesses cannot leak past the unlock.
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	smp_mb__before_llsc();	/* release barrier */

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# arch_read_unlock	\n"
		"	addiu	%1, -1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_unlock	\n"
			"	addiu	%1, -1				\n"
			"	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));	/* sc failed — retry */
	}
}
289 | ||
/*
 * Acquire the write lock: spin until the lock word is zero (no readers
 * and no writer), then atomically set bit 31 (lui %1, 0x8000 builds
 * 0x80000000) with ll/sc.  R10000_LLSC_WAR selects the branch-likely
 * in-asm retry; the generic variant retries from C on sc failure.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_write_lock	\n"
			"	bnez	%1, 1b				\n"
			"	 lui	%1, 0x8000			\n"
			"2:	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));	/* sc failed — retry */
	}

	smp_llsc_mb();	/* acquire barrier after taking the write lock */
}
322 | ||
/*
 * Release the write lock: a plain store of zero clears both the writer
 * bit and (necessarily zero) reader count.  The release barrier comes
 * first so critical-section accesses cannot leak past the unlock.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb__before_llsc();	/* release barrier */

	__asm__ __volatile__(
	"				# arch_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}
334 | ||
/*
 * Try to take a read lock without spinning on contention.
 *
 * If the lock word is negative (write-locked) branch straight to 2:
 * with ret still 0.  Otherwise bump the reader count with ll/sc,
 * retrying only on sc failure, then set ret to 1 after the weak
 * acquire barrier (__WEAK_LLSC_MB).  R10000_LLSC_WAR selects the
 * branch-likely (beqzl) sc-retry form.
 *
 * Returns 1 on success, 0 if a writer holds the lock.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	}

	return ret;
}
1da177e4 | 378 | |
/*
 * Try to take the write lock without spinning on contention.
 *
 * If the lock word is non-zero (readers or a writer present) branch to
 * 2: with ret 0.  Otherwise attempt to store 0x80000000 (lui 0x8000)
 * with sc; the R10000 variant retries a failed sc in-asm (beqzl) and
 * issues __WEAK_LLSC_MB before setting ret, while the generic variant
 * retries the whole sequence from C and issues smp_llsc_mb() after.
 *
 * Returns 1 on success, 0 if the lock was busy.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"	ll	%1, %3	# arch_write_trylock	\n"
			"	li	%2, 0				\n"
			"	bnez	%1, 2f				\n"
			"	lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			"	li	%2, 1				\n"
			"2:						\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
			  "=&r" (ret)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));	/* sc failed — retry */

		smp_llsc_mb();	/* acquire barrier (also run on the failure path) */
	}

	return ret;
}
422 | ||
/* IRQ flags play no role here; map the _flags variants to the plain locks. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* Generic spinning code calls these between lock attempts. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
ef6edc97 | 429 | |
1da177e4 | 430 | #endif /* _ASM_SPINLOCK_H */ |