#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

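/*
 * Illustration (not part of the original header): with the non-Thumb-2
 * definitions below, SEV assembles to roughly the following.  The entry
 * emitted into .alt.smp.init records the address of the SMP instruction
 * together with its UP replacement, so boot code can rewrite "sev" into
 * "nop" when the kernel finds itself running on a uniprocessor system.
 *
 *	9998:	sev
 *		.pushsection ".alt.smp.init", "a"
 *		.long	9998b
 *		nop
 *		.popsection
 */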
#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif

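/*
 * dsb_sev() is the wake-up half of the WFE/SEV pairing used in the lock
 * slowpaths below: the barrier (DSB, or its CP15 equivalent before ARMv7)
 * makes the unlocking store visible before SEV wakes any CPUs waiting in WFE.
 */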
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */

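/*
 * For reference, a sketch of the ticket-lock layout these routines rely on;
 * the authoritative definitions live in <asm/spinlock_types.h>.  The lock
 * word packs two 16-bit tickets, "owner" in the low half and "next" in the
 * high half on little-endian builds (the halves are swapped on big-endian):
 *
 *	typedef struct {
 *		union {
 *			u32 slock;
 *			struct __raw_tickets {
 *				u16 owner;
 *				u16 next;
 *			} tickets;
 *		};
 *	} arch_spinlock_t;
 *
 * With TICKET_SHIFT equal to 16, adding (1 << TICKET_SHIFT) to ->slock hands
 * out a new "next" ticket, and the lock is free when owner == next.
 */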
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

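/*
 * Take a ticket: the ldrex/strex loop atomically bumps the "next" half of
 * the lock word while remembering the old value, then we spin (sleeping in
 * WFE between polls) until the "owner" half catches up with our ticket.
 */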
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

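/*
 * Single-shot attempt: compare the two 16-bit halves of the lock word by
 * subtracting the value rotated right by 16 bits from itself; only if they
 * are equal (lock free) is a new ticket taken with strexeq.  The outer loop
 * retries only when the exclusive store fails, not when the lock is held.
 */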
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

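/*
 * Release is a plain increment of the owner ticket: only the lock holder
 * writes this half, so no exclusive access is needed, but we do need the
 * barrier before the store and dsb_sev() afterwards to wake any waiters.
 */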
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return tickets.owner != tickets.next;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

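/*
 * The rwlock word itself is a single 32-bit counter (see
 * <asm/spinlock_types.h> for the type): bit 31 set means the lock is
 * write-held, otherwise the value counts the active readers.
 */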
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

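/*
 * On unlock, tmp ends up holding the decremented reader count, so dsb_sev()
 * only signals waiters when the last reader drops the lock and a writer
 * spinning in WFE can actually make progress.
 */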
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */