Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (2004) Linus Torvalds | |
3 | * | |
4 | * Author: Zwane Mwaikambo <zwane@fsmlabs.com> | |
5 | * | |
fb1c8f93 IM |
6 | * Copyright (2004, 2005) Ingo Molnar |
7 | * | |
8 | * This file contains the spinlock/rwlock implementations for the | |
9 | * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them) | |
1da177e4 LT |
10 | */ |
11 | ||
12 | #include <linux/config.h> | |
13 | #include <linux/linkage.h> | |
14 | #include <linux/preempt.h> | |
15 | #include <linux/spinlock.h> | |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/module.h> | |
18 | ||
/*
 * Generic declaration of the raw read_trylock() function,
 * architectures are supposed to optimize this:
 *
 * NOTE(review): this generic fallback unconditionally acquires the
 * read lock (which presumably may spin) and always reports success —
 * it is not a true "try" operation.  Acceptable only as a placeholder
 * until the architecture provides a real __raw_read_trylock.
 */
int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
{
	__raw_read_lock(lock);
	return 1;
}
EXPORT_SYMBOL(generic__raw_read_trylock);
1da177e4 LT |
29 | |
30 | int __lockfunc _spin_trylock(spinlock_t *lock) | |
31 | { | |
32 | preempt_disable(); | |
33 | if (_raw_spin_trylock(lock)) | |
34 | return 1; | |
35 | ||
36 | preempt_enable(); | |
37 | return 0; | |
38 | } | |
39 | EXPORT_SYMBOL(_spin_trylock); | |
40 | ||
41 | int __lockfunc _read_trylock(rwlock_t *lock) | |
42 | { | |
43 | preempt_disable(); | |
44 | if (_raw_read_trylock(lock)) | |
45 | return 1; | |
46 | ||
47 | preempt_enable(); | |
48 | return 0; | |
49 | } | |
50 | EXPORT_SYMBOL(_read_trylock); | |
51 | ||
52 | int __lockfunc _write_trylock(rwlock_t *lock) | |
53 | { | |
54 | preempt_disable(); | |
55 | if (_raw_write_trylock(lock)) | |
56 | return 1; | |
57 | ||
58 | preempt_enable(); | |
59 | return 0; | |
60 | } | |
61 | EXPORT_SYMBOL(_write_trylock); | |
62 | ||
fb1c8f93 | 63 | #if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) |
1da177e4 LT |
64 | |
/*
 * Acquire @lock for reading, spinning if necessary.
 * Preemption stays disabled for as long as the lock is held.
 */
void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
71 | ||
/*
 * Acquire @lock, first saving and disabling local interrupts.
 * Returns the saved interrupt state for the matching
 * _spin_unlock_irqrestore().
 *
 * The flags are passed down to _raw_spin_lock_flags() — presumably so
 * an architecture can re-enable interrupts while spinning; verify
 * against the per-arch implementation.
 */
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	/* irqs off before preempt_disable(); the raw acquire runs last */
	local_irq_save(flags);
	preempt_disable();
	_raw_spin_lock_flags(lock, &flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
82 | ||
/*
 * Acquire @lock with local interrupts unconditionally disabled.
 * Pairs with _spin_unlock_irq(), which re-enables them.
 */
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
90 | ||
/*
 * Acquire @lock with softirq (bottom-half) processing disabled.
 * Pairs with _spin_unlock_bh().
 */
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
98 | ||
/*
 * Acquire @lock for reading, first saving and disabling local
 * interrupts.  Returns the saved interrupt state for the matching
 * _read_unlock_irqrestore().
 */
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_read_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);
109 | ||
/*
 * Acquire @lock for reading with local interrupts unconditionally
 * disabled.  Pairs with _read_unlock_irq().
 */
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_irq);
117 | ||
/*
 * Acquire @lock for reading with softirq (bottom-half) processing
 * disabled.  Pairs with _read_unlock_bh().
 */
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_bh);
125 | ||
/*
 * Acquire @lock for writing, first saving and disabling local
 * interrupts.  Returns the saved interrupt state for the matching
 * _write_unlock_irqrestore().
 */
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_write_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);
136 | ||
/*
 * Acquire @lock for writing with local interrupts unconditionally
 * disabled.  Pairs with _write_unlock_irq().
 */
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_irq);
144 | ||
/*
 * Acquire @lock for writing with softirq (bottom-half) processing
 * disabled.  Pairs with _write_unlock_bh().
 */
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_bh);
152 | ||
/*
 * Acquire @lock, spinning if necessary.
 * Preemption stays disabled for as long as the lock is held.
 */
void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	_raw_spin_lock(lock);
}

EXPORT_SYMBOL(_spin_lock);
160 | ||
/*
 * Acquire @lock for writing, spinning if necessary.
 * Preemption stays disabled for as long as the lock is held.
 */
void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_write_lock(lock);
}

EXPORT_SYMBOL(_write_lock);
168 | ||
169 | #else /* CONFIG_PREEMPT: */ | |
170 | ||
/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 *
 * Each generated _##op##_lock* function loops:
 *   1. disable preemption and attempt a raw trylock; stop on success;
 *   2. on failure, re-enable preemption (and restore irqs where they
 *      were disabled), set the lock's break_lock flag as a release
 *      request to the current holder, and busy-wait with cpu_relax()
 *      while the lock looks unavailable and break_lock stays set;
 *   3. retry from step 1; clear break_lock once the lock is held.
 */

#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc _##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(_##op##_lock);						\
									\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);					\
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
{									\
	_##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irq);					\
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/* */								\
	/* Careful: we must exclude softirqs too, hence the */		\
	/* irq-disabling. We use the generic preemption-aware */	\
	/* function: */							\
	/**/								\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_bh)
243 | ||
/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions (one set per lock flavor):
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
256 | ||
257 | #endif /* CONFIG_PREEMPT */ | |
258 | ||
/*
 * Release @lock and re-enable preemption (reverse of _spin_lock()).
 */
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);
265 | ||
/*
 * Release the write lock and re-enable preemption
 * (reverse of _write_lock()).
 */
void __lockfunc _write_unlock(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);
272 | ||
/*
 * Release the read lock and re-enable preemption
 * (reverse of _read_lock()).
 */
void __lockfunc _read_unlock(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);
279 | ||
/*
 * Release @lock and restore the interrupt state saved by
 * _spin_lock_irqsave().  Tear-down is in exact reverse order of the
 * acquire: unlock, restore irqs, re-enable preemption.
 */
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
287 | ||
/*
 * Release @lock and unconditionally re-enable local interrupts
 * (reverse of _spin_lock_irq()).
 */
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);
295 | ||
/*
 * Release @lock and re-enable softirq processing (reverse of
 * _spin_lock_bh()).
 *
 * NOTE(review): preempt_enable_no_resched() skips the reschedule
 * check — presumably because local_bh_enable() immediately after
 * performs its own; verify against the preempt/softirq implementation.
 */
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_spin_unlock_bh);
303 | ||
/*
 * Release the read lock and restore the interrupt state saved by
 * _read_lock_irqsave().  Tear-down is in reverse order of the acquire.
 */
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);
311 | ||
/*
 * Release the read lock and unconditionally re-enable local interrupts
 * (reverse of _read_lock_irq()).
 */
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);
319 | ||
/*
 * Release the read lock and re-enable softirq processing (reverse of
 * _read_lock_bh()).  See the note on _spin_unlock_bh-style tear-down:
 * preempt_enable_no_resched() is used here, presumably because
 * local_bh_enable() performs its own reschedule check — verify.
 */
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_read_unlock_bh);
327 | ||
/*
 * Release the write lock and restore the interrupt state saved by
 * _write_lock_irqsave().  Tear-down is in reverse order of the acquire.
 */
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);
335 | ||
/*
 * Release the write lock and unconditionally re-enable local
 * interrupts (reverse of _write_lock_irq()).
 */
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);
343 | ||
/*
 * Release the write lock and re-enable softirq processing (reverse of
 * _write_lock_bh()).  preempt_enable_no_resched() is used here,
 * presumably because local_bh_enable() performs its own reschedule
 * check — verify against the preempt/softirq implementation.
 */
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_write_unlock_bh);
351 | ||
352 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) | |
353 | { | |
354 | local_bh_disable(); | |
355 | preempt_disable(); | |
356 | if (_raw_spin_trylock(lock)) | |
357 | return 1; | |
358 | ||
10f02d1c | 359 | preempt_enable_no_resched(); |
1da177e4 LT |
360 | local_bh_enable(); |
361 | return 0; | |
362 | } | |
363 | EXPORT_SYMBOL(_spin_trylock_bh); | |
364 | ||
/*
 * Return non-zero if @addr falls inside the text range occupied by the
 * __lockfunc functions.  The range bounds are symbols emitted by the
 * linker script.
 */
int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];
	unsigned long start = (unsigned long)__lock_text_start;
	unsigned long end = (unsigned long)__lock_text_end;

	return addr >= start && addr < end;
}
EXPORT_SYMBOL(in_lock_functions);