/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 *  David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/mcs_spinlock.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex; MUTEX_SHOW_NO_WAITER() is therefore true while the count is still
 * non-negative.
 */
#define MUTEX_SHOW_NO_WAITER(mutex)     (atomic_read(&(mutex)->count) >= 0)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        lock->mcs_lock = NULL;
#endif

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

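/*
 * Illustrative usage sketch (not from the original file): a mutex is set
 * up either statically with DEFINE_MUTEX() or dynamically with the
 * mutex_init() wrapper, which supplies the lock_class_key that
 * __mutex_init() above expects. 'my_static_lock', 'my_ctx' and
 * 'my_ctx_setup' are hypothetical names.
 */
#if 0
static DEFINE_MUTEX(my_static_lock);

struct my_ctx {
        struct mutex lock;
};

static void my_ctx_setup(struct my_ctx *ctx)
{
        mutex_init(&ctx->lock); /* expands to __mutex_init() with a static key */
}
#endif
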
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

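/*
 * Illustrative usage sketch (not from the original file): the minimal
 * pattern for serializing access to shared state. 'my_dev' and
 * 'my_dev_inc' are hypothetical names.
 */
#if 0
struct my_dev {
        struct mutex lock;      /* protects 'count' */
        int count;
};

static void my_dev_inc(struct my_dev *dev)
{
        mutex_lock(&dev->lock);         /* may sleep; not for IRQ context */
        dev->count++;
        mutex_unlock(&dev->lock);       /* same task must unlock */
}
#endif
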
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners from acquiring the mutex
 * more or less simultaneously, the spinners need to acquire an MCS lock
 * first before spinning on the owner field.
 */

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
        if (lock->owner != owner)
                return false;

        /*
         * Ensure we emit the owner->on_cpu dereference _after_ checking
         * that lock->owner still matches owner. If that fails, owner might
         * point to free()d memory; if it still matches, the rcu_read_lock()
         * ensures the memory stays valid.
         */
        barrier();

        return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
        rcu_read_lock();
        while (owner_running(lock, owner)) {
                if (need_resched())
                        break;

                arch_mutex_cpu_relax();
        }
        rcu_read_unlock();

        /*
         * We break out of the loop above on need_resched() and when the
         * owner changed, which is a sign for heavy contention. Return
         * success only when lock->owner is NULL.
         */
        return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
        struct task_struct *owner;
        int retval = 1;

        rcu_read_lock();
        owner = ACCESS_ONCE(lock->owner);
        if (owner)
                retval = owner->on_cpu;
        rcu_read_unlock();
        /*
         * If lock->owner is not set, the mutex owner may have just acquired
         * it and not set the owner yet, or the mutex has been released.
         */
        return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner too early:
         * the slow path will always be taken, and that clears the owner field
         * after verifying that it was indeed current.
         */
        mutex_clear_owner(lock);
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
        if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
                if (lock->ctx->acquired > 0)
                        lock->ctx->acquired--;
                lock->ctx = NULL;
        }

#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner too early:
         * the slow path will always be taken, and that clears the owner field
         * after verifying that it was indeed current.
         */
        mutex_clear_owner(&lock->base);
#endif
        __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);

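/*
 * Illustrative usage sketch (not from the original file): the usual
 * wait/wound acquire-and-backoff pattern for taking two ww_mutexes in an
 * arbitrary order. 'my_ww_class' and 'lock_two' are hypothetical names;
 * a full implementation would loop until both locks are held.
 */
#if 0
static DEFINE_WW_CLASS(my_ww_class);

static void lock_two(struct ww_mutex *a, struct ww_mutex *b)
{
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &my_ww_class);

        ret = ww_mutex_lock(a, &ctx);   /* no other locks held: no -EDEADLK here */

        ret = ww_mutex_lock(b, &ctx);
        if (ret == -EDEADLK) {
                /*
                 * An older context holds b: back off completely, then
                 * sleep on b and retake a in the new order.
                 */
                ww_mutex_unlock(a);
                ww_mutex_lock_slow(b, &ctx);
                ret = ww_mutex_lock(a, &ctx);   /* full code would loop */
        }

        ww_acquire_done(&ctx);
        /* ... both locks held: do the work ... */
        ww_mutex_unlock(a);
        ww_mutex_unlock(b);
        ww_acquire_fini(&ctx);
}
#endif
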
static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

        if (!hold_ctx)
                return 0;

        if (unlikely(ctx == hold_ctx))
                return -EALREADY;

        if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
            (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
                ctx->contending_lock = ww;
#endif
                return -EDEADLK;
        }

        return 0;
}

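/*
 * Note on the stamp check above (reasoning reconstructed from the code,
 * not from the original comments): stamps are taken from a monotonically
 * increasing per-class counter, so the context with the smaller stamp,
 * modulo wraparound, is the older one and is allowed to keep waiting.
 * The unsigned subtraction "ctx->stamp - hold_ctx->stamp <= LONG_MAX" is
 * the wrap-safe test for "ctx is not older than hold_ctx": for
 * hold_ctx->stamp == 10, ctx->stamp == 12 gives a difference of 2 and
 * the newer ctx backs off with -EDEADLK, while ctx->stamp == 8 wraps to
 * a value above LONG_MAX and the older ctx waits. The "ctx > hold_ctx"
 * pointer comparison only breaks ties between equal stamps.
 */
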
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
                                                   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
        /*
         * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
         * but released with a normal mutex_unlock in this call.
         *
         * This should never happen, always use ww_mutex_unlock.
         */
        DEBUG_LOCKS_WARN_ON(ww->ctx);

        /*
         * Not quite done after calling ww_acquire_done() ?
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

        if (ww_ctx->contending_lock) {
                /*
                 * After -EDEADLK you tried to
                 * acquire a different ww_mutex? Bad!
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

                /*
                 * You called ww_mutex_lock after receiving -EDEADLK,
                 * but 'forgot' to unlock everything else first?
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
                ww_ctx->contending_lock = NULL;
        }

        /*
         * Naughty, using a different class will lead to undefined behavior!
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
        ww_ctx->acquired++;
}

/*
 * After acquiring the lock with the fastpath, or when we lost out in
 * contested slowpath, set the ctx and wake up any waiters so they can
 * recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
                              struct ww_acquire_ctx *ctx)
{
        unsigned long flags;
        struct mutex_waiter *cur;

        ww_mutex_lock_acquired(lock, ctx);

        lock->ctx = ctx;

        /*
         * The lock->ctx update should be visible on all cores before
         * the atomic read is done, otherwise contended waiters might be
         * missed. The contended waiters will either see ww_ctx == NULL
         * and keep spinning, or they will acquire wait_lock, add themselves
         * to the waiter list and sleep.
         */
        smp_mb(); /* ^^^ */

        /*
         * Check if lock is contended, if not there is nobody to wake up.
         */
        if (likely(atomic_read(&lock->base.count) == 0))
                return;

        /*
         * Uh oh, we raced in fastpath, wake up everyone in this case,
         * so they can see the new lock->ctx.
         */
        spin_lock_mutex(&lock->base.wait_lock, flags);
        list_for_each_entry(cur, &lock->base.wait_list, list) {
                debug_mutex_wake_waiter(&lock->base, cur);
                wake_up_process(cur->task);
        }
        spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip,
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;
        int ret;

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        /*
         * Optimistic spinning.
         *
         * We try to spin for acquisition when we find that there are no
         * pending waiters and the lock owner is currently running on a
         * (different) CPU.
         *
         * The rationale is that if the lock owner is running, it is likely to
         * release the lock soon.
         *
         * Since this needs the lock owner, and this mutex implementation
         * doesn't track the owner atomically in the lock field, we need to
         * track it non-atomically.
         *
         * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
         * to serialize everything.
         *
         * The mutex spinners are queued up using the MCS lock so that only one
         * spinner can compete for the mutex. However, if mutex spinning isn't
         * going to happen, there is no point in going through the lock/unlock
         * overhead.
         */
        if (!mutex_can_spin_on_owner(lock))
                goto slowpath;

        for (;;) {
                struct task_struct *owner;
                struct mcs_spinlock node;

                if (use_ww_ctx && ww_ctx->acquired > 0) {
                        struct ww_mutex *ww;

                        ww = container_of(lock, struct ww_mutex, base);
                        /*
                         * If ww->ctx is set the contents are undefined, only
                         * by acquiring wait_lock there is a guarantee that
                         * they are not invalid when reading.
                         *
                         * As such, when deadlock detection needs to be
                         * performed the optimistic spinning cannot be done.
                         */
                        if (ACCESS_ONCE(ww->ctx))
                                goto slowpath;
                }

                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                mcs_spin_lock(&lock->mcs_lock, &node);
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner)) {
                        mcs_spin_unlock(&lock->mcs_lock, &node);
                        goto slowpath;
                }

                if ((atomic_read(&lock->count) == 1) &&
                    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
                        lock_acquired(&lock->dep_map, ip);
                        if (use_ww_ctx) {
                                struct ww_mutex *ww;
                                ww = container_of(lock, struct ww_mutex, base);

                                ww_mutex_set_context_fastpath(ww, ww_ctx);
                        }

                        mutex_set_owner(lock);
                        mcs_spin_unlock(&lock->mcs_lock, &node);
                        preempt_enable();
                        return 0;
                }
                mcs_spin_unlock(&lock->mcs_lock, &node);

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task, that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
                        goto slowpath;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                arch_mutex_cpu_relax();
        }
slowpath:
#endif
        spin_lock_mutex(&lock->wait_lock, flags);

        /* once more, can we acquire the lock? */
        if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
                goto skip_wait;

        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                if (MUTEX_SHOW_NO_WAITER(lock) &&
                    (atomic_xchg(&lock->count, -1) == 1))
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(signal_pending_state(state, task))) {
                        ret = -EINTR;
                        goto err;
                }

                if (use_ww_ctx && ww_ctx->acquired > 0) {
                        ret = __mutex_lock_check_stamp(lock, ww_ctx);
                        if (ret)
                                goto err;
                }

                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
        debug_mutex_free_waiter(&waiter);

skip_wait:
        /* got the lock - cleanup and rejoice! */
        lock_acquired(&lock->dep_map, ip);
        mutex_set_owner(lock);

        if (use_ww_ctx) {
                struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
                struct mutex_waiter *cur;

                /*
                 * This branch gets optimized out for the common case,
                 * and is only important for ww_mutex_lock.
                 */
                ww_mutex_lock_acquired(ww, ww_ctx);
                ww->ctx = ww_ctx;

                /*
                 * Give any possible sleeping processes the chance to wake up,
                 * so they can recheck if they have to back off.
                 */
                list_for_each_entry(cur, &lock->wait_list, list) {
                        debug_mutex_wake_waiter(lock, cur);
                        wake_up_process(cur->task);
                }
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
        preempt_enable();
        return 0;

err:
        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
        spin_unlock_mutex(&lock->wait_lock, flags);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
        preempt_enable();
        return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
                            subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
                            0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE,
                                   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                                   subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
        unsigned tmp;

        if (ctx->deadlock_inject_countdown-- == 0) {
                tmp = ctx->deadlock_inject_interval;
                if (tmp > UINT_MAX/4)
                        tmp = UINT_MAX;
                else
                        tmp = tmp*2 + tmp + tmp/2;      /* grow the interval ~3.5x */

                ctx->deadlock_inject_interval = tmp;
                ctx->deadlock_inject_countdown = tmp;
                ctx->contending_lock = lock;

                ww_mutex_unlock(lock);

                return -EDEADLK;
        }
#endif

        return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);
        if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);

        if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * Some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here.
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval(&lock->count);
        if (likely(!ret)) {
                mutex_set_owner(lock);
                return 0;
        } else
                return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval(&lock->count);
        if (likely(!ret)) {
                mutex_set_owner(lock);
                return 0;
        } else
                return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
                            NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
        return __mutex_lock_common(lock, TASK_KILLABLE, 0,
                                   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
                                   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
                                   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                       struct ww_acquire_ctx *ctx)
{
        return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
                                   NULL, _RET_IP_, ctx, 1);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }

        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
        if (ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);

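/*
 * Illustrative usage sketch (not from the original file), showing the
 * spin_trylock()-style return convention documented above: nonzero means
 * "got the lock", 0 means "contended". 'my_lock' and 'try_fast_path' are
 * hypothetical names.
 */
#if 0
static DEFINE_MUTEX(my_lock);

static bool try_fast_path(void)
{
        if (!mutex_trylock(&my_lock))
                return false;   /* contended: caller falls back to a slow path */

        /* ... critical section ... */
        mutex_unlock(&my_lock);
        return true;
}
#endif
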
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();

        ret = __mutex_fastpath_lock_retval(&lock->base.count);

        if (likely(!ret)) {
                ww_mutex_set_context_fastpath(lock, ctx);
                mutex_set_owner(&lock->base);
        } else
                ret = __ww_mutex_lock_slowpath(lock, ctx);
        return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();

        ret = __mutex_fastpath_lock_retval(&lock->base.count);

        if (likely(!ret)) {
                ww_mutex_set_context_fastpath(lock, ctx);
                mutex_set_owner(&lock->base);
        } else
                ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
        return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Returns true (with the mutex held) if the atomic was decremented to 0,
 * and false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
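
/*
 * Illustrative usage sketch (not from the original file): the
 * refcounted-teardown pattern this helper exists for. Only the final
 * decrement takes the mutex, so teardown is serialized against lookups
 * without penalizing every put. 'my_obj', 'my_obj_put', 'node' and
 * 'registry_lock' are hypothetical names.
 */
#if 0
static DEFINE_MUTEX(registry_lock);
static LIST_HEAD(registry);

struct my_obj {
        struct list_head node;
        atomic_t refs;
};

static void my_obj_put(struct my_obj *obj)
{
        if (!atomic_dec_and_mutex_lock(&obj->refs, &registry_lock))
                return;         /* not the last reference */

        /* last reference: unlink under the lock, then free */
        list_del(&obj->node);
        mutex_unlock(&registry_lock);
        kfree(obj);
}
#endif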