/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include "mcs_spinlock.h"
/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define __mutex_slowpath_needs_to_unlock()    0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        osq_lock_init(&lock->osq);
#endif

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
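
/*
 * Illustrative sketch, not part of this file's implementation: the
 * canonical mutex_lock()/mutex_unlock() pattern around shared data.
 * 'example_update_counter' and its arguments are hypothetical.
 */
static inline void example_update_counter(struct mutex *lock, unsigned long *counter)
{
        mutex_lock(lock);               /* sleeps until the mutex is acquired */
        (*counter)++;                   /* exclusive access to the shared data */
        mutex_unlock(lock);             /* must be released by the acquiring task */
}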
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners all trying to acquire the
 * mutex more or less simultaneously, the spinners need to acquire an MCS
 * lock first before spinning on the owner field.
 */

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
        if (lock->owner != owner)
                return false;

        /*
         * Ensure we emit the owner->on_cpu dereference _after_ checking that
         * lock->owner still matches owner. If that fails, owner might point
         * to free()d memory; if it still matches, the rcu_read_lock()
         * ensures the memory stays valid.
         */
        barrier();

        return owner->on_cpu;
}
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
        rcu_read_lock();
        while (owner_running(lock, owner)) {
                if (need_resched())
                        break;

                cpu_relax_lowlatency();
        }
        rcu_read_unlock();

        /*
         * We break out of the loop above on need_resched() and when the
         * owner changed, which is a sign for heavy contention. Return
         * success only when lock->owner is NULL.
         */
        return lock->owner == NULL;
}
/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
        struct task_struct *owner;
        int retval = 1;

        rcu_read_lock();
        owner = ACCESS_ONCE(lock->owner);
        if (owner)
                retval = owner->on_cpu;
        rcu_read_unlock();
        /*
         * If lock->owner is not set, the mutex owner may have just acquired
         * it and not set the owner yet, or the mutex has been released.
         */
        return retval;
}
#endif

__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner before time,
         * the slow path will always be taken, and that clears the owner field
         * after verifying that it was indeed current.
         */
        mutex_clear_owner(lock);
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
        if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
                if (lock->ctx->acquired > 0)
                        lock->ctx->acquired--;
                lock->ctx = NULL;
        }

#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner before time,
         * the slow path will always be taken, and that clears the owner field
         * after verifying that it was indeed current.
         */
        mutex_clear_owner(&lock->base);
#endif
        __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
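
/*
 * Illustrative sketch, not part of this file's implementation: the usual
 * acquire-context pattern around a single ww_mutex. 'example_ww_class' and
 * 'example_ww_locked_op' are hypothetical; with only one lock in the
 * context -EDEADLK cannot occur, the context starts to matter once several
 * locks of the same class are taken together.
 */
static DEFINE_WW_CLASS(example_ww_class);

static inline int example_ww_locked_op(struct ww_mutex *lock)
{
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &example_ww_class);
        ret = ww_mutex_lock(lock, &ctx);        /* may sleep */
        if (!ret) {
                ww_acquire_done(&ctx);          /* no further locks will be added */
                /* ... critical section ... */
                ww_mutex_unlock(lock);          /* unlock before releasing the context */
        }
        ww_acquire_fini(&ctx);
        return ret;
}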
static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

        if (!hold_ctx)
                return 0;

        if (unlikely(ctx == hold_ctx))
                return -EALREADY;

        if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
            (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
                ctx->contending_lock = ww;
#endif
                return -EDEADLK;
        }

        return 0;
}
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
                                                   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
        /*
         * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
         * but released with a normal mutex_unlock in this call.
         *
         * This should never happen, always use ww_mutex_unlock.
         */
        DEBUG_LOCKS_WARN_ON(ww->ctx);

        /*
         * Not quite done after calling ww_acquire_done() ?
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

        if (ww_ctx->contending_lock) {
                /*
                 * After -EDEADLK you tried to
                 * acquire a different ww_mutex? Bad!
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

                /*
                 * You called ww_mutex_lock after receiving -EDEADLK,
                 * but 'forgot' to unlock everything else first?
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
                ww_ctx->contending_lock = NULL;
        }

        /*
         * Naughty, using a different class will lead to undefined behavior!
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
        ww_ctx->acquired++;
}
/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contested slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
                              struct ww_acquire_ctx *ctx)
{
        unsigned long flags;
        struct mutex_waiter *cur;

        ww_mutex_lock_acquired(lock, ctx);

        lock->ctx = ctx;

        /*
         * The lock->ctx update should be visible on all cores before
         * the atomic read below is done, otherwise contended waiters might be
         * missed. The contended waiters will either see ww_ctx == NULL
         * and keep spinning, or they will acquire wait_lock, add themselves
         * to the waiter list and sleep.
         */
        smp_mb();

        /*
         * Check if lock is contended, if not there is nobody to wake up.
         */
        if (likely(atomic_read(&lock->base.count) == 0))
                return;

        /*
         * Uh oh, we raced in fastpath, wake up everyone in this case,
         * so they can see the new lock->ctx.
         */
        spin_lock_mutex(&lock->base.wait_lock, flags);
        list_for_each_entry(cur, &lock->base.wait_list, list) {
                debug_mutex_wake_waiter(&lock->base, cur);
                wake_up_process(cur->task);
        }
        spin_unlock_mutex(&lock->base.wait_lock, flags);
}
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip,
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;
        int ret;

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        /*
         * Optimistic spinning.
         *
         * We try to spin for acquisition when we find that the lock owner
         * is currently running on a (different) CPU and while we don't
         * need to reschedule. The rationale is that if the lock owner is
         * running, it is likely to release the lock soon.
         *
         * Since this needs the lock owner, and this mutex implementation
         * doesn't track the owner atomically in the lock field, we need to
         * track it non-atomically.
         *
         * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
         * to serialize everything.
         *
         * The mutex spinners are queued up using the MCS lock so that only one
         * spinner can compete for the mutex. However, if mutex spinning isn't
         * going to happen, there is no point in going through the lock/unlock
         * overhead.
         */
        if (!mutex_can_spin_on_owner(lock))
                goto slowpath;

        if (!osq_lock(&lock->osq))
                goto slowpath;
        for (;;) {
                struct task_struct *owner;

                if (use_ww_ctx && ww_ctx->acquired > 0) {
                        struct ww_mutex *ww;

                        ww = container_of(lock, struct ww_mutex, base);
                        /*
                         * If ww->ctx is set the contents are undefined, only
                         * by acquiring wait_lock there is a guarantee that
                         * they are not invalid when reading.
                         *
                         * As such, when deadlock detection needs to be
                         * performed the optimistic spinning cannot be done.
                         */
                        if (ACCESS_ONCE(ww->ctx))
                                break;
                }

                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner))
                        break;

                /* Try to acquire the mutex if it is unlocked. */
                if (!mutex_is_locked(lock) &&
                    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
                        lock_acquired(&lock->dep_map, ip);
                        if (use_ww_ctx) {
                                struct ww_mutex *ww;

                                ww = container_of(lock, struct ww_mutex, base);
                                ww_mutex_set_context_fastpath(ww, ww_ctx);
                        }

                        mutex_set_owner(lock);
                        osq_unlock(&lock->osq);
                        preempt_enable();
                        return 0;
                }

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                cpu_relax_lowlatency();
        }
        osq_unlock(&lock->osq);
slowpath:
        /*
         * If we fell out of the spin path because of need_resched(),
         * reschedule now, before we try-lock the mutex. This avoids getting
         * scheduled out right after we obtained the mutex.
         */
        if (need_resched())
                schedule_preempt_disabled();
#endif
        spin_lock_mutex(&lock->wait_lock, flags);

        /*
         * Once more, try to acquire the lock. Only try-lock the mutex if
         * it is unlocked to reduce unnecessary xchg() operations.
         */
        if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
                goto skip_wait;
        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters. We only attempt the xchg if the count is
                 * non-negative in order to avoid unnecessary xchg operations:
                 */
                if (atomic_read(&lock->count) >= 0 &&
                    (atomic_xchg(&lock->count, -1) == 1))
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(signal_pending_state(state, task))) {
                        ret = -EINTR;
                        goto err;
                }

                if (use_ww_ctx && ww_ctx->acquired > 0) {
                        ret = __mutex_lock_check_stamp(lock, ww_ctx);
                        if (ret)
                                goto err;
                }

                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
        debug_mutex_free_waiter(&waiter);

skip_wait:
        /* got the lock - cleanup and rejoice! */
        lock_acquired(&lock->dep_map, ip);
        mutex_set_owner(lock);

        if (use_ww_ctx) {
                struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
                struct mutex_waiter *cur;

                /*
                 * This branch gets optimized out for the common case,
                 * and is only important for ww_mutex_lock.
                 */
                ww_mutex_lock_acquired(ww, ww_ctx);
                ww->ctx = ww_ctx;

                /*
                 * Give any possible sleeping processes the chance to wake up,
                 * so they can recheck if they have to back off.
                 */
                list_for_each_entry(cur, &lock->wait_list, list) {
                        debug_mutex_wake_waiter(lock, cur);
                        wake_up_process(cur->task);
                }
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
        preempt_enable();
        return 0;

err:
        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
        spin_unlock_mutex(&lock->wait_lock, flags);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
        preempt_enable();
        return ret;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
                            subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
                            0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE,
                                   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                                   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
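
/*
 * Illustrative sketch, not part of this file's implementation: taking two
 * mutexes of the same lock class at once needs a subclass annotation for
 * the second acquisition so lockdep does not flag it as recursive locking.
 * 'struct example_obj' and 'example_lock_pair' are hypothetical.
 */
struct example_obj {
        struct mutex lock;
};

static inline void example_lock_pair(struct example_obj *a, struct example_obj *b)
{
        mutex_lock(&a->lock);
        mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
        /* ... both objects are now stable ... */
        mutex_unlock(&b->lock);
        mutex_unlock(&a->lock);
}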
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
        unsigned tmp;

        if (ctx->deadlock_inject_countdown-- == 0) {
                tmp = ctx->deadlock_inject_interval;
                if (tmp > UINT_MAX/4)
                        tmp = UINT_MAX;
                else
                        tmp = tmp*2 + tmp + tmp/2;

                ctx->deadlock_inject_interval = tmp;
                ctx->deadlock_inject_countdown = tmp;
                ctx->contending_lock = lock;

                ww_mutex_unlock(lock);

                return -EDEADLK;
        }
#endif

        return 0;
}
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);
        if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);
int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);
        if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        /*
         * Some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here.
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}
/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);
/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval(&lock->count);
        if (likely(!ret)) {
                mutex_set_owner(lock);
                return 0;
        } else
                return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
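
/*
 * Illustrative sketch, not part of this file's implementation: an
 * interruptible acquisition as it might appear in a hypothetical syscall
 * or ioctl path; on a signal the lock is NOT held and the error must be
 * propagated instead of touching the protected data.
 */
static inline int example_locked_op_interruptible(struct mutex *lock, int *val)
{
        if (mutex_lock_interruptible(lock))
                return -EINTR;          /* interrupted while sleeping, lock not taken */
        (*val)++;
        mutex_unlock(lock);
        return 0;
}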
int __sched mutex_lock_killable(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval(&lock->count);
        if (likely(!ret)) {
                mutex_set_owner(lock);
                return 0;
        } else
                return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
                            NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
        return __mutex_lock_common(lock, TASK_KILLABLE, 0,
                                   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
                                   NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
                                   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                        struct ww_acquire_ctx *ctx)
{
        return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
                                   NULL, _RET_IP_, ctx, 1);
}

#endif
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        /* No need to trylock if the mutex is locked. */
        if (mutex_is_locked(lock))
                return 0;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }

        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
        if (ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);
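
/*
 * Illustrative sketch, not part of this file's implementation:
 * opportunistic locking with mutex_trylock(). Per the comment above, the
 * return convention matches spin_trylock(): non-zero means the lock was
 * taken. 'example_try_update' is hypothetical.
 */
static inline bool example_try_update(struct mutex *lock, int *val)
{
        if (!mutex_trylock(lock))
                return false;           /* contended, caller falls back to a slow path */
        (*val)++;
        mutex_unlock(lock);
        return true;
}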
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();

        ret = __mutex_fastpath_lock_retval(&lock->base.count);

        if (likely(!ret)) {
                ww_mutex_set_context_fastpath(lock, ctx);
                mutex_set_owner(&lock->base);
        } else
                ret = __ww_mutex_lock_slowpath(lock, ctx);
        return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);
int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();

        ret = __mutex_fastpath_lock_retval(&lock->base.count);

        if (likely(!ret)) {
                ww_mutex_set_context_fastpath(lock, ctx);
                mutex_set_owner(&lock->base);
        } else
                ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
        return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
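
/*
 * Illustrative sketch, not part of this file's implementation: the
 * "drop a reference, tear down under the lock only on the final put"
 * pattern that atomic_dec_and_mutex_lock() supports. 'struct
 * example_counted' and 'example_put' are hypothetical.
 */
struct example_counted {
        atomic_t refs;
        struct mutex lock;
};

static inline void example_put(struct example_counted *obj)
{
        if (!atomic_dec_and_mutex_lock(&obj->refs, &obj->lock))
                return;                 /* not the last reference: mutex was not taken */
        /* refs hit zero and obj->lock is held: safe to tear the object down */
        mutex_unlock(&obj->lock);
}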