/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
 * are used to keep track of the "owner is pending" and "lock has
 * waiters" state.
 *
 * owner	bit1	bit0
 * NULL		0	0	lock is free (fast acquire possible)
 * NULL		0	1	invalid state
 * NULL		1	0	Transitional state*
 * NULL		1	1	invalid state
 * taskpointer	0	0	lock is held (fast release possible)
 * taskpointer	0	1	task is pending owner
 * taskpointer	1	0	lock is held and has waiters
 * taskpointer	1	1	task is pending owner and lock has more waiters
 *
 * Pending ownership is assigned to the top (highest priority)
 * waiter of the lock, when the lock is released. The thread is woken
 * up and can now take the lock. Until the lock is taken (bit 0
 * cleared) a competing higher priority thread can steal the lock
 * which puts the woken up thread back on the waiters list.
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 and 1 of lock->owner are 0.
 *
 * (*) There is a small window where the owner can be NULL and the
 * "lock has waiters" bit is set. This can happen when grabbing the lock.
 * To prevent a cmpxchg of the owner releasing the lock, we need to set this
 * bit before looking at the lock, hence the reason this is a transitional
 * state.
 */
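
/*
 * Illustrative sketch of how the owner pointer is recovered from the
 * encoded field above. The real helpers are assumed to live in
 * rtmutex_common.h (they are not defined in this file); roughly:
 *
 *	static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 *	{
 *		return (struct task_struct *)
 *			((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
 *	}
 *
 * where RT_MUTEX_OWNER_MASKALL masks off bit 0 (owner pending) and
 * bit 1 (has waiters).
 */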

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
		   unsigned long mask)
{
	unsigned long val = (unsigned long)owner | mask;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}

/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}
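
/*
 * Worked example (kernel prio values: a lower number means a higher
 * priority): a SCHED_NORMAL owner with normal_prio 120 whose top pi
 * waiter sits at prio 20 gets min(20, 120) == 20, i.e. it is boosted
 * to the waiter's priority. Once the pi_waiters list is empty it
 * drops back to 120. Numbers are illustrative only.
 */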

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, top_task->pid);
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * Task can not go away as we did a get_task_struct() before!
	 */
	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter || !waiter->task)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock
	 * and made us the pending owner:
	 */
	if (orig_waiter && !orig_waiter->task)
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off, we check whether further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!spin_trylock(&lock->wait_lock)) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	spin_unlock_irqrestore(&task->pi_lock, flags);
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
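
/*
 * Illustrative scenario for the chain walk above (all numbers made
 * up): T1 (prio 20) blocks on L1, owned by T2 (prio 30), which is
 * itself blocked on L2, owned by T3 (prio 40). The walk starts at
 * T2: T2's waiter entry on L2 is requeued with T2's boosted prio
 * (20), then T3 becomes the new task and is boosted to 20 as well.
 * The loop terminates because T3 is not blocked on anything.
 */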

/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 */
static inline int try_to_steal_lock(struct rt_mutex *lock)
{
	struct task_struct *pendowner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *next;
	unsigned long flags;

	if (!rt_mutex_owner_pending(lock))
		return 0;

	if (pendowner == current)
		return 1;

	spin_lock_irqsave(&pendowner->pi_lock, flags);
	if (current->prio >= pendowner->prio) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 0;
	}

	/*
	 * Check if a waiter is enqueued on the pending owner's
	 * pi_waiters list. Remove it and readjust the pending owner's
	 * priority.
	 */
	if (likely(!rt_mutex_has_waiters(lock))) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 1;
	}

	/* No chain handling, pending owner is not blocked on anything: */
	next = rt_mutex_top_waiter(lock);
	plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
	__rt_mutex_adjust_prio(pendowner);
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	/*
	 * We are going to steal the lock and a waiter was
	 * enqueued on the pending owner's pi_waiters queue. So
	 * we have to enqueue this waiter into the
	 * current->pi_waiters list. This covers the case
	 * where current is boosted because it holds another
	 * lock and gets unboosted because the booster is
	 * interrupted, so we would delay a waiter with higher
	 * priority than current->normal_prio.
	 *
	 * Note: in the rare case of a SCHED_OTHER task changing
	 * its priority and thus stealing the lock, next->task
	 * might be current:
	 */
	if (likely(next->task != current)) {
		spin_lock_irqsave(&current->pi_lock, flags);
		plist_add(&next->pi_list_entry, &current->pi_waiters);
		__rt_mutex_adjust_prio(current);
		spin_unlock_irqrestore(&current->pi_lock, flags);
	}
	return 1;
}
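
/*
 * Example of the steal optimization above (illustrative numbers):
 * pending owner P (prio 30) has been woken but has not yet taken the
 * lock. A current running at prio 20 (numerically lower, i.e. higher
 * priority) fails the current->prio >= pendowner->prio test and so
 * steals the lock; P goes back onto the waiters list. A prio-40
 * current would pass the test and return 0 (no steal).
 */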

/*
 * Try to take an rt-mutex
 *
 * This fails:
 * - when the lock has a real owner
 * - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
		return 0;

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, current, 0);

	rt_mutex_deadlock_account_lock(lock, current);

	return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	unsigned long flags;
	int chain_walk = 0, res;

	spin_lock_irqsave(&current->pi_lock, flags);
	__rt_mutex_adjust_prio(current);
	waiter->task = current;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, current->prio);
	plist_node_init(&waiter->pi_list_entry, current->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	current->pi_blocked_on = waiter;

	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
		chain_walk = 1;

	if (!chain_walk)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 current);

	spin_lock(&lock->wait_lock);

	return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current tasks waiter list and from
 * the lock waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	struct task_struct *pendowner;
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);
	plist_del(&waiter->list_entry, &lock->wait_list);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);
	pendowner = waiter->task;
	waiter->task = NULL;

	rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

	spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * Clear the pi_blocked_on variable and enqueue a possible
	 * waiter into the pi_waiters list of the pending owner. This
	 * prevents a waiter with higher priority than
	 * pending-owner->normal_prio from remaining blocked on the
	 * unboosted (pending) owner in case the pending owner gets
	 * unboosted.
	 */
	spin_lock_irqsave(&pendowner->pi_lock, flags);

	WARN_ON(!pendowner->pi_blocked_on);
	WARN_ON(pendowner->pi_blocked_on != waiter);
	WARN_ON(pendowner->pi_blocked_on->lock != lock);

	pendowner->pi_blocked_on = NULL;

	if (rt_mutex_has_waiters(lock)) {
		struct rt_mutex_waiter *next;

		next = rt_mutex_top_waiter(lock);
		plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
	}
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	wake_up_process(pendowner);
}

/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	int chain_walk = 0;
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->task = NULL;
	current->pi_blocked_on = NULL;
	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (first && owner != current) {

		spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on)
			chain_walk = 1;

		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!chain_walk)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock)) {
		spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Set up the timer when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start(&timeout->timer, timeout->timer.expires,
			      HRTIMER_ABS);

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter.task) {
			ret = task_blocks_on_rt_mutex(lock, &waiter,
						      detect_deadlock);
			/*
			 * If we got woken up by the owner then start the
			 * loop all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter.task)) {
				/*
				 * Reset the return value. We might
				 * have returned with -EDEADLK and the
				 * owner released the lock while we
				 * were walking the pi chain.
				 */
				ret = 0;
				continue;
			}
			if (unlikely(ret))
				break;
		}

		spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(&waiter);

		if (waiter.task)
			schedule_rt_mutex(lock);

		spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	set_current_state(TASK_RUNNING);

	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
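
/*
 * Minimal usage sketch (not part of this file; my_rt_lock and
 * my_driver_work are made-up names). DEFINE_RT_MUTEX comes from
 * <linux/rtmutex.h>:
 *
 *	static DEFINE_RT_MUTEX(my_rt_lock);
 *
 *	static void my_driver_work(void)
 *	{
 *		rt_mutex_lock(&my_rt_lock);
 *		update_shared_state();	(PI applies while the lock is held)
 *		rt_mutex_unlock(&my_rt_lock);
 *	}
 */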

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible, the timeout
 *			 structure is provided by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
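
/*
 * Usage sketch for the timed variant, modeled on how the PI-futex
 * code sets up an hrtimer_sleeper (an assumption about the caller's
 * side; expiry_time is a made-up name):
 *
 *	struct hrtimer_sleeper to;
 *	int ret;
 *
 *	hrtimer_init(&to.timer, CLOCK_MONOTONIC, HRTIMER_ABS);
 *	hrtimer_init_sleeper(&to, current);
 *	to.timer.expires = expiry_time;
 *
 *	ret = rt_mutex_timed_lock(lock, &to, 0);
 *	(ret is 0, -EINTR or -ETIMEDOUT)
 */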

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list, &lock->wait_lock);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
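
/*
 * Dynamic initialization sketch: callers normally use the
 * rt_mutex_init() macro from <linux/rtmutex.h>, which passes the
 * lock's name for the debug case (illustrative; my_lock is a made-up
 * name):
 *
 *	struct rt_mutex my_lock;
 *
 *	rt_mutex_init(&my_lock);
 *	rt_mutex_lock(&my_lock);
 *	rt_mutex_unlock(&my_lock);
 */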

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:	the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner, 0);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL, 0);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * within the wait_lock of the lock
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}