/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner        bit0
 * NULL         0       lock is free (fast acquire possible)
 * NULL         1       lock is free and has waiters and the top waiter
 *                      is going to take the lock*
 * taskpointer  0       lock is held (fast release possible)
 * taskpointer  1       lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * while ->wait_lock is held. To prevent any fast path cmpxchg on the lock,
 * we need to set bit 0 before looking at the lock, and the owner may be
 * NULL during this small window, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
        unsigned long val = (unsigned long)owner;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                clear_rt_mutex_waiters(lock);
}
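
/*
 * Illustrative note, not part of the original file: because task_struct
 * pointers are at least 2-byte aligned, bit 0 of lock->owner is free to
 * carry the "has waiters" flag. Unpacking is plain pointer arithmetic,
 * roughly what rt_mutex_owner() in rtmutex_common.h does:
 *
 *      struct task_struct *owner = (struct task_struct *)
 *              ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 *      int has_waiters = (unsigned long)lock->owner & RT_MUTEX_HAS_WAITERS;
 */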

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)        (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif

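/*
 * Illustrative note, not part of the original file: with the cmpxchg fast
 * path compiled in, an uncontended acquire is a single
 * rt_mutex_cmpxchg(lock, NULL, current) swinging the owner from NULL to
 * current, and an uncontended release is the reverse cmpxchg from current
 * back to NULL. Either one can only succeed while bit 0 is clear, i.e. in
 * the "fast acquire/release possible" rows of the table above.
 */
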
/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->pi_list_entry.prio,
                   task->normal_prio);
}
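
/*
 * Worked example (illustrative only, not part of the original file): kernel
 * priorities are "lower value means higher priority". A task with
 * normal_prio 120 whose top pi waiter has prio 40 gets min(40, 120) == 40,
 * i.e. it is boosted. A task with normal_prio 10 and a top pi waiter of
 * prio 40 keeps 10: waiters can only boost the owner, never deboost it
 * below its normal priority.
 */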

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task);

        if (task->prio != prio)
                rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold
         * a maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return deadlock_detect ? -EDEADLK : 0;
        }
 retry:
        /*
         * The task can not go away as we did a get_task_struct() before!
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock.
         */
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;

        /*
         * Drop out, when the task has no waiters. Note that
         * top_waiter can be NULL, when we are in deboosting
         * mode!
         */
        if (top_waiter && (!task_has_pi_waiters(task) ||
                           top_waiter != task_top_pi_waiter(task)))
                goto out_unlock_pi;

        /*
         * When deadlock detection is off, we check whether further
         * priority adjustment is necessary.
         */
        if (!detect_deadlock && waiter->list_entry.prio == task->prio)
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /* Deadlock detection */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
                ret = deadlock_detect ? -EDEADLK : 0;
                goto out_unlock_pi;
        }

        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter */
        plist_del(&waiter->list_entry, &lock->wait_list);
        waiter->list_entry.prio = task->prio;
        plist_add(&waiter->list_entry, &lock->wait_list);

        /* Release the task */
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        if (!rt_mutex_owner(lock)) {
                /*
                 * If the requeue above changed the top waiter, then we need
                 * to wake the new top waiter up to try to get the lock.
                 */

                if (top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
                raw_spin_unlock(&lock->wait_lock);
                goto out_put_task;
        }
        put_task_struct(task);

        /* Grab the next task */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                /* Boost the owner */
                plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);

        } else if (top_waiter == waiter) {
                /* Deboost the owner */
                plist_del(&waiter->pi_list_entry, &task->pi_waiters);
                waiter = rt_mutex_top_waiter(lock);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);
        }

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        top_waiter = rt_mutex_top_waiter(lock);
        raw_spin_unlock(&lock->wait_lock);

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
        put_task_struct(task);

        return ret;
}

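/*
 * Summary comment added for orientation (not in the original file): each pass
 * of the loop above holds at most two locks - the blocked task's pi_lock and
 * one lock's wait_lock - requeues that task's waiter to reflect its current
 * priority, then propagates the boost (or deboost) to the lock owner and
 * restarts with the owner as the new task, until the chain ends, nothing
 * changes any more, or a deadlock is detected.
 */
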
/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                                struct rt_mutex_waiter *waiter)
{
        /*
         * We have to be careful here if the atomic speedups are
         * enabled, such that, when
         *  - no other waiter is on the lock
         *  - the lock has been released since we did the cmpxchg
         * the lock can be released or taken while we are doing the
         * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
         *
         * The atomic acquire/release aware variant of
         * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
         * the WAITERS bit, the atomic release / acquire can not
         * happen anymore and lock->wait_lock protects us from the
         * non-atomic case.
         *
         * Note that this might set lock->owner =
         * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
         * any more. This is fixed up when we take the ownership.
         * This is the transitional state explained at the top of this file.
         */
        mark_rt_mutex_waiters(lock);

        if (rt_mutex_owner(lock))
                return 0;

        /*
         * The task will get the lock when one of these conditions holds:
         * 1) there is no waiter
         * 2) the task has a higher priority than all waiters
         * 3) the task is the top waiter
         */
        if (rt_mutex_has_waiters(lock)) {
                if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
                        if (!waiter || waiter != rt_mutex_top_waiter(lock))
                                return 0;
                }
        }

        if (waiter || rt_mutex_has_waiters(lock)) {
                unsigned long flags;
                struct rt_mutex_waiter *top;

                raw_spin_lock_irqsave(&task->pi_lock, flags);

                /* remove the queued waiter. */
                if (waiter) {
                        plist_del(&waiter->list_entry, &lock->wait_list);
                        task->pi_blocked_on = NULL;
                }

                /*
                 * We have to enqueue the top waiter (if it exists) into
                 * task->pi_waiters list.
                 */
                if (rt_mutex_has_waiters(lock)) {
                        top = rt_mutex_top_waiter(lock);
                        top->pi_list_entry.prio = top->list_entry.prio;
                        plist_add(&top->pi_list_entry, &task->pi_waiters);
                }
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        }

        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        rt_mutex_set_owner(lock, task);

        rt_mutex_deadlock_account_lock(lock, task);

        return 1;
}

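/*
 * Note added for orientation (not in the original file): a failed slow-path
 * attempt still leaves the WAITERS bit set, because try_to_take_rt_mutex()
 * calls mark_rt_mutex_waiters() unconditionally. That is why slow-path
 * callers such as rt_mutex_slowlock() and rt_mutex_slowtrylock() run
 * fixup_rt_mutex_waiters() before dropping wait_lock - it clears the bit
 * again if no waiters are actually queued (case (**) above).
 */
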
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   struct task_struct *task,
                                   int detect_deadlock)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        unsigned long flags;
        int chain_walk = 0, res;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        waiter->task = task;
        waiter->lock = lock;
        plist_node_init(&waiter->list_entry, task->prio);
        plist_node_init(&waiter->pi_list_entry, task->prio);

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        plist_add(&waiter->list_entry, &lock->wait_list);

        task->pi_blocked_on = waiter;

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        if (!owner)
                return 0;

        if (waiter == rt_mutex_top_waiter(lock)) {
                raw_spin_lock_irqsave(&owner->pi_lock, flags);
                plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
                plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        }
        else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
                chain_walk = 1;

        if (!chain_walk)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * Gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
                                         task);

        raw_spin_lock(&lock->wait_lock);

        return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&current->pi_lock, flags);

        waiter = rt_mutex_top_waiter(lock);

        /*
         * Remove it from current->pi_waiters. We do not adjust a
         * possible priority boost right now. We execute wakeup in the
         * boosted mode and go back to normal after releasing
         * lock->wait_lock.
         */
        plist_del(&waiter->pi_list_entry, &current->pi_waiters);

        rt_mutex_set_owner(lock, NULL);

        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        wake_up_process(waiter->task);
}

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held, and only after
 * try_to_take_rt_mutex() has just failed.
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        unsigned long flags;
        int chain_walk = 0;

        raw_spin_lock_irqsave(&current->pi_lock, flags);
        plist_del(&waiter->list_entry, &lock->wait_list);
        current->pi_blocked_on = NULL;
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        if (!owner)
                return;

        if (first) {

                raw_spin_lock_irqsave(&owner->pi_lock, flags);

                plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

                if (rt_mutex_has_waiters(lock)) {
                        struct rt_mutex_waiter *next;

                        next = rt_mutex_top_waiter(lock);
                        plist_add(&next->pi_list_entry, &owner->pi_waiters);
                }
                __rt_mutex_adjust_prio(owner);

                if (owner->pi_blocked_on)
                        chain_walk = 1;

                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        }

        WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

        if (!chain_walk)
                return;

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

        raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || waiter->list_entry.prio == task->prio) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);
        rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:               the rt_mutex to take
 * @state:              the state the task should block in (TASK_INTERRUPTIBLE
 *                      or TASK_UNINTERRUPTIBLE)
 * @timeout:            the pre-initialized and started timer, or NULL for none
 * @waiter:             the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
                    struct hrtimer_sleeper *timeout,
                    struct rt_mutex_waiter *waiter)
{
        int ret = 0;

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock, current, waiter))
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout. Ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                raw_spin_unlock(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(waiter);

                schedule_rt_mutex(lock);

                raw_spin_lock(&lock->wait_lock);
                set_current_state(state);
        }

        return ret;
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        struct rt_mutex_waiter waiter;
        int ret = 0;

        debug_rt_mutex_init_waiter(&waiter);

        raw_spin_lock(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 0;
        }

        set_current_state(state);

        /* Setup the timer, when timeout != NULL */
        if (unlikely(timeout)) {
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
                if (!hrtimer_active(&timeout->timer))
                        timeout->task = NULL;
        }

        ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

        if (likely(!ret))
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret))
                remove_waiter(lock, &waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret = 0;

        raw_spin_lock(&lock->wait_lock);

        if (likely(rt_mutex_owner(lock) != current)) {

                ret = try_to_take_rt_mutex(lock, current, NULL);
                /*
                 * try_to_take_rt_mutex() sets the lock waiters
                 * bit unconditionally. Clean this up.
                 */
                fixup_rt_mutex_waiters(lock);
        }

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
        raw_spin_lock(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        if (!rt_mutex_has_waiters(lock)) {
                lock->owner = NULL;
                raw_spin_unlock(&lock->wait_lock);
                return;
        }

        wakeup_next_waiter(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout, int detect_deadlock,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
        return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    void (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
 *
 * @lock:               the rt_mutex to be locked
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
                                        int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout
 *                       structure provided by the caller
 *
 * @lock:               the rt_mutex to be locked
 * @timeout:            timeout structure or NULL (no timeout)
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -ETIMEDOUT   when the timeout expired
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
                    int detect_deadlock)
{
        might_sleep();

        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
                                       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
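
/*
 * Note on usage (illustrative, not part of the original file): the
 * hrtimer_sleeper handed in here should already be initialized and have its
 * expiry time set by the caller; rt_mutex_slowlock() arms it. The futex PI
 * code, for instance, prepares it with hrtimer_init_on_stack(),
 * hrtimer_init_sleeper() and hrtimer_set_expires() before calling in. When
 * the timer fires, the sleeper's callback clears timeout->task, which is
 * what makes __rt_mutex_slowlock() return -ETIMEDOUT.
 */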

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:       the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
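
/*
 * Usage sketch (illustrative, not part of the original file): the public API
 * mirrors a regular mutex. Assuming a statically defined lock:
 *
 *      static DEFINE_RT_MUTEX(example_lock);
 *
 *      rt_mutex_lock(&example_lock);
 *      ... critical section; a blocked higher-priority waiter boosts us ...
 *      rt_mutex_unlock(&example_lock);
 *
 * rt_mutex_trylock() and rt_mutex_lock_interruptible() follow the usual
 * trylock / interruptible conventions.
 */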

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
        WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
        lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        lock->owner = NULL;
        raw_spin_lock_init(&lock->wait_lock);
        plist_head_init_raw(&lock->wait_list, &lock->wait_lock);

        debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
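
/*
 * Usage note (illustrative, not part of the original file): dynamically
 * allocated locks are normally initialized through the rt_mutex_init()
 * wrapper from <linux/rtmutex.h>, which supplies a name for the debug case:
 *
 *      struct rt_mutex m;
 *
 *      rt_mutex_init(&m);
 */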

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *                              proxy owner
 *
 * @lock:       the rt_mutex to be locked
 * @proxy_owner:the task to set as owner
 *
 * No locking. Caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                struct task_struct *proxy_owner)
{
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
        rt_mutex_set_owner(lock, proxy_owner);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:       the rt_mutex to be locked
 *
 * No locking. Caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL);
        rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:               the rt_mutex to take
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @task:               the task to prepare
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                              struct rt_mutex_waiter *waiter,
                              struct task_struct *task, int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        if (try_to_take_rt_mutex(lock, task, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 1;
        }

        ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

        if (ret && !rt_mutex_owner(lock)) {
                /*
                 * Reset the return value. We might have
                 * returned with -EDEADLK and the owner
                 * released the lock while we were walking the
                 * pi chain.  Let the waiter sort it out.
                 */
                ret = 0;
        }

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        raw_spin_unlock(&lock->wait_lock);

        debug_rt_mutex_print_deadlock(waiter);

        return ret;
}

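/*
 * Note added for orientation (not in the original file): the two proxy-lock
 * halves are meant to be used together. rt_mutex_start_proxy_lock() above
 * enqueues @task as a waiter (or takes the lock for it) on its behalf, and
 * the woken task later completes or cleans up the acquisition itself via
 * rt_mutex_finish_proxy_lock() below. This split is what FUTEX_REQUEUE_PI
 * relies on.
 */
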
/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                return NULL;

        return rt_mutex_top_waiter(lock)->task;
}
1002 | |
1003 | /** | |
1004 | * rt_mutex_finish_proxy_lock() - Complete lock acquisition | |
1005 | * @lock: the rt_mutex we were woken on | |
1006 | * @to: the timeout, null if none. hrtimer should already have | |
1007 | * been started. | |
1008 | * @waiter: the pre-initialized rt_mutex_waiter | |
1009 | * @detect_deadlock: perform deadlock detection (1) or not (0) | |
1010 | * | |
1011 | * Complete the lock acquisition started our behalf by another thread. | |
1012 | * | |
1013 | * Returns: | |
1014 | * 0 - success | |
1015 | * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK | |
1016 | * | |
1017 | * Special API call for PI-futex requeue support | |
1018 | */ | |
1019 | int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, | |
1020 | struct hrtimer_sleeper *to, | |
1021 | struct rt_mutex_waiter *waiter, | |
1022 | int detect_deadlock) | |
1023 | { | |
1024 | int ret; | |
1025 | ||
d209d74d | 1026 | raw_spin_lock(&lock->wait_lock); |
8dac456a DH |
1027 | |
1028 | set_current_state(TASK_INTERRUPTIBLE); | |
1029 | ||
8161239a | 1030 | ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); |
8dac456a DH |
1031 | |
1032 | set_current_state(TASK_RUNNING); | |
1033 | ||
8161239a | 1034 | if (unlikely(ret)) |
8dac456a DH |
1035 | remove_waiter(lock, waiter); |
1036 | ||
1037 | /* | |
1038 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might | |
1039 | * have to fix that up. | |
1040 | */ | |
1041 | fixup_rt_mutex_waiters(lock); | |
1042 | ||
d209d74d | 1043 | raw_spin_unlock(&lock->wait_lock); |
8dac456a | 1044 | |
8dac456a DH |
1045 | return ret; |
1046 | } |