/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
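
/*
 * For orientation, a minimal sketch of the userspace wait/wake protocol
 * this file implements.  Illustrative only, not part of the kernel: it
 * assumes the raw syscall(2) wrapper and the FUTEX_* constants from
 * <linux/futex.h>.
 *
 *	int var = 0;
 *
 *	// Waiter: sleep only while 'var' is still observed to be 0.
 *	// A changed value makes FUTEX_WAIT fail with EWOULDBLOCK instead
 *	// of blocking, so the re-check loop handles lost races.
 *	while (var == 0)
 *		syscall(__NR_futex, &var, FUTEX_WAIT, 0, NULL);
 *
 *	// Waker: publish the new value first, then wake one waiter.
 *	var = 1;
 *	syscall(__NR_futex, &var, FUTEX_WAKE, 1);
 */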
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <asm/futex.h>

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Futexes are matched on equal values of this key.
 * The key type depends on whether it's a shared or private mapping.
 * Don't rearrange members without looking at hash_futex().
 *
 * offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
 * We set bit 0 to indicate if it's an inode-based key.
 */
union futex_key {
	struct {
		unsigned long pgoff;
		struct inode *inode;
		int offset;
	} shared;
	struct {
		unsigned long uaddr;
		struct mm_struct *mm;
		int offset;
	} private;
	struct {
		unsigned long word;
		void *ptr;
		int offset;
	} both;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when list_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiters, then make the second condition true.
 */
struct futex_q {
	struct list_head list;
	wait_queue_head_t waiters;

	/* Which hash list lock to use. */
	spinlock_t *lock_ptr;

	/* Key which the futex is hashed on. */
	union futex_key key;

	/* For fd, sigio sent using these. */
	int fd;
	struct file *filp;
};

/*
 * We split the single global futex lock into per-hash-bucket locks.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct list_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/* Futex-fs vfsmount entry: */
static struct vfsmount *futex_mnt;

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32 *)&key->both.word,
			  (sizeof(key->both.word) + sizeof(key->both.ptr)) / 4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS) - 1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Get parameters which are the keys for a futex.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * Returns: 0, or negative error code.
 * The key words are stored in *key on success.
 *
 * Should be called with &current->mm->mmap_sem held, but NOT any spinlocks.
 */
static int get_futex_key(unsigned long uaddr, union futex_key *key)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = uaddr % PAGE_SIZE;
	if (unlikely((key->both.offset % sizeof(u32)) != 0))
		return -EINVAL;
	uaddr -= key->both.offset;

	/*
	 * The futex is hashed differently depending on whether
	 * it's in a shared or private mapping.  So check vma first.
	 */
	vma = find_extend_vma(mm, uaddr);
	if (unlikely(!vma))
		return -EFAULT;

	/*
	 * Permissions.
	 */
	if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
		return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.  Therefore we use
	 * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
	 * mappings of _writable_ handles.
	 */
	if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
		key->private.mm = mm;
		key->private.uaddr = uaddr;
		return 0;
	}

	/*
	 * Linear file mappings are also simple.
	 */
	key->shared.inode = vma->vm_file->f_dentry->d_inode;
	key->both.offset++; /* Bit 0 of offset indicates inode-based key. */
	if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
		key->shared.pgoff = (((uaddr - vma->vm_start) >> PAGE_SHIFT)
				     + vma->vm_pgoff);
		return 0;
	}

	/*
	 * We could walk the page table to read the non-linear
	 * pte, and get the page index without fetching the page
	 * from swap.  But that's a lot of code to duplicate here
	 * for a rare case, so we simply fetch the page.
	 */
	err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
	if (err >= 0) {
		key->shared.pgoff =
			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
		put_page(page);
		return 0;
	}
	return err;
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 * NOTE: mmap_sem MUST be held between get_futex_key() and calling this
 * function, if it is called at all.  mmap_sem keeps key->shared.inode valid.
 */
static inline void get_key_refs(union futex_key *key)
{
	if (key->both.ptr != 0) {
		if (key->both.offset & 1)
			atomic_inc(&key->shared.inode->i_count);
		else
			atomic_inc(&key->private.mm->mm_count);
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_key_refs(union futex_key *key)
{
	if (key->both.ptr != 0) {
		if (key->both.offset & 1)
			iput(key->shared.inode);
		else
			mmdrop(key->private.mm);
	}
}

/*
 * Fetch a futex value from userspace while a hash-bucket lock is held.
 * Bumping the preempt count puts us in_atomic(), so a fault fails fast
 * with -EFAULT instead of sleeping in the page-fault handler.
 */
static inline int get_futex_value_locked(int *dest, int __user *from)
{
	int ret;

	inc_preempt_count();
	ret = __copy_from_user_inatomic(dest, from, sizeof(int));
	dec_preempt_count();

	return ret ? -EFAULT : 0;
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	list_del_init(&q->list);
	if (q->filp)
		send_sigio(&q->filp->f_owner, q->fd, POLL_IN);
	/*
	 * The lock in wake_up_all() is a crucial memory barrier after the
	 * list_del_init() and also before assigning to q->lock_ptr.
	 */
	wake_up_all(&q->waiters);
	/*
	 * The waiting task can free the futex_q as soon as this is written,
	 * without taking any locks.  This must come last.
	 *
	 * A memory barrier is required here to prevent the following store
	 * to lock_ptr from getting ahead of the wakeup.  Clearing the lock
	 * at the end of wake_up_all() does not prevent this store from
	 * moving.
	 */
	wmb();
	q->lock_ptr = NULL;
}

/*
 * Wake up to nr_wake waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int futex_wake(unsigned long uaddr, int nr_wake)
{
	union futex_key key;
	struct futex_hash_bucket *bh;
	struct list_head *head;
	struct futex_q *this, *next;
	int ret;

	down_read(&current->mm->mmap_sem);

	ret = get_futex_key(uaddr, &key);
	if (unlikely(ret != 0))
		goto out;

	bh = hash_futex(&key);
	spin_lock(&bh->lock);
	head = &bh->chain;

	list_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&bh->lock);
out:
	up_read(&current->mm->mmap_sem);
	return ret;
}

/*
 * Atomically apply the operation encoded in 'op' to the futex word at
 * uaddr2, wake up to nr_wake waiters on uaddr1 and, if the operation's
 * comparison against the old value succeeds, up to nr_wake2 waiters on
 * uaddr2 as well.
 */
static int futex_wake_op(unsigned long uaddr1, unsigned long uaddr2,
			 int nr_wake, int nr_wake2, int op)
{
	union futex_key key1, key2;
	struct futex_hash_bucket *bh1, *bh2;
	struct list_head *head;
	struct futex_q *this, *next;
	int ret, op_ret, attempt = 0;

retryfull:
	down_read(&current->mm->mmap_sem);

	ret = get_futex_key(uaddr1, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, &key2);
	if (unlikely(ret != 0))
		goto out;

	bh1 = hash_futex(&key1);
	bh2 = hash_futex(&key2);

retry:
	if (bh1 < bh2)
		spin_lock(&bh1->lock);
	spin_lock(&bh2->lock);
	if (bh1 > bh2)
		spin_lock(&bh1->lock);

	op_ret = futex_atomic_op_inuser(op, (int __user *)uaddr2);
	if (unlikely(op_ret < 0)) {
		int dummy;

		spin_unlock(&bh1->lock);
		if (bh1 != bh2)
			spin_unlock(&bh2->lock);

#ifndef CONFIG_MMU
		/* we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking */
		ret = op_ret;
		goto out;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out;
		}

		/* futex_atomic_op_inuser needs to both read and write
		 * *(int __user *)uaddr2, but we can't modify it
		 * non-atomically.  Therefore, if get_user below is not
		 * enough, we need to handle the fault ourselves, while
		 * still holding the mmap_sem. */
		if (attempt++) {
			struct vm_area_struct *vma;
			struct mm_struct *mm = current->mm;

			ret = -EFAULT;
			if (attempt >= 2 ||
			    !(vma = find_vma(mm, uaddr2)) ||
			    vma->vm_start > uaddr2 ||
			    !(vma->vm_flags & VM_WRITE))
				goto out;

			switch (handle_mm_fault(mm, vma, uaddr2, 1)) {
			case VM_FAULT_MINOR:
				current->min_flt++;
				break;
			case VM_FAULT_MAJOR:
				current->maj_flt++;
				break;
			default:
				goto out;
			}
			goto retry;
		}

		/* If we would have faulted, release mmap_sem,
		 * fault it in and start all over again. */
		up_read(&current->mm->mmap_sem);

		ret = get_user(dummy, (int __user *)uaddr2);
		if (ret)
			return ret;

		goto retryfull;
	}

	head = &bh1->chain;

	list_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key1)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &bh2->chain;

		op_ret = 0;
		list_for_each_entry_safe(this, next, head, list) {
			if (match_futex(&this->key, &key2)) {
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

	spin_unlock(&bh1->lock);
	if (bh1 != bh2)
		spin_unlock(&bh2->lock);
out:
	up_read(&current->mm->mmap_sem);
	return ret;
}
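
/*
 * The 'op' argument above is built with the FUTEX_OP() macro from
 * <linux/futex.h>: it encodes an arithmetic/logic operation to apply to
 * *uaddr2, plus a comparison against the old value.  An illustrative
 * userspace sketch (not part of this file): atomically clear a flag at
 * uaddr2 and wake one waiter there only if its old value was nonzero,
 * while also waking one waiter on uaddr1.  Note that nr_wake2 travels
 * in the timeout slot of the syscall:
 *
 *	syscall(__NR_futex, uaddr1, FUTEX_WAKE_OP, 1, (void *)1, uaddr2,
 *		FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_NE, 0));
 */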

/*
 * Wake up to nr_wake waiters hashed on one physical page, and requeue
 * up to nr_requeue of the remaining waiters onto another physical page.
 */
static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
			 int nr_wake, int nr_requeue, int *valp)
{
	union futex_key key1, key2;
	struct futex_hash_bucket *bh1, *bh2;
	struct list_head *head1;
	struct futex_q *this, *next;
	int ret, drop_count = 0;

retry:
	down_read(&current->mm->mmap_sem);

	ret = get_futex_key(uaddr1, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, &key2);
	if (unlikely(ret != 0))
		goto out;

	bh1 = hash_futex(&key1);
	bh2 = hash_futex(&key2);

	if (bh1 < bh2)
		spin_lock(&bh1->lock);
	spin_lock(&bh2->lock);
	if (bh1 > bh2)
		spin_lock(&bh1->lock);

	if (likely(valp != NULL)) {
		int curval;

		ret = get_futex_value_locked(&curval, (int __user *)uaddr1);

		if (unlikely(ret)) {
			spin_unlock(&bh1->lock);
			if (bh1 != bh2)
				spin_unlock(&bh2->lock);

			/* If we would have faulted, release mmap_sem, fault
			 * it in and start all over again.
			 */
			up_read(&current->mm->mmap_sem);

			ret = get_user(curval, (int __user *)uaddr1);

			if (!ret)
				goto retry;

			return ret;
		}
		if (curval != *valp) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	head1 = &bh1->chain;
	list_for_each_entry_safe(this, next, head1, list) {
		if (!match_futex(&this->key, &key1))
			continue;
		if (++ret <= nr_wake) {
			wake_futex(this);
		} else {
			list_move_tail(&this->list, &bh2->chain);
			this->lock_ptr = &bh2->lock;
			this->key = key2;
			get_key_refs(&key2);
			drop_count++;

			if (ret - nr_wake >= nr_requeue)
				break;
			/* Make sure to stop if key1 == key2 */
			if (head1 == &bh2->chain && head1 != &next->list)
				head1 = &this->list;
		}
	}

out_unlock:
	spin_unlock(&bh1->lock);
	if (bh1 != bh2)
		spin_unlock(&bh2->lock);

	/* drop_key_refs() must be called outside the spinlocks. */
	while (--drop_count >= 0)
		drop_key_refs(&key1);

out:
	up_read(&current->mm->mmap_sem);
	return ret;
}
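
/*
 * An illustrative use of requeueing (userspace sketch, not part of this
 * file): a pthread_cond_broadcast()-style wakeup wakes one waiter on the
 * condvar word and moves the rest onto the mutex word, instead of waking
 * them all and letting them stampede the mutex ("thundering herd").  For
 * FUTEX_CMP_REQUEUE, val3 carries the expected condvar value, and the
 * requeue count travels in the timeout slot:
 *
 *	syscall(__NR_futex, &cond_word, FUTEX_CMP_REQUEUE, 1,
 *		(void *)INT_MAX, &mutex_word, expected_cond_val);
 *
 * 'cond_word', 'mutex_word' and 'expected_cond_val' are hypothetical
 * names used only for this sketch.
 */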

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *
queue_lock(struct futex_q *q, int fd, struct file *filp)
{
	struct futex_hash_bucket *bh;

	q->fd = fd;
	q->filp = filp;

	init_waitqueue_head(&q->waiters);

	get_key_refs(&q->key);
	bh = hash_futex(&q->key);
	q->lock_ptr = &bh->lock;

	spin_lock(&bh->lock);
	return bh;
}

static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *bh)
{
	list_add_tail(&q->list, &bh->chain);
	spin_unlock(&bh->lock);
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *bh)
{
	spin_unlock(&bh->lock);
	drop_key_refs(&q->key);
}

/*
 * queue_me and unqueue_me must be called as a pair, each
 * exactly once.  They are called with the hashed spinlock held.
 */

/* The key must be already stored in q->key. */
static void queue_me(struct futex_q *q, int fd, struct file *filp)
{
	struct futex_hash_bucket *bh;

	bh = queue_lock(q, fd, filp);
	__queue_me(q, bh);
}

/* Return 1 if we were still queued (i.e. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
	int ret = 0;
	spinlock_t *lock_ptr;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		WARN_ON(list_empty(&q->list));
		list_del(&q->list);
		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_key_refs(&q->key);
	return ret;
}

static int futex_wait(unsigned long uaddr, int val, unsigned long time)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret, curval;
	struct futex_q q;
	struct futex_hash_bucket *bh;

retry:
	down_read(&current->mm->mmap_sem);

	ret = get_futex_key(uaddr, &q.key);
	if (unlikely(ret != 0))
		goto out_release_sem;

	bh = queue_lock(&q, -1, NULL);

	/*
	 * Access the page AFTER the futex is queued.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall.  This is
	 * rare, but normal.
	 *
	 * We hold the mmap semaphore, so the mapping cannot have changed
	 * since we looked it up in get_futex_key.
	 */

	ret = get_futex_value_locked(&curval, (int __user *)uaddr);

	if (unlikely(ret)) {
		queue_unlock(&q, bh);

		/* If we would have faulted, release mmap_sem, fault it in and
		 * start all over again.
		 */
		up_read(&current->mm->mmap_sem);

		ret = get_user(curval, (int __user *)uaddr);

		if (!ret)
			goto retry;
		return ret;
	}
	if (curval != val) {
		ret = -EWOULDBLOCK;
		queue_unlock(&q, bh);
		goto out_release_sem;
	}

	/* Only actually queue if *uaddr contained val. */
	__queue_me(&q, bh);

	/*
	 * Now the futex is queued and we have checked the data, we
	 * don't want to hold mmap_sem while we sleep.
	 */
	up_read(&current->mm->mmap_sem);

	/*
	 * There might have been scheduling since the queue_me(), as we
	 * cannot hold a spinlock across the get_user() in case it
	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
	 * queueing ourselves into the futex hash.  This code thus has to
	 * rely on the futex_wake() code removing us from the hash when it
	 * wakes us up.
	 */

	/* add_wait_queue is the barrier after __set_current_state. */
	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&q.waiters, &wait);
	/*
	 * !list_empty() is safe here without any lock.
	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (likely(!list_empty(&q.list)))
		time = schedule_timeout(time);
	__set_current_state(TASK_RUNNING);

	/*
	 * NOTE: we don't remove ourselves from the waitqueue because
	 * we are the only user of it.
	 */

	/* If we were woken (and unqueued), we succeeded, regardless of
	 * anything else that happened. */
	if (!unqueue_me(&q))
		return 0;
	if (time == 0)
		return -ETIMEDOUT;
	/* We expect signal_pending(current), but another thread may
	 * have handled it for us already. */
	return -EINTR;

out_release_sem:
	up_read(&current->mm->mmap_sem);
	return ret;
}

static int futex_close(struct inode *inode, struct file *filp)
{
	struct futex_q *q = filp->private_data;

	unqueue_me(q);
	kfree(q);
	return 0;
}

/* This is one-shot: once it's gone off you need a new fd */
static unsigned int futex_poll(struct file *filp,
			       struct poll_table_struct *wait)
{
	struct futex_q *q = filp->private_data;
	int ret = 0;

	poll_wait(filp, &q->waiters, wait);

	/*
	 * list_empty() is safe here without any lock.
	 * q->lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (list_empty(&q->list))
		ret = POLLIN | POLLRDNORM;

	return ret;
}

static struct file_operations futex_fops = {
	.release	= futex_close,
	.poll		= futex_poll,
};

/*
 * The signal argument lets the caller avoid the race that would occur
 * if SIGIO delivery were only configured after the fd was returned.
 */
static int futex_fd(unsigned long uaddr, int signal)
{
	struct futex_q *q;
	struct file *filp;
	int ret, err;

	ret = -EINVAL;
	if (!valid_signal(signal))
		goto out;

	ret = get_unused_fd();
	if (ret < 0)
		goto out;
	filp = get_empty_filp();
	if (!filp) {
		put_unused_fd(ret);
		ret = -ENFILE;
		goto out;
	}
	filp->f_op = &futex_fops;
	filp->f_vfsmnt = mntget(futex_mnt);
	filp->f_dentry = dget(futex_mnt->mnt_root);
	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;

	if (signal) {
		err = f_setown(filp, current->pid, 1);
		if (err < 0)
			goto error;
		filp->f_owner.signum = signal;
	}

	q = kmalloc(sizeof(*q), GFP_KERNEL);
	if (!q) {
		err = -ENOMEM;
		goto error;
	}

	down_read(&current->mm->mmap_sem);
	err = get_futex_key(uaddr, &q->key);

	if (unlikely(err != 0)) {
		up_read(&current->mm->mmap_sem);
		kfree(q);
		goto error;
	}

	/*
	 * queue_me() must be called before releasing mmap_sem, because
	 * key->shared.inode needs to be referenced while holding it.
	 */
	filp->private_data = q;

	queue_me(q, ret, filp);
	up_read(&current->mm->mmap_sem);

	/* Now we map fd to filp, so userspace can access it */
	fd_install(ret, filp);
out:
	return ret;
error:
	put_unused_fd(ret);
	put_filp(filp);
	ret = err;
	goto out;
}

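/*
 * An illustrative FUTEX_FD sketch (userspace, not part of this file):
 * obtain a pollable fd for a futex, then wait for a wakeup with poll(2).
 * Passing a nonzero signal number as 'val' additionally requests SIGIO
 * delivery on wakeup:
 *
 *	int fd = syscall(__NR_futex, &var, FUTEX_FD, 0, NULL);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// returns once futex_wake() hits &var
 *	close(fd);		// one-shot: a new fd is needed afterwards
 */
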
/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding.  Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any).  The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread.  Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list.  There can only be one such pending lock.
 */
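
/*
 * An illustrative sketch of the userspace registration side (not part
 * of this file; 'struct my_lock' and its 'futex_word' member are
 * hypothetical, and the raw syscall wrapper is assumed).  'head' must
 * live for the lifetime of the thread:
 *
 *	struct robust_list_head head;
 *
 *	head.list.next = &head.list;	// empty, circular list
 *	head.futex_offset = offsetof(struct my_lock, futex_word);
 *	head.list_op_pending = NULL;
 *	syscall(__NR_set_robust_list, &head, sizeof(head));
 *
 * The lock/unlock fast paths then link and unlink each held lock's
 * robust_list entry, setting list_op_pending around the window where a
 * lock is already acquired but not yet enqueued.
 */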

/**
 * sys_set_robust_list - set the robust-futex list head of a task
 * @head: pointer to the list-head
 * @len: length of the list-head, as userspace expects
 */
asmlinkage long
sys_set_robust_list(struct robust_list_head __user *head,
		    size_t len)
{
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
 * sys_get_robust_list - get the robust-futex list head of a task
 * @pid: pid of the process [zero for current task]
 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
 * @len_ptr: pointer to a length field, the kernel fills in the header size
 */
asmlinkage long
sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
		    size_t __user *len_ptr)
{
	struct robust_list_head *head;
	unsigned long ret;

	if (!pid)
		head = current->robust_list;
	else {
		struct task_struct *p;

		ret = -ESRCH;
		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);
		if (!p)
			goto err_unlock;
		ret = -EPERM;
		if ((current->euid != p->euid) && (current->euid != p->uid) &&
		    !capable(CAP_SYS_PTRACE))
			goto err_unlock;
		head = p->robust_list;
		read_unlock(&tasklist_lock);
	}

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr)
{
	u32 uval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == curr->pid) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest.  Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any).  (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.)  The rest of the cleanup is done in
		 * userspace.
		 */
		if (futex_atomic_cmpxchg_inatomic(uaddr, uval,
						  uval | FUTEX_OWNER_DIED) != uval)
			goto retry;

		if (uval & FUTEX_WAITERS)
			futex_wake((unsigned long)uaddr, 1);
	}
	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT;
	unsigned long futex_offset;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (get_user(entry, &head->list.next))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (get_user(pending, &head->list_op_pending))
		return;
	if (pending)
		handle_futex_death((void *)pending + futex_offset, curr);

	while (entry != &head->list) {
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void *)entry + futex_offset,
					       curr))
				return;
		/*
		 * Fetch the next entry in the list:
		 */
		if (get_user(entry, &entry->next))
			return;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
}

long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
	      unsigned long uaddr2, int val2, int val3)
{
	int ret;

	switch (op) {
	case FUTEX_WAIT:
		ret = futex_wait(uaddr, val, timeout);
		break;
	case FUTEX_WAKE:
		ret = futex_wake(uaddr, val);
		break;
	case FUTEX_FD:
		/* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
		ret = futex_fd(uaddr, val);
		break;
	case FUTEX_REQUEUE:
		ret = futex_requeue(uaddr, uaddr2, val, val2, NULL);
		break;
	case FUTEX_CMP_REQUEUE:
		ret = futex_requeue(uaddr, uaddr2, val, val2, &val3);
		break;
	case FUTEX_WAKE_OP:
		ret = futex_wake_op(uaddr, uaddr2, val, val2, val3);
		break;
	default:
		ret = -ENOSYS;
	}
	return ret;
}

asmlinkage long sys_futex(u32 __user *uaddr, int op, int val,
			  struct timespec __user *utime, u32 __user *uaddr2,
			  int val3)
{
	struct timespec t;
	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
	int val2 = 0;

	if (utime && (op == FUTEX_WAIT)) {
		if (copy_from_user(&t, utime, sizeof(t)) != 0)
			return -EFAULT;
		if (!timespec_valid(&t))
			return -EINVAL;
		timeout = timespec_to_jiffies(&t) + 1;
	}
	/*
	 * requeue parameter in 'utime' if op == FUTEX_REQUEUE.
	 */
	if (op >= FUTEX_REQUEUE)
		val2 = (int) (unsigned long) utime;

	return do_futex((unsigned long)uaddr, op, val, timeout,
			(unsigned long)uaddr2, val2, val3);
}

static int futexfs_get_sb(struct file_system_type *fs_type,
			  int flags, const char *dev_name, void *data,
			  struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA, mnt);
}

static struct file_system_type futex_fs_type = {
	.name		= "futexfs",
	.get_sb		= futexfs_get_sb,
	.kill_sb	= kill_anon_super,
};

static int __init init(void)
{
	unsigned int i;

	register_filesystem(&futex_fs_type);
	futex_mnt = kern_mount(&futex_fs_type);

	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		INIT_LIST_HEAD(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}
	return 0;
}
__initcall(init);