/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <asm/futex.h>

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Futexes are matched on equal values of this key.
 * The key type depends on whether it's a shared or private mapping.
 * Don't rearrange members without looking at hash_futex().
 *
 * offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
 * We set bit 0 to indicate if it's an inode-based key.
 */
union futex_key {
	struct {
		unsigned long pgoff;
		struct inode *inode;
		int offset;
	} shared;
	struct {
		unsigned long uaddr;
		struct mm_struct *mm;
		int offset;
	} private;
	struct {
		unsigned long word;
		void *ptr;
		int offset;
	} both;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when list_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiters, then make the second condition true.
 */
struct futex_q {
	struct list_head list;
	wait_queue_head_t waiters;

	/* Which hash list lock to use. */
	spinlock_t *lock_ptr;

	/* Key which the futex is hashed on. */
	union futex_key key;

	/* For fd, sigio sent using these. */
	int fd;
	struct file *filp;
};

/*
 * Split the global futex_lock into every hash list lock.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct list_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/* Futex-fs vfsmount entry: */
static struct vfsmount *futex_mnt;

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Get parameters which are the keys for a futex.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_dentry->d_inode,
 * offset_within_page). For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * Returns: 0, or negative error code.
 * The key words are stored in *key on success.
 *
 * Should be called with &current->mm->mmap_sem but NOT any spinlocks.
 */
static int get_futex_key(unsigned long uaddr, union futex_key *key)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = uaddr % PAGE_SIZE;
	if (unlikely((key->both.offset % sizeof(u32)) != 0))
		return -EINVAL;
	uaddr -= key->both.offset;

	/*
	 * The futex is hashed differently depending on whether
	 * it's in a shared or private mapping. So check vma first.
	 */
	vma = find_extend_vma(mm, uaddr);
	if (unlikely(!vma))
		return -EFAULT;

	/*
	 * Permissions.
	 */
	if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
		return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process. Therefore we use
	 * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
	 * mappings of _writable_ handles.
	 */
	if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
		key->private.mm = mm;
		key->private.uaddr = uaddr;
		return 0;
	}

	/*
	 * Linear file mappings are also simple.
	 */
	key->shared.inode = vma->vm_file->f_dentry->d_inode;
	key->both.offset++; /* Bit 0 of offset indicates inode-based key. */
	if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
		key->shared.pgoff = (((uaddr - vma->vm_start) >> PAGE_SHIFT)
				     + vma->vm_pgoff);
		return 0;
	}

	/*
	 * We could walk the page table to read the non-linear
	 * pte, and get the page index without fetching the page
	 * from swap. But that's a lot of code to duplicate here
	 * for a rare case, so we simply fetch the page.
	 */
	err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
	if (err >= 0) {
		key->shared.pgoff =
			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
		put_page(page);
		return 0;
	}
	return err;
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 * NOTE: mmap_sem MUST be held between get_futex_key() and calling this
 * function, if it is called at all. mmap_sem keeps key->shared.inode valid.
 */
static inline void get_key_refs(union futex_key *key)
{
	if (key->both.ptr != 0) {
		if (key->both.offset & 1)
			atomic_inc(&key->shared.inode->i_count);
		else
			atomic_inc(&key->private.mm->mm_count);
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_key_refs(union futex_key *key)
{
	if (key->both.ptr != 0) {
		if (key->both.offset & 1)
			iput(key->shared.inode);
		else
			mmdrop(key->private.mm);
	}
}

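/*
 * Read the futex value while a hash bucket spinlock is held.
 * Bumping the preempt count puts us in atomic context, so the inatomic
 * copy fails with -EFAULT instead of sleeping when the page is not
 * resident; the caller then drops its locks, faults the page in with
 * get_user() and retries.
 */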
static inline int get_futex_value_locked(int *dest, int __user *from)
{
	int ret;

	inc_preempt_count();
	ret = __copy_from_user_inatomic(dest, from, sizeof(int));
	dec_preempt_count();

	return ret ? -EFAULT : 0;
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	list_del_init(&q->list);
	if (q->filp)
		send_sigio(&q->filp->f_owner, q->fd, POLL_IN);
	/*
	 * The lock in wake_up_all() is a crucial memory barrier after the
	 * list_del_init() and also before assigning to q->lock_ptr.
	 */
	wake_up_all(&q->waiters);
	/*
	 * The waiting task can free the futex_q as soon as this is written,
	 * without taking any locks. This must come last.
	 */
	q->lock_ptr = NULL;
}

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int futex_wake(unsigned long uaddr, int nr_wake)
{
	union futex_key key;
	struct futex_hash_bucket *bh;
	struct list_head *head;
	struct futex_q *this, *next;
	int ret;

	down_read(&current->mm->mmap_sem);

	ret = get_futex_key(uaddr, &key);
	if (unlikely(ret != 0))
		goto out;

	bh = hash_futex(&key);
	spin_lock(&bh->lock);
	head = &bh->chain;

	list_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&bh->lock);
out:
	up_read(&current->mm->mmap_sem);
	return ret;
}

/*
 * Wake up waiters hashed on the page mapped at uaddr1, atomically
 * perform the operation encoded in 'op' on the futex word at uaddr2,
 * and also wake waiters hashed on uaddr2 if the operation's comparison
 * succeeds.
 */
static int futex_wake_op(unsigned long uaddr1, unsigned long uaddr2, int nr_wake, int nr_wake2, int op)
{
	union futex_key key1, key2;
	struct futex_hash_bucket *bh1, *bh2;
	struct list_head *head;
	struct futex_q *this, *next;
	int ret, op_ret, attempt = 0;

retryfull:
	down_read(&current->mm->mmap_sem);

	ret = get_futex_key(uaddr1, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, &key2);
	if (unlikely(ret != 0))
		goto out;

	bh1 = hash_futex(&key1);
	bh2 = hash_futex(&key2);

retry:
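	/*
	 * Take both hash bucket locks in address order (and only once if
	 * both keys hash to the same bucket); the consistent ordering
	 * prevents an AB-BA deadlock against another two-bucket operation
	 * running in the opposite direction.
	 */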
	if (bh1 < bh2)
		spin_lock(&bh1->lock);
	spin_lock(&bh2->lock);
	if (bh1 > bh2)
		spin_lock(&bh1->lock);

	op_ret = futex_atomic_op_inuser(op, (int __user *)uaddr2);
	if (unlikely(op_ret < 0)) {
		int dummy;

		spin_unlock(&bh1->lock);
		if (bh1 != bh2)
			spin_unlock(&bh2->lock);

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out;
		}

		/* futex_atomic_op_inuser needs to both read and write
		 * *(int __user *)uaddr2, but we can't modify it
		 * non-atomically. Therefore, if get_user below is not
		 * enough, we need to handle the fault ourselves, while
		 * still holding the mmap_sem. */
		if (attempt++) {
			struct vm_area_struct * vma;
			struct mm_struct *mm = current->mm;

			ret = -EFAULT;
			if (attempt >= 2 ||
			    !(vma = find_vma(mm, uaddr2)) ||
			    vma->vm_start > uaddr2 ||
			    !(vma->vm_flags & VM_WRITE))
				goto out;

			switch (handle_mm_fault(mm, vma, uaddr2, 1)) {
			case VM_FAULT_MINOR:
				current->min_flt++;
				break;
			case VM_FAULT_MAJOR:
				current->maj_flt++;
				break;
			default:
				goto out;
			}
			goto retry;
		}

		/* If we would have faulted, release mmap_sem,
		 * fault it in and start all over again. */
		up_read(&current->mm->mmap_sem);

		ret = get_user(dummy, (int __user *)uaddr2);
		if (ret)
			return ret;

		goto retryfull;
	}

	head = &bh1->chain;

	list_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key1)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &bh2->chain;

		op_ret = 0;
		list_for_each_entry_safe(this, next, head, list) {
			if (match_futex (&this->key, &key2)) {
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

	spin_unlock(&bh1->lock);
	if (bh1 != bh2)
		spin_unlock(&bh2->lock);
out:
	up_read(&current->mm->mmap_sem);
	return ret;
}

/*
 * Requeue all waiters hashed on one physical page to another
 * physical page.
 */
static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
			 int nr_wake, int nr_requeue, int *valp)
{
	union futex_key key1, key2;
	struct futex_hash_bucket *bh1, *bh2;
	struct list_head *head1;
	struct futex_q *this, *next;
	int ret, drop_count = 0;

retry:
	down_read(&current->mm->mmap_sem);

	ret = get_futex_key(uaddr1, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, &key2);
	if (unlikely(ret != 0))
		goto out;

	bh1 = hash_futex(&key1);
	bh2 = hash_futex(&key2);

	if (bh1 < bh2)
		spin_lock(&bh1->lock);
	spin_lock(&bh2->lock);
	if (bh1 > bh2)
		spin_lock(&bh1->lock);

	if (likely(valp != NULL)) {
		int curval;

		ret = get_futex_value_locked(&curval, (int __user *)uaddr1);

		if (unlikely(ret)) {
			spin_unlock(&bh1->lock);
			if (bh1 != bh2)
				spin_unlock(&bh2->lock);

			/* If we would have faulted, release mmap_sem, fault
			 * it in and start all over again.
			 */
			up_read(&current->mm->mmap_sem);

			ret = get_user(curval, (int __user *)uaddr1);

			if (!ret)
				goto retry;

			return ret;
		}
		if (curval != *valp) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	head1 = &bh1->chain;
	list_for_each_entry_safe(this, next, head1, list) {
		if (!match_futex (&this->key, &key1))
			continue;
		if (++ret <= nr_wake) {
			wake_futex(this);
		} else {
			list_move_tail(&this->list, &bh2->chain);
			this->lock_ptr = &bh2->lock;
			this->key = key2;
			get_key_refs(&key2);
			drop_count++;

			if (ret - nr_wake >= nr_requeue)
				break;
			/* Make sure to stop if key1 == key2 */
			if (head1 == &bh2->chain && head1 != &next->list)
				head1 = &this->list;
		}
	}

out_unlock:
	spin_unlock(&bh1->lock);
	if (bh1 != bh2)
		spin_unlock(&bh2->lock);

	/* drop_key_refs() must be called outside the spinlocks. */
	while (--drop_count >= 0)
		drop_key_refs(&key1);

out:
	up_read(&current->mm->mmap_sem);
	return ret;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *
queue_lock(struct futex_q *q, int fd, struct file *filp)
{
	struct futex_hash_bucket *bh;

	q->fd = fd;
	q->filp = filp;

	init_waitqueue_head(&q->waiters);

	get_key_refs(&q->key);
	bh = hash_futex(&q->key);
	q->lock_ptr = &bh->lock;

	spin_lock(&bh->lock);
	return bh;
}

static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *bh)
{
	list_add_tail(&q->list, &bh->chain);
	spin_unlock(&bh->lock);
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *bh)
{
	spin_unlock(&bh->lock);
	drop_key_refs(&q->key);
}

/*
 * queue_me and unqueue_me must be called as a pair, each
 * exactly once. They are called with the hashed spinlock held.
 */

/* The key must be already stored in q->key. */
static void queue_me(struct futex_q *q, int fd, struct file *filp)
{
	struct futex_hash_bucket *bh;
	bh = queue_lock(q, fd, filp);
	__queue_me(q, bh);
}

/* Return 1 if we were still queued (ie. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
	int ret = 0;
	spinlock_t *lock_ptr;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	if (lock_ptr != 0) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock. This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock(). It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock(). It cannot,
		 * however, change back to the original value. Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		WARN_ON(list_empty(&q->list));
		list_del(&q->list);
		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_key_refs(&q->key);
	return ret;
}

static int futex_wait(unsigned long uaddr, int val, unsigned long time)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret, curval;
	struct futex_q q;
	struct futex_hash_bucket *bh;

retry:
	down_read(&current->mm->mmap_sem);

	ret = get_futex_key(uaddr, &q.key);
	if (unlikely(ret != 0))
		goto out_release_sem;

	bh = queue_lock(&q, -1, NULL);

	/*
	 * Access the page AFTER the futex is queued.
	 * Order is important:
	 *
	 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond. If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall. This is
	 * rare, but normal.
	 *
	 * We hold the mmap semaphore, so the mapping cannot have changed
	 * since we looked it up in get_futex_key.
	 */

	ret = get_futex_value_locked(&curval, (int __user *)uaddr);

	if (unlikely(ret)) {
		queue_unlock(&q, bh);

		/* If we would have faulted, release mmap_sem, fault it in and
		 * start all over again.
		 */
		up_read(&current->mm->mmap_sem);

		ret = get_user(curval, (int __user *)uaddr);

		if (!ret)
			goto retry;
		return ret;
	}
	if (curval != val) {
		ret = -EWOULDBLOCK;
		queue_unlock(&q, bh);
		goto out_release_sem;
	}

	/* Only actually queue if *uaddr contained val. */
	__queue_me(&q, bh);

	/*
	 * Now the futex is queued and we have checked the data, we
	 * don't want to hold mmap_sem while we sleep.
	 */
	up_read(&current->mm->mmap_sem);

	/*
	 * There might have been scheduling since the queue_me(), as we
	 * cannot hold a spinlock across the get_user() in case it
	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
	 * queueing ourselves into the futex hash. This code thus has to
	 * rely on the futex_wake() code removing us from hash when it
	 * wakes us up.
	 */

	/* add_wait_queue is the barrier after __set_current_state. */
	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&q.waiters, &wait);
	/*
	 * !list_empty() is safe here without any lock.
	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (likely(!list_empty(&q.list)))
		time = schedule_timeout(time);
	__set_current_state(TASK_RUNNING);

	/*
	 * NOTE: we don't remove ourselves from the waitqueue because
	 * we are the only user of it.
	 */

	/* If we were woken (and unqueued), we succeeded, whatever. */
	if (!unqueue_me(&q))
		return 0;
	if (time == 0)
		return -ETIMEDOUT;
	/* We expect signal_pending(current), but another thread may
	 * have handled it for us already. */
	return -EINTR;

out_release_sem:
	up_read(&current->mm->mmap_sem);
	return ret;
}

static int futex_close(struct inode *inode, struct file *filp)
{
	struct futex_q *q = filp->private_data;

	unqueue_me(q);
	kfree(q);
	return 0;
}

/* This is one-shot: once it's gone off you need a new fd */
static unsigned int futex_poll(struct file *filp,
			       struct poll_table_struct *wait)
{
	struct futex_q *q = filp->private_data;
	int ret = 0;

	poll_wait(filp, &q->waiters, wait);

	/*
	 * list_empty() is safe here without any lock.
	 * q->lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (list_empty(&q->list))
		ret = POLLIN | POLLRDNORM;

	return ret;
}

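/*
 * File operations for the descriptors handed out by FUTEX_FD: poll()
 * reports the futex as readable once it has been woken (and therefore
 * unqueued), and the final close unqueues and frees the waiter.
 */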
static struct file_operations futex_fops = {
	.release	= futex_close,
	.poll		= futex_poll,
};

/*
 * Signal allows caller to avoid the race which would occur if they
 * set the sigio stuff up afterwards.
 */
static int futex_fd(unsigned long uaddr, int signal)
{
	struct futex_q *q;
	struct file *filp;
	int ret, err;

	ret = -EINVAL;
	if (!valid_signal(signal))
		goto out;

	ret = get_unused_fd();
	if (ret < 0)
		goto out;
	filp = get_empty_filp();
	if (!filp) {
		put_unused_fd(ret);
		ret = -ENFILE;
		goto out;
	}
	filp->f_op = &futex_fops;
	filp->f_vfsmnt = mntget(futex_mnt);
	filp->f_dentry = dget(futex_mnt->mnt_root);
	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;

	if (signal) {
		err = f_setown(filp, current->pid, 1);
		if (err < 0) {
			goto error;
		}
		filp->f_owner.signum = signal;
	}

	q = kmalloc(sizeof(*q), GFP_KERNEL);
	if (!q) {
		err = -ENOMEM;
		goto error;
	}

	down_read(&current->mm->mmap_sem);
	err = get_futex_key(uaddr, &q->key);

	if (unlikely(err != 0)) {
		up_read(&current->mm->mmap_sem);
		kfree(q);
		goto error;
	}

	/*
	 * queue_me() must be called before releasing mmap_sem, because
	 * key->shared.inode needs to be referenced while holding it.
	 */
	filp->private_data = q;

	queue_me(q, ret, filp);
	up_read(&current->mm->mmap_sem);

	/* Now we map fd to filp, so userspace can access it */
	fd_install(ret, filp);
out:
	return ret;
error:
	put_unused_fd(ret);
	put_filp(filp);
	ret = err;
	goto out;
}

long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
	      unsigned long uaddr2, int val2, int val3)
{
	int ret;

	switch (op) {
	case FUTEX_WAIT:
		ret = futex_wait(uaddr, val, timeout);
		break;
	case FUTEX_WAKE:
		ret = futex_wake(uaddr, val);
		break;
	case FUTEX_FD:
		/* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
		ret = futex_fd(uaddr, val);
		break;
	case FUTEX_REQUEUE:
		ret = futex_requeue(uaddr, uaddr2, val, val2, NULL);
		break;
	case FUTEX_CMP_REQUEUE:
		ret = futex_requeue(uaddr, uaddr2, val, val2, &val3);
		break;
	case FUTEX_WAKE_OP:
		ret = futex_wake_op(uaddr, uaddr2, val, val2, val3);
		break;
	default:
		ret = -ENOSYS;
	}
	return ret;
}

asmlinkage long sys_futex(u32 __user *uaddr, int op, int val,
			  struct timespec __user *utime, u32 __user *uaddr2,
			  int val3)
{
	struct timespec t;
	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
	int val2 = 0;

	if ((op == FUTEX_WAIT) && utime) {
		if (copy_from_user(&t, utime, sizeof(t)) != 0)
			return -EFAULT;
		timeout = timespec_to_jiffies(&t) + 1;
	}
	/*
	 * requeue parameter in 'utime' if op == FUTEX_REQUEUE.
	 */
	if (op >= FUTEX_REQUEUE)
		val2 = (int) (unsigned long) utime;

	return do_futex((unsigned long)uaddr, op, val, timeout,
			(unsigned long)uaddr2, val2, val3);
}

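/*
 * futexfs is an internal pseudo filesystem: it is kern_mount()ed at
 * boot and exists only to provide the inode and dentry that back the
 * file objects handed out by FUTEX_FD.
 */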
static struct super_block *
futexfs_get_sb(struct file_system_type *fs_type,
	       int flags, const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA);
}

static struct file_system_type futex_fs_type = {
	.name		= "futexfs",
	.get_sb		= futexfs_get_sb,
	.kill_sb	= kill_anon_super,
};

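/*
 * Boot-time setup: register and mount futexfs, then initialize each
 * hash bucket's chain and spinlock.
 */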
static int __init init(void)
{
	unsigned int i;

	register_filesystem(&futex_fs_type);
	futex_mnt = kern_mount(&futex_fs_type);

	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		INIT_LIST_HEAD(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}
	return 0;
}
__initcall(init);