#ifndef _LINUX_FUTEX_H
#define _LINUX_FUTEX_H

#include <linux/sched.h>

/* Second argument to futex syscall */

#define FUTEX_WAIT		0
#define FUTEX_WAKE		1
#define FUTEX_FD		2
#define FUTEX_REQUEUE		3
#define FUTEX_CMP_REQUEUE	4
#define FUTEX_WAKE_OP		5
#define FUTEX_LOCK_PI		6
#define FUTEX_UNLOCK_PI		7
#define FUTEX_TRYLOCK_PI	8
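
/*
 * Illustrative sketch (not part of this header; compiled out): a
 * minimal user-space wait/wake pair built on the raw futex syscall.
 * Assumes a user-space build with <unistd.h> and <sys/syscall.h>;
 * error handling is omitted.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

static long futex_wait(int *uaddr, int val)
{
	/* Sleep only as long as *uaddr still equals val (no timeout): */
	return syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0);
}

static long futex_wake(int *uaddr, int nr_wake)
{
	/* Wake up to nr_wake threads blocked on uaddr: */
	return syscall(SYS_futex, uaddr, FUTEX_WAKE, nr_wake, NULL, NULL, 0);
}
#endif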

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 */

/*
 * Per-lock list entry - embedded in user-space locks, somewhere close
 * to the futex field. (Note: user-space uses a doubly-linked list to
 * achieve O(1) list add and remove, but the kernel only needs to know
 * about the forward link.)
 *
 * NOTE: this structure is part of the syscall ABI, and must not be
 * changed.
 */
struct robust_list {
	struct robust_list __user *next;
};

/*
 * Per-thread list head:
 *
 * NOTE: this structure is part of the syscall ABI, and must only be
 * changed if the change is first communicated to the glibc folks.
 * (When an incompatible change is made, we'll increase the structure
 * size, which glibc will detect.)
 */
struct robust_list_head {
	/*
	 * The head of the list. Points back to itself if empty:
	 */
	struct robust_list list;

	/*
	 * This relative offset is set by user-space; it gives the kernel
	 * the relative position of the futex field to examine. This way
	 * userspace stays flexible and can shape its data structures
	 * freely, without hardcoding any particular offset into the
	 * kernel:
	 */
	long futex_offset;

	/*
	 * The death of the thread may race with userspace setting
	 * up a lock's links. To handle this race, userspace first
	 * sets this field to the address of the to-be-taken lock,
	 * then acquires the lock, adds itself to the list, and
	 * finally clears this field. Hence the kernel will always
	 * have full knowledge of all locks that the thread _might_
	 * have taken. We check the owner TID in any case, so only
	 * truly owned locks will be handled.
	 */
	struct robust_list __user *list_op_pending;
};
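
/*
 * Illustrative sketch (compiled out): how user-space might embed a
 * robust_list entry next to its futex word and register the per-thread
 * head via the set_robust_list syscall, roughly the way glibc wires up
 * robust pthread mutexes. The lock layout below is hypothetical, and
 * a user-space build would see these structs without the __user
 * annotations.
 */
#if 0
#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>

struct my_robust_lock {			/* hypothetical user-space lock */
	struct robust_list list;	/* forward link walked by the kernel */
	int futex;			/* owner TID plus flag bits */
};

static struct robust_list_head head = {
	.list		 = { &head.list },	/* empty list points at itself */
	/* Relative position of the futex word in every list entry: */
	.futex_offset	 = offsetof(struct my_robust_lock, futex),
	.list_op_pending = NULL,
};

static void register_robust_list(void)
{
	syscall(SYS_set_robust_list, &head, sizeof(head));
}
#endif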

/*
 * Are there any waiters for this robust futex:
 */
#define FUTEX_WAITERS		0x80000000

/*
 * The kernel signals via this bit that a thread holding a futex
 * has exited without unlocking the futex. The kernel also does
 * a FUTEX_WAKE on such futexes, after setting the bit, to wake
 * up any possible waiters:
 */
#define FUTEX_OWNER_DIED	0x40000000

/*
 * The rest of the robust-futex field is for the TID:
 */
#define FUTEX_TID_MASK		0x3fffffff
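
/*
 * Illustrative sketch (compiled out): decoding and taking a robust
 * futex word. The low 30 bits hold the owner's TID; the top two bits
 * are the flags above. User-space takes an uncontended lock by
 * atomically replacing 0 with its own TID. The helpers below are
 * hypothetical and use GCC's __sync_bool_compare_and_swap() builtin.
 */
#if 0
/* Take an uncontended robust futex: atomically replace 0 with our TID. */
static int robust_trylock(int *futex, int tid)
{
	return __sync_bool_compare_and_swap(futex, 0, tid);
}

static int owner_tid(int val)	{ return val & FUTEX_TID_MASK; }
static int has_waiters(int val)	{ return !!(val & FUTEX_WAITERS); }
static int owner_died(int val)	{ return !!(val & FUTEX_OWNER_DIED); }
#endif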

/*
 * This limit protects against a deliberately circular list.
 * (Not worth introducing an rlimit for it.)
 */
#define ROBUST_LIST_LIMIT	2048

#ifdef __KERNEL__
long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
	      u32 __user *uaddr2, u32 val2, u32 val3);

extern int
handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);

#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
extern void exit_pi_state_list(struct task_struct *curr);
#else
static inline void exit_robust_list(struct task_struct *curr)
{
}
static inline void exit_pi_state_list(struct task_struct *curr)
{
}
#endif
#endif /* __KERNEL__ */
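
/*
 * Illustrative sketch (compiled out): a simplified model of what the
 * kernel-side exit_robust_list() does at thread exit. The real walk in
 * kernel/futex.c fetches every user-space word with get_user() and also
 * inspects list_op_pending; this sketch only shows the shape of the
 * loop: follow the forward links, give up after ROBUST_LIST_LIMIT
 * entries to defeat circular lists, and derive each futex address from
 * the entry pointer plus the user-supplied futex_offset.
 */
#if 0
static void walk_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry = head->list.next;
	long futex_offset = head->futex_offset;
	int limit = ROBUST_LIST_LIMIT;

	while (entry != &head->list && limit-- > 0) {
		u32 __user *uaddr = (void __user *)entry + futex_offset;

		/* Set FUTEX_OWNER_DIED and wake a waiter, if we own it: */
		handle_futex_death(uaddr, curr, 0);
		entry = entry->next;
	}
}
#endif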

#define FUTEX_OP_SET		0	/* *(int *)UADDR2 = OPARG; */
#define FUTEX_OP_ADD		1	/* *(int *)UADDR2 += OPARG; */
#define FUTEX_OP_OR		2	/* *(int *)UADDR2 |= OPARG; */
#define FUTEX_OP_ANDN		3	/* *(int *)UADDR2 &= ~OPARG; */
#define FUTEX_OP_XOR		4	/* *(int *)UADDR2 ^= OPARG; */

#define FUTEX_OP_OPARG_SHIFT	8	/* Use (1 << OPARG) instead of OPARG. */

#define FUTEX_OP_CMP_EQ		0	/* if (oldval == CMPARG) wake */
#define FUTEX_OP_CMP_NE		1	/* if (oldval != CMPARG) wake */
#define FUTEX_OP_CMP_LT		2	/* if (oldval < CMPARG) wake */
#define FUTEX_OP_CMP_LE		3	/* if (oldval <= CMPARG) wake */
#define FUTEX_OP_CMP_GT		4	/* if (oldval > CMPARG) wake */
#define FUTEX_OP_CMP_GE		5	/* if (oldval >= CMPARG) wake */

/* FUTEX_WAKE_OP will perform atomically
	int oldval = *(int *)UADDR2;
	*(int *)UADDR2 = oldval OP OPARG;
	if (oldval CMP CMPARG)
		wake UADDR2;  */

#define FUTEX_OP(op, oparg, cmp, cmparg) \
  (((op & 0xf) << 28) | ((cmp & 0xf) << 24)		\
   | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
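
/*
 * Illustrative sketch (compiled out): one classic FUTEX_WAKE_OP use,
 * avoiding two separate syscalls when two futexes must be handled
 * together. The helper name is hypothetical. val (here 1) is the
 * number of waiters to wake on uaddr; val2 (here 1, passed through
 * the timeout argument slot) is the number to wake on uaddr2 if the
 * encoded comparison on the old value of *uaddr2 succeeds.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

static long wake_both(int *uaddr, int *uaddr2)
{
	/* Atomically: oldval = *uaddr2; *uaddr2 = 0;
	 * then also wake uaddr2 waiters if oldval != 0. */
	int op = FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_NE, 0);

	return syscall(SYS_futex, uaddr, FUTEX_WAKE_OP, 1,
		       (void *)1UL, uaddr2, op);
}
#endif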

#endif