// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <[email protected]>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
#include <linux/uio.h>

DEFINE_PER_CPU(int, eventfd_wake_count);

static DEFINE_IDA(eventfd_ida);

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
	int id;
};

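/*
 * Usage sketch (illustrative, not part of the original source; "efd" and
 * "cnt" are placeholders): with the default flags the counter accumulates
 * across writes and a single read drains it whole; with EFD_SEMAPHORE each
 * read instead returns 1 and decrements the counter by 1.
 *
 *	write(efd, &(__u64){3}, 8);	// count == 3
 *	write(efd, &(__u64){4}, 8);	// count == 7
 *	read(efd, &cnt, 8);		// cnt == 7, count reset to 0
 */
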
/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning an EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented.  This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	/*
	 * Deadlock or stack overflow issues can happen if we recurse here
	 * through waitqueue wakeup handlers. If the caller uses potentially
	 * nested waitqueues with custom wakeup handlers, then it should
	 * check eventfd_signal_count() before calling this function. If
	 * it returns true, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	this_cpu_inc(eventfd_wake_count);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	this_cpu_dec(eventfd_wake_count);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);

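/*
 * Deferral sketch (hypothetical caller following the comment above; "priv"
 * and its work item are placeholders): a wakeup handler that may run nested
 * under another waitqueue checks eventfd_signal_count() and punts to a
 * workqueue rather than recursing:
 *
 *	if (eventfd_signal_count())
 *		schedule_work(&priv->signal_work);  // worker calls eventfd_signal()
 *	else
 *		eventfd_signal(priv->ctx, 1);
 */
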
static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	if (ctx->id >= 0)
		ida_simple_remove(&eventfd_ida, ctx->id);
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

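/*
 * Lifetime note (editorial): the file installed by do_eventfd() holds the
 * initial kref; kernel consumers that stash the context take extra
 * references via eventfd_ctx_fdget()/eventfd_ctx_fileget() and drop them
 * with eventfd_ctx_put(), so the context can outlive the last close(2).
 */
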
static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock. This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it!  add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read be ordered properly
	 * against the writes.  The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;

	return events;
}

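/*
 * Readiness summary (derived from the checks above): count == 0 yields
 * EPOLLOUT only; 0 < count < ULLONG_MAX - 1 yields EPOLLIN|EPOLLOUT;
 * count == ULLONG_MAX - 1 yields EPOLLIN only, since one more unit would
 * hit the overflow mark; count == ULLONG_MAX yields EPOLLIN|EPOLLERR.
 */
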
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	lockdep_assert_held(&ctx->wqh.lock);

	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue entry.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue entry to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error code:
 *
 * -EAGAIN : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);

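/*
 * Semantics sketch (from eventfd_ctx_do_read() above): in semaphore mode a
 * counter of 7 satisfies seven reads of 1 each; in the default mode a
 * single read returns 7 and leaves the counter at 0.
 */
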
static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct eventfd_ctx *ctx = file->private_data;
	__u64 ucnt = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (iov_iter_count(to) < sizeof(ucnt))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	if (!ctx->count) {
		if ((file->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			spin_unlock_irq(&ctx->wqh.lock);
			return -EAGAIN;
		}
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count)
				break;
			if (signal_pending(current)) {
				__remove_wait_queue(&ctx->wqh, &wait);
				__set_current_state(TASK_RUNNING);
				spin_unlock_irq(&ctx->wqh.lock);
				return -ERESTARTSYS;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	eventfd_ctx_do_read(ctx, &ucnt);
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irq(&ctx->wqh.lock);
	if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
		return -EFAULT;

	return sizeof(ucnt);
}

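/*
 * Wait-loop note (editorial): the loop above is the classic open-coded
 * prepare-to-wait pattern: the task is marked TASK_INTERRUPTIBLE while
 * still holding wqh.lock, the condition is re-checked, and the lock is
 * dropped only around schedule(), so a wakeup arriving between the check
 * and the sleep cannot be lost.
 */
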
static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}

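/*
 * Overflow note (from the checks above): a write of ULLONG_MAX is rejected
 * outright, and a blocking write waits until count + ucnt < ULLONG_MAX, so
 * write(2) can never push the counter to the ULLONG_MAX overflow mark;
 * only eventfd_signal() saturation can reach it.
 */
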
#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-id: %d\n", ctx->id);
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = eventfd_show_fdinfo,
#endif
	.release = eventfd_release,
	.poll = eventfd_poll,
	.read_iter = eventfd_read,
	.write = eventfd_write,
	.llseek = noop_llseek,
};

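/*
 * Example fdinfo output (illustrative values; these lines are appended to
 * the generic entries in /proc/<pid>/fdinfo/<fd>, with the count printed
 * as zero-padded-width hex by the %16llx format above):
 *
 *	eventfd-count:                3
 *	eventfd-id: 5
 */
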
/**
 * eventfd_fget - Acquire a reference of an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF  : Invalid @fd file descriptor.
 * -EINVAL : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by the following functions:
 *
 * eventfd_fget
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL : The @file pointer is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

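/*
 * Consumer sketch (hypothetical driver code; "priv" and "user_fd" are
 * placeholders): the usual kernel-side pattern pins the context from a
 * userspace-supplied fd, signals it later, possibly from atomic context,
 * and drops the reference on teardown:
 *
 *	priv->trigger = eventfd_ctx_fdget(user_fd);
 *	if (IS_ERR(priv->trigger))
 *		return PTR_ERR(priv->trigger);
 *	...
 *	eventfd_signal(priv->trigger, 1);	// e.g. from an irq handler
 *	...
 *	eventfd_ctx_put(priv->trigger);
 */
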
static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	struct file *file;
	int fd;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;
	ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);

	flags &= EFD_SHARED_FCNTL_FLAGS;
	flags |= O_RDWR;
	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		goto err;

	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		fd = PTR_ERR(file);
		goto err;
	}

	file->f_mode |= FMODE_NOWAIT;
	fd_install(fd, file);
	return fd;
err:
	eventfd_free_ctx(ctx);
	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
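/*
 * Userspace sketch (illustrative; assumes the eventfd(2) wrapper from
 * <sys/eventfd.h>): creating and exercising the descriptor returned by
 * the syscalls above:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
 *	uint64_t v = 1;
 *	write(efd, &v, sizeof(v));	// count becomes 1, pollers see EPOLLIN
 *	read(efd, &v, sizeof(v));	// v == 1, count reset to 0
 *	close(efd);
 */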