2 * An async IO implementation for Linux
5 * Implements an efficient asynchronous io interface.
7 * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
8 * Copyright 2018 Christoph Hellwig.
10 * See ../COPYING for licensing terms.
12 #define pr_fmt(fmt) "%s: " fmt, __func__
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/errno.h>
17 #include <linux/time.h>
18 #include <linux/aio_abi.h>
19 #include <linux/export.h>
20 #include <linux/syscalls.h>
21 #include <linux/backing-dev.h>
22 #include <linux/refcount.h>
23 #include <linux/uio.h>
25 #include <linux/sched/signal.h>
27 #include <linux/file.h>
29 #include <linux/mman.h>
30 #include <linux/percpu.h>
31 #include <linux/slab.h>
32 #include <linux/timer.h>
33 #include <linux/aio.h>
34 #include <linux/highmem.h>
35 #include <linux/workqueue.h>
36 #include <linux/security.h>
37 #include <linux/eventfd.h>
38 #include <linux/blkdev.h>
39 #include <linux/compat.h>
40 #include <linux/migrate.h>
41 #include <linux/ramfs.h>
42 #include <linux/percpu-refcount.h>
43 #include <linux/mount.h>
44 #include <linux/pseudo_fs.h>
46 #include <linux/uaccess.h>
47 #include <linux/nospec.h>
53 #define AIO_RING_MAGIC 0xa10a10a1
54 #define AIO_RING_COMPAT_FEATURES 1
55 #define AIO_RING_INCOMPAT_FEATURES 0
57 unsigned id; /* kernel internal index number */
58 unsigned nr; /* number of io_events */
59 unsigned head; /* Written to by userland or under ring_lock
60 * mutex by aio_read_events_ring(). */
64 unsigned compat_features;
65 unsigned incompat_features;
66 unsigned header_length; /* size of aio_ring */
69 struct io_event io_events[];
70 }; /* 128 bytes + ring size */
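/*
 * Illustrative sketch, not part of the original source: the structure above
 * is what userspace sees through the [aio] mapping whose address is returned
 * as the aio_context_t.  Assuming incompat_features == 0 (see
 * AIO_RING_INCOMPAT_FEATURES above), a user-space reaper could in principle
 * consume events straight from the mapping, roughly:
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx_id;
 *	unsigned head = ring->head, tail = ring->tail;
 *
 *	while (head != tail) {
 *		struct io_event ev = ring->io_events[head];
 *		head = (head + 1) % ring->nr;
 *		// ... consume ev ...
 *	}
 *	ring->head = head;	// hand the slots back to the kernel
 *
 * The kernel-side equivalent of this loop is aio_read_events_ring() below.
 */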
73 * Plugging is meant to work with larger batches of IOs. If we don't
74 * have more than the below, then don't bother setting up a plug.
76 #define AIO_PLUG_THRESHOLD 2
78 #define AIO_RING_PAGES 8
83 struct kioctx __rcu *table[] __counted_by(nr);
87 unsigned reqs_available;
91 struct completion comp;
96 struct percpu_ref users;
99 struct percpu_ref reqs;
101 unsigned long user_id;
103 struct __percpu kioctx_cpu *cpu;
106 * For percpu reqs_available, number of slots we move to/from global
111 * This is what userspace passed to io_setup(); it's not used for
112 * anything but counting against the global max_reqs quota.
114 * The real limit is nr_events - 1, which will be larger (see
119 /* Size of ringbuffer, in units of struct io_event */
122 unsigned long mmap_base;
123 unsigned long mmap_size;
125 struct folio **ring_folios;
128 struct rcu_work free_rwork; /* see free_ioctx() */
131 * signals when all in-flight requests are done
133 struct ctx_rq_wait *rq_wait;
137 * This counts the number of available slots in the ringbuffer,
138 * so we avoid overflowing it: it's decremented (if positive)
139 * when allocating a kiocb and incremented when the resulting
140 * io_event is pulled off the ringbuffer.
142 * We batch accesses to it with a percpu version.
144 atomic_t reqs_available;
145 } ____cacheline_aligned_in_smp;
149 struct list_head active_reqs; /* used for cancellation */
150 } ____cacheline_aligned_in_smp;
153 struct mutex ring_lock;
154 wait_queue_head_t wait;
155 } ____cacheline_aligned_in_smp;
159 unsigned completed_events;
160 spinlock_t completion_lock;
161 } ____cacheline_aligned_in_smp;
163 struct folio *internal_folios[AIO_RING_PAGES];
164 struct file *aio_ring_file;
170 * First field must be the file pointer in all the
171 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
175 struct work_struct work;
182 struct wait_queue_head *head;
186 bool work_need_resched;
187 struct wait_queue_entry wait;
188 struct work_struct work;
192 * NOTE! Each of the iocb union members has the file pointer
193 * as the first entry in their struct definition. So you can
194 * access the file pointer through any of the sub-structs,
195 * or directly as just 'ki_filp' in this struct.
199 struct file *ki_filp;
201 struct fsync_iocb fsync;
202 struct poll_iocb poll;
205 struct kioctx *ki_ctx;
206 kiocb_cancel_fn *ki_cancel;
208 struct io_event ki_res;
210 struct list_head ki_list; /* the aio core uses this
211 * for cancellation */
212 refcount_t ki_refcnt;
215 * If the aio_resfd field of the userspace iocb is not zero,
216 * this is the underlying eventfd context to deliver events to.
218 struct eventfd_ctx *ki_eventfd;
221 /*------ sysctl variables----*/
222 static DEFINE_SPINLOCK(aio_nr_lock);
223 static unsigned long aio_nr; /* current system wide number of aio requests */
224 static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
225 /*----end sysctl variables---*/
227 static struct ctl_table aio_sysctls[] = {
229 .procname = "aio-nr",
231 .maxlen = sizeof(aio_nr),
233 .proc_handler = proc_doulongvec_minmax,
236 .procname = "aio-max-nr",
238 .maxlen = sizeof(aio_max_nr),
240 .proc_handler = proc_doulongvec_minmax,
244 static void __init aio_sysctl_init(void)
246 register_sysctl_init("fs", aio_sysctls);
249 #define aio_sysctl_init() do { } while (0)
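/*
 * Once aio_sysctl_init() has run, the two counters above are visible as
 * /proc/sys/fs/aio-nr (the current system-wide request count, read-only)
 * and /proc/sys/fs/aio-max-nr (the system-wide limit, which an
 * administrator may raise).
 */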
252 static struct kmem_cache *kiocb_cachep;
253 static struct kmem_cache *kioctx_cachep;
255 static struct vfsmount *aio_mnt;
257 static const struct file_operations aio_ring_fops;
258 static const struct address_space_operations aio_ctx_aops;
260 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
263 struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
265 return ERR_CAST(inode);
267 inode->i_mapping->a_ops = &aio_ctx_aops;
268 inode->i_mapping->i_private_data = ctx;
269 inode->i_size = PAGE_SIZE * nr_pages;
271 file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
272 O_RDWR, &aio_ring_fops);
278 static int aio_init_fs_context(struct fs_context *fc)
280 if (!init_pseudo(fc, AIO_RING_MAGIC))
282 fc->s_iflags |= SB_I_NOEXEC;
287 * Creates the slab caches used by the aio routines, panic on
288 * failure as this is done early during the boot sequence.
290 static int __init aio_setup(void)
292 static struct file_system_type aio_fs = {
294 .init_fs_context = aio_init_fs_context,
295 .kill_sb = kill_anon_super,
297 aio_mnt = kern_mount(&aio_fs);
299 panic("Failed to create aio fs mount.");
301 kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
302 kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
306 __initcall(aio_setup);
308 static void put_aio_ring_file(struct kioctx *ctx)
310 struct file *aio_ring_file = ctx->aio_ring_file;
311 struct address_space *i_mapping;
314 truncate_setsize(file_inode(aio_ring_file), 0);
316 /* Prevent further access to the kioctx from migratepages */
317 i_mapping = aio_ring_file->f_mapping;
318 spin_lock(&i_mapping->i_private_lock);
319 i_mapping->i_private_data = NULL;
320 ctx->aio_ring_file = NULL;
321 spin_unlock(&i_mapping->i_private_lock);
327 static void aio_free_ring(struct kioctx *ctx)
331 /* Disconnect the kioctx from the ring file. This prevents future
332 * accesses to the kioctx from page migration.
334 put_aio_ring_file(ctx);
336 for (i = 0; i < ctx->nr_pages; i++) {
337 struct folio *folio = ctx->ring_folios[i];
342 pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
343 folio_ref_count(folio));
344 ctx->ring_folios[i] = NULL;
348 if (ctx->ring_folios && ctx->ring_folios != ctx->internal_folios) {
349 kfree(ctx->ring_folios);
350 ctx->ring_folios = NULL;
354 static int aio_ring_mremap(struct vm_area_struct *vma)
356 struct file *file = vma->vm_file;
357 struct mm_struct *mm = vma->vm_mm;
358 struct kioctx_table *table;
359 int i, res = -EINVAL;
361 spin_lock(&mm->ioctx_lock);
363 table = rcu_dereference(mm->ioctx_table);
367 for (i = 0; i < table->nr; i++) {
370 ctx = rcu_dereference(table->table[i]);
371 if (ctx && ctx->aio_ring_file == file) {
372 if (!atomic_read(&ctx->dead)) {
373 ctx->user_id = ctx->mmap_base = vma->vm_start;
382 spin_unlock(&mm->ioctx_lock);
386 static const struct vm_operations_struct aio_ring_vm_ops = {
387 .mremap = aio_ring_mremap,
388 #if IS_ENABLED(CONFIG_MMU)
389 .fault = filemap_fault,
390 .map_pages = filemap_map_pages,
391 .page_mkwrite = filemap_page_mkwrite,
395 static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
397 vm_flags_set(vma, VM_DONTEXPAND);
398 vma->vm_ops = &aio_ring_vm_ops;
402 static const struct file_operations aio_ring_fops = {
403 .mmap = aio_ring_mmap,
406 #if IS_ENABLED(CONFIG_MIGRATION)
407 static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
408 struct folio *src, enum migrate_mode mode)
416 * We cannot support the _NO_COPY case here, because copy needs to
417 * happen under the ctx->completion_lock. That does not work with the
418 * migration workflow of MIGRATE_SYNC_NO_COPY.
420 if (mode == MIGRATE_SYNC_NO_COPY)
425 /* mapping->i_private_lock here protects against the kioctx teardown. */
426 spin_lock(&mapping->i_private_lock);
427 ctx = mapping->i_private_data;
433 /* The ring_lock mutex. This prevents aio_read_events() from writing
434 * to the ring's head, and prevents page migration from mucking in
435 * a partially initialized kioctx.
437 if (!mutex_trylock(&ctx->ring_lock)) {
443 if (idx < (pgoff_t)ctx->nr_pages) {
444 /* Make sure the old folio hasn't already been changed */
445 if (ctx->ring_folios[idx] != src)
453 /* Writeback must be complete */
454 BUG_ON(folio_test_writeback(src));
457 rc = folio_migrate_mapping(mapping, dst, src, 1);
458 if (rc != MIGRATEPAGE_SUCCESS) {
463 /* Take completion_lock to prevent other writes to the ring buffer
464 * while the old folio is copied to the new. This prevents new
465 * events from being lost.
467 spin_lock_irqsave(&ctx->completion_lock, flags);
468 folio_migrate_copy(dst, src);
469 BUG_ON(ctx->ring_folios[idx] != src);
470 ctx->ring_folios[idx] = dst;
471 spin_unlock_irqrestore(&ctx->completion_lock, flags);
473 /* The old folio is no longer accessible. */
477 mutex_unlock(&ctx->ring_lock);
479 spin_unlock(&mapping->i_private_lock);
483 #define aio_migrate_folio NULL
486 static const struct address_space_operations aio_ctx_aops = {
487 .dirty_folio = noop_dirty_folio,
488 .migrate_folio = aio_migrate_folio,
491 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
493 struct aio_ring *ring;
494 struct mm_struct *mm = current->mm;
495 unsigned long size, unused;
500 /* Compensate for the ring buffer's head/tail overlap entry */
501 nr_events += 2; /* 1 is required, 2 for good luck */
503 size = sizeof(struct aio_ring);
504 size += sizeof(struct io_event) * nr_events;
506 nr_pages = PFN_UP(size);
510 file = aio_private_file(ctx, nr_pages);
512 ctx->aio_ring_file = NULL;
516 ctx->aio_ring_file = file;
517 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
518 / sizeof(struct io_event);
520 ctx->ring_folios = ctx->internal_folios;
521 if (nr_pages > AIO_RING_PAGES) {
522 ctx->ring_folios = kcalloc(nr_pages, sizeof(struct folio *),
524 if (!ctx->ring_folios) {
525 put_aio_ring_file(ctx);
530 for (i = 0; i < nr_pages; i++) {
533 folio = __filemap_get_folio(file->f_mapping, i,
534 FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
535 GFP_USER | __GFP_ZERO);
539 pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
540 folio_ref_count(folio));
541 folio_end_read(folio, true);
543 ctx->ring_folios[i] = folio;
547 if (unlikely(i != nr_pages)) {
552 ctx->mmap_size = nr_pages * PAGE_SIZE;
553 pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
555 if (mmap_write_lock_killable(mm)) {
561 ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
562 PROT_READ | PROT_WRITE,
563 MAP_SHARED, 0, 0, &unused, NULL);
564 mmap_write_unlock(mm);
565 if (IS_ERR((void *)ctx->mmap_base)) {
571 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
573 ctx->user_id = ctx->mmap_base;
574 ctx->nr_events = nr_events; /* trusted copy */
576 ring = folio_address(ctx->ring_folios[0]);
577 ring->nr = nr_events; /* user copy */
579 ring->head = ring->tail = 0;
580 ring->magic = AIO_RING_MAGIC;
581 ring->compat_features = AIO_RING_COMPAT_FEATURES;
582 ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
583 ring->header_length = sizeof(struct aio_ring);
584 flush_dcache_folio(ctx->ring_folios[0]);
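/*
 * Worked example of the sizing above (illustrative only; assumes 4 KiB
 * pages and a 32-byte struct aio_ring header and struct io_event):
 *
 *	aio_setup_ring(ctx, 256)
 *		nr_events += 2			-> 258
 *		size = 32 + 258 * 32		-> 8288 bytes
 *		nr_pages = PFN_UP(8288)		-> 3 pages
 *		nr_events recomputed		-> (3 * 4096 - 32) / 32 = 383
 *
 * so the ring ends up with 383 slots, of which nr_events - 1 = 382 are
 * usable because of the head/tail overlap entry compensated for above.
 */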
589 #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
590 #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
591 #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
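/*
 * Purely illustrative helper, not part of the original file: it shows how
 * the three macros above map a logical event index onto a ring folio and a
 * slot within it, mirroring the arithmetic used by aio_complete() and
 * aio_read_events_ring() below.
 */
static inline struct io_event *aio_event_slot_sketch(struct kioctx *ctx,
						     unsigned int idx)
{
	unsigned int pos = idx + AIO_EVENTS_OFFSET;
	struct io_event *ev_page;

	ev_page = folio_address(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
	return ev_page + pos % AIO_EVENTS_PER_PAGE;
}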
593 void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
595 struct aio_kiocb *req;
600 * kiocb didn't come from aio or is neither a read nor a write, hence
603 if (!(iocb->ki_flags & IOCB_AIO_RW))
606 req = container_of(iocb, struct aio_kiocb, rw);
608 if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
613 spin_lock_irqsave(&ctx->ctx_lock, flags);
614 list_add_tail(&req->ki_list, &ctx->active_reqs);
615 req->ki_cancel = cancel;
616 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
618 EXPORT_SYMBOL(kiocb_set_cancel_fn);
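/*
 * Hypothetical driver-side sketch (not from this file): a driver whose
 * ->read_iter()/->write_iter() wants its in-flight aio requests to be
 * cancellable through io_cancel() registers a callback roughly like this;
 * my_driver_cancel() is an invented name.
 *
 *	static int my_driver_cancel(struct kiocb *iocb)
 *	{
 *		// abort the operation backing @iocb; the normal completion
 *		// path still calls iocb->ki_complete() eventually
 *		return 0;
 *	}
 *
 *	...
 *		kiocb_set_cancel_fn(iocb, my_driver_cancel);
 *
 * As the IOCB_AIO_RW check above shows, the call is a no-op for kiocbs that
 * did not originate from aio read/write submission.
 */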
621 * free_ioctx() should be RCU delayed to synchronize against the RCU
622 * protected lookup_ioctx() and also needs process context to call
623 * aio_free_ring(). Use rcu_work.
625 static void free_ioctx(struct work_struct *work)
627 struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
629 pr_debug("freeing %p\n", ctx);
632 free_percpu(ctx->cpu);
633 percpu_ref_exit(&ctx->reqs);
634 percpu_ref_exit(&ctx->users);
635 kmem_cache_free(kioctx_cachep, ctx);
638 static void free_ioctx_reqs(struct percpu_ref *ref)
640 struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
642 /* At this point we know that there are no in-flight requests */
643 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
644 complete(&ctx->rq_wait->comp);
646 /* Synchronize against RCU protected table->table[] dereferences */
647 INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
648 queue_rcu_work(system_wq, &ctx->free_rwork);
652 * When this function runs, the kioctx has been removed from the "hash table"
653 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
654 * now it's safe to cancel any that need to be.
656 static void free_ioctx_users(struct percpu_ref *ref)
658 struct kioctx *ctx = container_of(ref, struct kioctx, users);
659 struct aio_kiocb *req;
661 spin_lock_irq(&ctx->ctx_lock);
663 while (!list_empty(&ctx->active_reqs)) {
664 req = list_first_entry(&ctx->active_reqs,
665 struct aio_kiocb, ki_list);
666 req->ki_cancel(&req->rw);
667 list_del_init(&req->ki_list);
670 spin_unlock_irq(&ctx->ctx_lock);
672 percpu_ref_kill(&ctx->reqs);
673 percpu_ref_put(&ctx->reqs);
676 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
679 struct kioctx_table *table, *old;
680 struct aio_ring *ring;
682 spin_lock(&mm->ioctx_lock);
683 table = rcu_dereference_raw(mm->ioctx_table);
687 for (i = 0; i < table->nr; i++)
688 if (!rcu_access_pointer(table->table[i])) {
690 rcu_assign_pointer(table->table[i], ctx);
691 spin_unlock(&mm->ioctx_lock);
693 /* While kioctx setup is in progress,
694 * we are protected from page migration
695 * changing ring_folios by ->ring_lock.
697 ring = folio_address(ctx->ring_folios[0]);
702 new_nr = (table ? table->nr : 1) * 4;
703 spin_unlock(&mm->ioctx_lock);
705 table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL);
711 spin_lock(&mm->ioctx_lock);
712 old = rcu_dereference_raw(mm->ioctx_table);
715 rcu_assign_pointer(mm->ioctx_table, table);
716 } else if (table->nr > old->nr) {
717 memcpy(table->table, old->table,
718 old->nr * sizeof(struct kioctx *));
720 rcu_assign_pointer(mm->ioctx_table, table);
729 static void aio_nr_sub(unsigned nr)
731 spin_lock(&aio_nr_lock);
732 if (WARN_ON(aio_nr - nr > aio_nr))
736 spin_unlock(&aio_nr_lock);
740 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
742 static struct kioctx *ioctx_alloc(unsigned nr_events)
744 struct mm_struct *mm = current->mm;
749 * Store the original nr_events -- what userspace passed to io_setup(),
750 * for counting against the global limit -- before it changes.
752 unsigned int max_reqs = nr_events;
755 * We keep track of the number of available ringbuffer slots, to prevent
756 * overflow (reqs_available), and we also use percpu counters for this.
758 * So since up to half the slots might be on other cpus' percpu counters
759 * and unavailable, double nr_events so userspace sees what they
760 * expected: additionally, we move req_batch slots to/from percpu
761 * counters at a time, so make sure that isn't 0:
763 nr_events = max(nr_events, num_possible_cpus() * 4);
766 /* Prevent overflows */
767 if (nr_events > (0x10000000U / sizeof(struct io_event))) {
768 pr_debug("ENOMEM: nr_events too high\n");
769 return ERR_PTR(-EINVAL);
772 if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
773 return ERR_PTR(-EAGAIN);
775 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
777 return ERR_PTR(-ENOMEM);
779 ctx->max_reqs = max_reqs;
781 spin_lock_init(&ctx->ctx_lock);
782 spin_lock_init(&ctx->completion_lock);
783 mutex_init(&ctx->ring_lock);
784 /* Protect against page migration throughout kioctx setup by keeping
785 * the ring_lock mutex held until setup is complete. */
786 mutex_lock(&ctx->ring_lock);
787 init_waitqueue_head(&ctx->wait);
789 INIT_LIST_HEAD(&ctx->active_reqs);
791 if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
794 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
797 ctx->cpu = alloc_percpu(struct kioctx_cpu);
801 err = aio_setup_ring(ctx, nr_events);
805 atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
806 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
807 if (ctx->req_batch < 1)
810 /* limit the number of system wide aios */
811 spin_lock(&aio_nr_lock);
812 if (aio_nr + ctx->max_reqs > aio_max_nr ||
813 aio_nr + ctx->max_reqs < aio_nr) {
814 spin_unlock(&aio_nr_lock);
818 aio_nr += ctx->max_reqs;
819 spin_unlock(&aio_nr_lock);
821 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
822 percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */
824 err = ioctx_add_table(ctx, mm);
828 /* Release the ring_lock mutex now that all setup is complete. */
829 mutex_unlock(&ctx->ring_lock);
831 pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
832 ctx, ctx->user_id, mm, ctx->nr_events);
836 aio_nr_sub(ctx->max_reqs);
838 atomic_set(&ctx->dead, 1);
840 vm_munmap(ctx->mmap_base, ctx->mmap_size);
843 mutex_unlock(&ctx->ring_lock);
844 free_percpu(ctx->cpu);
845 percpu_ref_exit(&ctx->reqs);
846 percpu_ref_exit(&ctx->users);
847 kmem_cache_free(kioctx_cachep, ctx);
848 pr_debug("error allocating ioctx %d\n", err);
853 * Cancels all outstanding aio requests on an aio context. Used
854 * when the processes owning a context have all exited to encourage
855 * the rapid destruction of the kioctx.
857 static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
858 struct ctx_rq_wait *wait)
860 struct kioctx_table *table;
862 spin_lock(&mm->ioctx_lock);
863 if (atomic_xchg(&ctx->dead, 1)) {
864 spin_unlock(&mm->ioctx_lock);
868 table = rcu_dereference_raw(mm->ioctx_table);
869 WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
870 RCU_INIT_POINTER(table->table[ctx->id], NULL);
871 spin_unlock(&mm->ioctx_lock);
873 /* free_ioctx_reqs() will do the necessary RCU synchronization */
874 wake_up_all(&ctx->wait);
877 * It'd be more correct to do this in free_ioctx(), after all
878 * the outstanding kiocbs have finished - but by then io_destroy
879 * has already returned, so io_setup() could potentially return
880 * -EAGAIN with no ioctxs actually in use (as far as userspace
883 aio_nr_sub(ctx->max_reqs);
886 vm_munmap(ctx->mmap_base, ctx->mmap_size);
889 percpu_ref_kill(&ctx->users);
894 * exit_aio: called when the last user of mm goes away. At this point, there is
895 * no way for any new requests to be submitted or any of the io_* syscalls to be
896 * called on the context.
898 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
901 void exit_aio(struct mm_struct *mm)
903 struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
904 struct ctx_rq_wait wait;
910 atomic_set(&wait.count, table->nr);
911 init_completion(&wait.comp);
914 for (i = 0; i < table->nr; ++i) {
916 rcu_dereference_protected(table->table[i], true);
924 * We don't need to bother with munmap() here - exit_mmap(mm)
925 * is coming and it'll unmap everything. And we simply can't,
926 * this is not necessarily our ->mm.
927 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
928 * that it needs to unmap the area, just set it to 0.
931 kill_ioctx(mm, ctx, &wait);
934 if (!atomic_sub_and_test(skipped, &wait.count)) {
935 /* Wait until all IO for the context is done. */
936 wait_for_completion(&wait.comp);
939 RCU_INIT_POINTER(mm->ioctx_table, NULL);
943 static void put_reqs_available(struct kioctx *ctx, unsigned nr)
945 struct kioctx_cpu *kcpu;
948 local_irq_save(flags);
949 kcpu = this_cpu_ptr(ctx->cpu);
950 kcpu->reqs_available += nr;
952 while (kcpu->reqs_available >= ctx->req_batch * 2) {
953 kcpu->reqs_available -= ctx->req_batch;
954 atomic_add(ctx->req_batch, &ctx->reqs_available);
957 local_irq_restore(flags);
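/*
 * Illustrative example of the batching above: with ctx->req_batch == 4, a
 * CPU completing events accumulates local slots until it holds
 * 2 * req_batch == 8, at which point one batch of 4 is flushed back into
 * the global ctx->reqs_available counter.  __get_reqs_available() below
 * does the inverse: when the local cache is empty it pulls a batch of 4
 * from the global counter before handing out a single slot.
 */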
960 static bool __get_reqs_available(struct kioctx *ctx)
962 struct kioctx_cpu *kcpu;
966 local_irq_save(flags);
967 kcpu = this_cpu_ptr(ctx->cpu);
968 if (!kcpu->reqs_available) {
969 int avail = atomic_read(&ctx->reqs_available);
972 if (avail < ctx->req_batch)
974 } while (!atomic_try_cmpxchg(&ctx->reqs_available,
975 &avail, avail - ctx->req_batch));
977 kcpu->reqs_available += ctx->req_batch;
981 kcpu->reqs_available--;
983 local_irq_restore(flags);
987 /* refill_reqs_available
988 * Updates the reqs_available reference counts used for tracking the
989 * number of free slots in the completion ring. This can be called
990 * from aio_complete() (to optimistically update reqs_available) or
991 * from aio_get_req() (the "we're out of events" case). It must be
992 * called holding ctx->completion_lock.
994 static void refill_reqs_available(struct kioctx *ctx, unsigned head,
997 unsigned events_in_ring, completed;
999 /* Clamp head since userland can write to it. */
1000 head %= ctx->nr_events;
1002 events_in_ring = tail - head;
1004 events_in_ring = ctx->nr_events - (head - tail);
1006 completed = ctx->completed_events;
1007 if (events_in_ring < completed)
1008 completed -= events_in_ring;
1015 ctx->completed_events -= completed;
1016 put_reqs_available(ctx, completed);
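/*
 * Worked example of the arithmetic above (illustrative): with
 * ctx->nr_events == 128, head == 120 and tail == 8, the ring still holds
 * 128 - (120 - 8) == 16 unreaped events.  If ctx->completed_events is 20,
 * those 16 events still occupy ring slots, so only 20 - 16 == 4 slots can
 * safely be handed back through put_reqs_available().
 */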
1019 /* user_refill_reqs_available
1020 * Called to refill reqs_available when aio_get_req() encounters an
1021 * out-of-space condition in the completion ring.
1023 static void user_refill_reqs_available(struct kioctx *ctx)
1025 spin_lock_irq(&ctx->completion_lock);
1026 if (ctx->completed_events) {
1027 struct aio_ring *ring;
1030 /* Access of ring->head may race with aio_read_events_ring()
1031 * here, but that's okay since whether we read the old version
1032 * or the new version, either will be valid. The important
1033 * part is that head cannot pass tail since we prevent
1034 * aio_complete() from updating tail by holding
1035 * ctx->completion_lock. Even if head is invalid, the check
1036 * against ctx->completed_events below will make sure we do the
1039 ring = folio_address(ctx->ring_folios[0]);
1042 refill_reqs_available(ctx, head, ctx->tail);
1045 spin_unlock_irq(&ctx->completion_lock);
1048 static bool get_reqs_available(struct kioctx *ctx)
1050 if (__get_reqs_available(ctx))
1052 user_refill_reqs_available(ctx);
1053 return __get_reqs_available(ctx);
1057 * Allocate a slot for an aio request.
1058 * Returns NULL if no requests are free.
1060 * The refcount is initialized to 2 - one for the async op completion,
1061 * one for the synchronous code that does this.
1063 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1065 struct aio_kiocb *req;
1067 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
1071 if (unlikely(!get_reqs_available(ctx))) {
1072 kmem_cache_free(kiocb_cachep, req);
1076 percpu_ref_get(&ctx->reqs);
1078 INIT_LIST_HEAD(&req->ki_list);
1079 refcount_set(&req->ki_refcnt, 2);
1080 req->ki_eventfd = NULL;
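/*
 * Lifecycle sketch for the two references taken above: the submission path
 * drops its synchronous reference in io_submit_one() once the opcode
 * handler returns, and the completion side drops the other when the
 * operation finishes.  Only when the second reference goes away does
 * iocb_put() below publish the io_event via aio_complete() and free the
 * request with iocb_destroy().
 */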
1084 static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1086 struct aio_ring __user *ring = (void __user *)ctx_id;
1087 struct mm_struct *mm = current->mm;
1088 struct kioctx *ctx, *ret = NULL;
1089 struct kioctx_table *table;
1092 if (get_user(id, &ring->id))
1096 table = rcu_dereference(mm->ioctx_table);
1098 if (!table || id >= table->nr)
1101 id = array_index_nospec(id, table->nr);
1102 ctx = rcu_dereference(table->table[id]);
1103 if (ctx && ctx->user_id == ctx_id) {
1104 if (percpu_ref_tryget_live(&ctx->users))
1112 static inline void iocb_destroy(struct aio_kiocb *iocb)
1114 if (iocb->ki_eventfd)
1115 eventfd_ctx_put(iocb->ki_eventfd);
1117 fput(iocb->ki_filp);
1118 percpu_ref_put(&iocb->ki_ctx->reqs);
1119 kmem_cache_free(kiocb_cachep, iocb);
1123 struct wait_queue_entry w;
1128 * Called when the io request on the given iocb is complete.
1130 static void aio_complete(struct aio_kiocb *iocb)
1132 struct kioctx *ctx = iocb->ki_ctx;
1133 struct aio_ring *ring;
1134 struct io_event *ev_page, *event;
1135 unsigned tail, pos, head, avail;
1136 unsigned long flags;
1139 * Add a completion event to the ring buffer. Must be done holding
1140 * ctx->completion_lock to prevent other code from messing with the tail
1141 * pointer since we might be called from irq context.
1143 spin_lock_irqsave(&ctx->completion_lock, flags);
1146 pos = tail + AIO_EVENTS_OFFSET;
1148 if (++tail >= ctx->nr_events)
1151 ev_page = folio_address(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
1152 event = ev_page + pos % AIO_EVENTS_PER_PAGE;
1154 *event = iocb->ki_res;
1156 flush_dcache_folio(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
1158 pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
1159 (void __user *)(unsigned long)iocb->ki_res.obj,
1160 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
1162 /* after flagging the request as done, we
1163 * must never even look at it again
1165 smp_wmb(); /* make event visible before updating tail */
1169 ring = folio_address(ctx->ring_folios[0]);
1172 flush_dcache_folio(ctx->ring_folios[0]);
1174 ctx->completed_events++;
1175 if (ctx->completed_events > 1)
1176 refill_reqs_available(ctx, head, tail);
1180 : tail + ctx->nr_events - head;
1181 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1183 pr_debug("added to ring %p at [%u]\n", iocb, tail);
1186 * Check if the user asked us to deliver the result through an
1187 * eventfd. The eventfd_signal() function is safe to be called
1190 if (iocb->ki_eventfd)
1191 eventfd_signal(iocb->ki_eventfd);
1194 * We have to order our ring_info tail store above and test
1195 * of the wait list below outside the wait lock. This is
1196 * like in wake_up_bit() where clearing a bit has to be
1197 * ordered with the unlocked test.
1201 if (waitqueue_active(&ctx->wait)) {
1202 struct aio_waiter *curr, *next;
1203 unsigned long flags;
1205 spin_lock_irqsave(&ctx->wait.lock, flags);
1206 list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)
1207 if (avail >= curr->min_nr) {
1208 wake_up_process(curr->w.private);
1209 list_del_init_careful(&curr->w.entry);
1211 spin_unlock_irqrestore(&ctx->wait.lock, flags);
1215 static inline void iocb_put(struct aio_kiocb *iocb)
1217 if (refcount_dec_and_test(&iocb->ki_refcnt)) {
1223 /* aio_read_events_ring
1224 * Pull an event off of the ioctx's event ring. Returns the number of
1227 static long aio_read_events_ring(struct kioctx *ctx,
1228 struct io_event __user *event, long nr)
1230 struct aio_ring *ring;
1231 unsigned head, tail, pos;
1236 * The mutex can block and wake us up and that will cause
1237 * wait_event_interruptible_hrtimeout() to schedule without sleeping
1238 * and repeat. This should be rare enough that it doesn't cause
1239 * performance issues. See the comment in read_events() for more detail.
1241 sched_annotate_sleep();
1242 mutex_lock(&ctx->ring_lock);
1244 /* Access to ->ring_folios here is protected by ctx->ring_lock. */
1245 ring = folio_address(ctx->ring_folios[0]);
1250 * Ensure that once we've read the current tail pointer,
1251 * we also see the events that were stored up to the tail.
1255 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
1260 head %= ctx->nr_events;
1261 tail %= ctx->nr_events;
1265 struct io_event *ev;
1266 struct folio *folio;
1268 avail = (head <= tail ? tail : ctx->nr_events) - head;
1272 pos = head + AIO_EVENTS_OFFSET;
1273 folio = ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE];
1274 pos %= AIO_EVENTS_PER_PAGE;
1276 avail = min(avail, nr - ret);
1277 avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
1279 ev = folio_address(folio);
1280 copy_ret = copy_to_user(event + ret, ev + pos,
1281 sizeof(*ev) * avail);
1283 if (unlikely(copy_ret)) {
1290 head %= ctx->nr_events;
1293 ring = folio_address(ctx->ring_folios[0]);
1295 flush_dcache_folio(ctx->ring_folios[0]);
1297 pr_debug("%li h%u t%u\n", ret, head, tail);
1299 mutex_unlock(&ctx->ring_lock);
1304 static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
1305 struct io_event __user *event, long *i)
1307 long ret = aio_read_events_ring(ctx, event + *i, nr - *i);
1312 if (unlikely(atomic_read(&ctx->dead)))
1318 return ret < 0 || *i >= min_nr;
1321 static long read_events(struct kioctx *ctx, long min_nr, long nr,
1322 struct io_event __user *event,
1325 struct hrtimer_sleeper t;
1326 struct aio_waiter w;
1327 long ret = 0, ret2 = 0;
1330 * Note that aio_read_events() is being called as the conditional - i.e.
1331 * we're calling it after prepare_to_wait() has set task state to
1332 * TASK_INTERRUPTIBLE.
1334 * But aio_read_events() can block, and if it blocks it's going to flip
1335 * the task state back to TASK_RUNNING.
1337 * This should be ok, provided it doesn't flip the state back to
1338 * TASK_RUNNING and return 0 too much - that causes us to spin. That
1339 * will only happen if the mutex_lock() call blocks, and we then find
1340 * the ringbuffer empty. So in practice we should be ok, but it's
1341 * something to be aware of when touching this code.
1343 aio_read_events(ctx, min_nr, nr, event, &ret);
1344 if (until == 0 || ret < 0 || ret >= min_nr)
1347 hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1348 if (until != KTIME_MAX) {
1349 hrtimer_set_expires_range_ns(&t.timer, until, current->timer_slack_ns);
1350 hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
1356 unsigned long nr_got = ret;
1358 w.min_nr = min_nr - ret;
1360 ret2 = prepare_to_wait_event(&ctx->wait, &w.w, TASK_INTERRUPTIBLE);
1361 if (!ret2 && !t.task)
1364 if (aio_read_events(ctx, min_nr, nr, event, &ret) || ret2)
1371 finish_wait(&ctx->wait, &w.w);
1372 hrtimer_cancel(&t.timer);
1373 destroy_hrtimer_on_stack(&t.timer);
1379 * Create an aio_context capable of receiving at least nr_events.
1380 * ctxp must not point to an aio_context that already exists, and
1381 * must be initialized to 0 prior to the call. On successful
1382 * creation of the aio_context, *ctxp is filled in with the resulting
1383 * handle. May fail with -EINVAL if *ctxp is not initialized,
1385 * or if the specified nr_events exceeds internal limits. May fail
1385 * with -EAGAIN if the specified nr_events exceeds the user's limit
1386 * of available events. May fail with -ENOMEM if insufficient kernel
1387 * resources are available. May fail with -EFAULT if an invalid
1388 * pointer is passed for ctxp. Will fail with -ENOSYS if not
1391 SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1393 struct kioctx *ioctx = NULL;
1397 ret = get_user(ctx, ctxp);
1402 if (unlikely(ctx || nr_events == 0)) {
1403 pr_debug("EINVAL: ctx %lu nr_events %u\n",
1408 ioctx = ioctx_alloc(nr_events);
1409 ret = PTR_ERR(ioctx);
1410 if (!IS_ERR(ioctx)) {
1411 ret = put_user(ioctx->user_id, ctxp);
1413 kill_ioctx(current->mm, ioctx, NULL);
1414 percpu_ref_put(&ioctx->users);
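/*
 * Illustrative user-space usage (not part of this file; error handling
 * trimmed).  glibc does not wrap these syscalls, so either raw syscall(2)
 * or libaio's io_setup()/io_destroy() wrappers are used:
 *
 *	#include <linux/aio_abi.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	aio_context_t ctx = 0;			// must start out as 0
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 *	// ... submit and reap I/O ...
 *	syscall(__NR_io_destroy, ctx);
 */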
1421 #ifdef CONFIG_COMPAT
1422 COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
1424 struct kioctx *ioctx = NULL;
1428 ret = get_user(ctx, ctx32p);
1433 if (unlikely(ctx || nr_events == 0)) {
1434 pr_debug("EINVAL: ctx %lu nr_events %u\n",
1439 ioctx = ioctx_alloc(nr_events);
1440 ret = PTR_ERR(ioctx);
1441 if (!IS_ERR(ioctx)) {
1442 /* truncating is ok because it's a user address */
1443 ret = put_user((u32)ioctx->user_id, ctx32p);
1445 kill_ioctx(current->mm, ioctx, NULL);
1446 percpu_ref_put(&ioctx->users);
1455 * Destroy the aio_context specified. May cancel any outstanding
1456 * AIOs and block on completion. Will fail with -ENOSYS if not
1457 * implemented. May fail with -EINVAL if the context pointed to
1460 SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1462 struct kioctx *ioctx = lookup_ioctx(ctx);
1463 if (likely(NULL != ioctx)) {
1464 struct ctx_rq_wait wait;
1467 init_completion(&wait.comp);
1468 atomic_set(&wait.count, 1);
1470 /* Pass requests_done to kill_ioctx() where it can be set
1471 * in a thread-safe way. If we try to set it here then we have
1472 * a race condition if two io_destroy() calls run simultaneously.
1474 ret = kill_ioctx(current->mm, ioctx, &wait);
1475 percpu_ref_put(&ioctx->users);
1477 /* Wait until all IO for the context is done. Otherwise the kernel
1478 * keeps using user-space buffers even if the user thinks the context
1482 wait_for_completion(&wait.comp);
1486 pr_debug("EINVAL: invalid context id\n");
1490 static void aio_remove_iocb(struct aio_kiocb *iocb)
1492 struct kioctx *ctx = iocb->ki_ctx;
1493 unsigned long flags;
1495 spin_lock_irqsave(&ctx->ctx_lock, flags);
1496 list_del(&iocb->ki_list);
1497 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1500 static void aio_complete_rw(struct kiocb *kiocb, long res)
1502 struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
1504 if (!list_empty_careful(&iocb->ki_list))
1505 aio_remove_iocb(iocb);
1507 if (kiocb->ki_flags & IOCB_WRITE) {
1508 struct inode *inode = file_inode(kiocb->ki_filp);
1510 if (S_ISREG(inode->i_mode))
1511 kiocb_end_write(kiocb);
1514 iocb->ki_res.res = res;
1515 iocb->ki_res.res2 = 0;
1519 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
1523 req->ki_complete = aio_complete_rw;
1524 req->private = NULL;
1525 req->ki_pos = iocb->aio_offset;
1526 req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
1527 if (iocb->aio_flags & IOCB_FLAG_RESFD)
1528 req->ki_flags |= IOCB_EVENTFD;
1529 if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
1531 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
1532 * aio_reqprio is interpreted as an I/O scheduling
1533 * class and priority.
1535 ret = ioprio_check_cap(iocb->aio_reqprio);
1537 pr_debug("aio ioprio check cap error: %d\n", ret);
1541 req->ki_ioprio = iocb->aio_reqprio;
1543 req->ki_ioprio = get_current_ioprio();
1545 ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
1549 req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
1553 static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
1554 struct iovec **iovec, bool vectored, bool compat,
1555 struct iov_iter *iter)
1557 void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
1558 size_t len = iocb->aio_nbytes;
1561 ssize_t ret = import_ubuf(rw, buf, len, iter);
1566 return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
1569 static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
1575 case -ERESTARTNOINTR:
1576 case -ERESTARTNOHAND:
1577 case -ERESTART_RESTARTBLOCK:
1579 * There's no easy way to restart the syscall since other AIOs
1580 * may already be running. Just fail this IO with EINTR.
1585 req->ki_complete(req, ret);
1589 static int aio_read(struct kiocb *req, const struct iocb *iocb,
1590 bool vectored, bool compat)
1592 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1593 struct iov_iter iter;
1597 ret = aio_prep_rw(req, iocb);
1600 file = req->ki_filp;
1601 if (unlikely(!(file->f_mode & FMODE_READ)))
1603 if (unlikely(!file->f_op->read_iter))
1606 ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
1609 ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
1611 aio_rw_done(req, call_read_iter(file, req, &iter));
1616 static int aio_write(struct kiocb *req, const struct iocb *iocb,
1617 bool vectored, bool compat)
1619 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1620 struct iov_iter iter;
1624 ret = aio_prep_rw(req, iocb);
1627 file = req->ki_filp;
1629 if (unlikely(!(file->f_mode & FMODE_WRITE)))
1631 if (unlikely(!file->f_op->write_iter))
1634 ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
1637 ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
1639 if (S_ISREG(file_inode(file)->i_mode))
1640 kiocb_start_write(req);
1641 req->ki_flags |= IOCB_WRITE;
1642 aio_rw_done(req, call_write_iter(file, req, &iter));
1648 static void aio_fsync_work(struct work_struct *work)
1650 struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
1651 const struct cred *old_cred = override_creds(iocb->fsync.creds);
1653 iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
1654 revert_creds(old_cred);
1655 put_cred(iocb->fsync.creds);
1659 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1662 if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
1663 iocb->aio_rw_flags))
1666 if (unlikely(!req->file->f_op->fsync))
1669 req->creds = prepare_creds();
1673 req->datasync = datasync;
1674 INIT_WORK(&req->work, aio_fsync_work);
1675 schedule_work(&req->work);
1679 static void aio_poll_put_work(struct work_struct *work)
1681 struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1682 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1688 * Safely lock the waitqueue which the request is on, synchronizing with the
1689 * case where the ->poll() provider decides to free its waitqueue early.
1691 * Returns true on success, meaning that req->head->lock was locked, req->wait
1692 * is on req->head, and an RCU read lock was taken. Returns false if the
1693 * request was already removed from its waitqueue (which might no longer exist).
1695 static bool poll_iocb_lock_wq(struct poll_iocb *req)
1697 wait_queue_head_t *head;
1700 * While we hold the waitqueue lock and the waitqueue is nonempty,
1701 * wake_up_pollfree() will wait for us. However, taking the waitqueue
1702 * lock in the first place can race with the waitqueue being freed.
1704 * We solve this as eventpoll does: by taking advantage of the fact that
1705 * all users of wake_up_pollfree() will RCU-delay the actual free. If
1706 * we enter rcu_read_lock() and see that the pointer to the queue is
1707 * non-NULL, we can then lock it without the memory being freed out from
1708 * under us, then check whether the request is still on the queue.
1710 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
1711 * case the caller deletes the entry from the queue, leaving it empty.
1712 * In that case, only RCU prevents the queue memory from being freed.
1715 head = smp_load_acquire(&req->head);
1717 spin_lock(&head->lock);
1718 if (!list_empty(&req->wait.entry))
1720 spin_unlock(&head->lock);
1726 static void poll_iocb_unlock_wq(struct poll_iocb *req)
1728 spin_unlock(&req->head->lock);
1732 static void aio_poll_complete_work(struct work_struct *work)
1734 struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1735 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1736 struct poll_table_struct pt = { ._key = req->events };
1737 struct kioctx *ctx = iocb->ki_ctx;
1740 if (!READ_ONCE(req->cancelled))
1741 mask = vfs_poll(req->file, &pt) & req->events;
1744 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1745 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
1746 * synchronize with them. In the cancellation case the list_del_init
1747 * itself is not actually needed, but harmless so we keep it in to
1748 * avoid further branches in the fast path.
1750 spin_lock_irq(&ctx->ctx_lock);
1751 if (poll_iocb_lock_wq(req)) {
1752 if (!mask && !READ_ONCE(req->cancelled)) {
1754 * The request isn't actually ready to be completed yet.
1755 * Reschedule completion if another wakeup came in.
1757 if (req->work_need_resched) {
1758 schedule_work(&req->work);
1759 req->work_need_resched = false;
1761 req->work_scheduled = false;
1763 poll_iocb_unlock_wq(req);
1764 spin_unlock_irq(&ctx->ctx_lock);
1767 list_del_init(&req->wait.entry);
1768 poll_iocb_unlock_wq(req);
1769 } /* else, POLLFREE has freed the waitqueue, so we must complete */
1770 list_del_init(&iocb->ki_list);
1771 iocb->ki_res.res = mangle_poll(mask);
1772 spin_unlock_irq(&ctx->ctx_lock);
1777 /* assumes we are called with irqs disabled */
1778 static int aio_poll_cancel(struct kiocb *iocb)
1780 struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
1781 struct poll_iocb *req = &aiocb->poll;
1783 if (poll_iocb_lock_wq(req)) {
1784 WRITE_ONCE(req->cancelled, true);
1785 if (!req->work_scheduled) {
1786 schedule_work(&aiocb->poll.work);
1787 req->work_scheduled = true;
1789 poll_iocb_unlock_wq(req);
1790 } /* else, the request was force-cancelled by POLLFREE already */
1795 static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1798 struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
1799 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1800 __poll_t mask = key_to_poll(key);
1801 unsigned long flags;
1803 /* for instances that support it check for an event match first: */
1804 if (mask && !(mask & req->events))
1808 * Complete the request inline if possible. This requires that three
1809 * conditions be met:
1810 * 1. An event mask must have been passed. If a plain wakeup was done
1811 * instead, then mask == 0 and we have to call vfs_poll() to get
1812 * the events, so inline completion isn't possible.
1813 * 2. The completion work must not have already been scheduled.
1814 * 3. ctx_lock must not be busy. We have to use trylock because we
1815 * already hold the waitqueue lock, so this inverts the normal
1816 * locking order. Use irqsave/irqrestore because not all
1817 * filesystems (e.g. fuse) call this function with IRQs disabled,
1818 * yet IRQs have to be disabled before ctx_lock is obtained.
1820 if (mask && !req->work_scheduled &&
1821 spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1822 struct kioctx *ctx = iocb->ki_ctx;
1824 list_del_init(&req->wait.entry);
1825 list_del(&iocb->ki_list);
1826 iocb->ki_res.res = mangle_poll(mask);
1827 if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
1829 INIT_WORK(&req->work, aio_poll_put_work);
1830 schedule_work(&req->work);
1832 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1837 * Schedule the completion work if needed. If it was already
1838 * scheduled, record that another wakeup came in.
1840 * Don't remove the request from the waitqueue here, as it might
1841 * not actually be complete yet (we won't know until vfs_poll()
1842 * is called), and we must not miss any wakeups. POLLFREE is an
1843 * exception to this; see below.
1845 if (req->work_scheduled) {
1846 req->work_need_resched = true;
1848 schedule_work(&req->work);
1849 req->work_scheduled = true;
1853 * If the waitqueue is being freed early but we can't complete
1854 * the request inline, we have to tear down the request as best
1855 * we can. That means immediately removing the request from its
1856 * waitqueue and preventing all further accesses to the
1857 * waitqueue via the request. We also need to schedule the
1858 * completion work (done above). Also mark the request as
1859 * cancelled, to potentially skip an unneeded call to ->poll().
1861 if (mask & POLLFREE) {
1862 WRITE_ONCE(req->cancelled, true);
1863 list_del_init(&req->wait.entry);
1866 * Careful: this *must* be the last step, since as soon
1867 * as req->head is NULL'ed out, the request can be
1868 * completed and freed, since aio_poll_complete_work()
1869 * will no longer need to take the waitqueue lock.
1871 smp_store_release(&req->head, NULL);
1877 struct aio_poll_table {
1878 struct poll_table_struct pt;
1879 struct aio_kiocb *iocb;
1885 aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1886 struct poll_table_struct *p)
1888 struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
1890 /* multiple wait queues per file are not supported */
1891 if (unlikely(pt->queued)) {
1892 pt->error = -EINVAL;
1898 pt->iocb->poll.head = head;
1899 add_wait_queue(head, &pt->iocb->poll.wait);
1902 static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1904 struct kioctx *ctx = aiocb->ki_ctx;
1905 struct poll_iocb *req = &aiocb->poll;
1906 struct aio_poll_table apt;
1907 bool cancel = false;
1910 /* reject any unknown events outside the normal event mask. */
1911 if ((u16)iocb->aio_buf != iocb->aio_buf)
1913 /* reject fields that are not defined for poll */
1914 if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
1917 INIT_WORK(&req->work, aio_poll_complete_work);
1918 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1921 req->cancelled = false;
1922 req->work_scheduled = false;
1923 req->work_need_resched = false;
1925 apt.pt._qproc = aio_poll_queue_proc;
1926 apt.pt._key = req->events;
1929 apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1931 /* initialize the list so that we can do list_empty checks */
1932 INIT_LIST_HEAD(&req->wait.entry);
1933 init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1935 mask = vfs_poll(req->file, &apt.pt) & req->events;
1936 spin_lock_irq(&ctx->ctx_lock);
1937 if (likely(apt.queued)) {
1938 bool on_queue = poll_iocb_lock_wq(req);
1940 if (!on_queue || req->work_scheduled) {
1942 * aio_poll_wake() already either scheduled the async
1943 * completion work, or completed the request inline.
1945 if (apt.error) /* unsupported case: multiple queues */
1950 if (mask || apt.error) {
1951 /* Steal to complete synchronously. */
1952 list_del_init(&req->wait.entry);
1953 } else if (cancel) {
1954 /* Cancel if possible (may be too late though). */
1955 WRITE_ONCE(req->cancelled, true);
1956 } else if (on_queue) {
1958 * Actually waiting for an event, so add the request to
1959 * active_reqs so that it can be cancelled if needed.
1961 list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1962 aiocb->ki_cancel = aio_poll_cancel;
1965 poll_iocb_unlock_wq(req);
1967 if (mask) { /* no async, we'd stolen it */
1968 aiocb->ki_res.res = mangle_poll(mask);
1971 spin_unlock_irq(&ctx->ctx_lock);
1977 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1978 struct iocb __user *user_iocb, struct aio_kiocb *req,
1981 req->ki_filp = fget(iocb->aio_fildes);
1982 if (unlikely(!req->ki_filp))
1985 if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1986 struct eventfd_ctx *eventfd;
1988 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1989 * instance of the file* now. The file descriptor must be
1990 * an eventfd() fd, and will be signaled for each completed
1991 * event using the eventfd_signal() function.
1993 eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
1994 if (IS_ERR(eventfd))
1995 return PTR_ERR(eventfd);
1997 req->ki_eventfd = eventfd;
2000 if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
2001 pr_debug("EFAULT: aio_key\n");
2005 req->ki_res.obj = (u64)(unsigned long)user_iocb;
2006 req->ki_res.data = iocb->aio_data;
2007 req->ki_res.res = 0;
2008 req->ki_res.res2 = 0;
2010 switch (iocb->aio_lio_opcode) {
2011 case IOCB_CMD_PREAD:
2012 return aio_read(&req->rw, iocb, false, compat);
2013 case IOCB_CMD_PWRITE:
2014 return aio_write(&req->rw, iocb, false, compat);
2015 case IOCB_CMD_PREADV:
2016 return aio_read(&req->rw, iocb, true, compat);
2017 case IOCB_CMD_PWRITEV:
2018 return aio_write(&req->rw, iocb, true, compat);
2019 case IOCB_CMD_FSYNC:
2020 return aio_fsync(&req->fsync, iocb, false);
2021 case IOCB_CMD_FDSYNC:
2022 return aio_fsync(&req->fsync, iocb, true);
2024 return aio_poll(req, iocb);
2026 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
2031 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
2034 struct aio_kiocb *req;
2038 if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
2041 /* enforce forwards compatibility on users */
2042 if (unlikely(iocb.aio_reserved2)) {
2043 pr_debug("EINVAL: reserve field set\n");
2047 /* prevent overflows */
2049 (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
2050 (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
2051 ((ssize_t)iocb.aio_nbytes < 0)
2053 pr_debug("EINVAL: overflow check\n");
2057 req = aio_get_req(ctx);
2061 err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
2063 /* Done with the synchronous reference */
2067 * If err is 0, we'd have either done aio_complete() ourselves or
2068 * arranged for that to be done asynchronously. Anything non-zero
2069 * means that we need to destroy req ourselves.
2071 if (unlikely(err)) {
2073 put_reqs_available(ctx, 1);
2079 * Queue the nr iocbs pointed to by iocbpp for processing. Returns
2080 * the number of iocbs queued. May return -EINVAL if the aio_context
2081 * specified by ctx_id is invalid, if nr is < 0, if the iocb at
2082 * *iocbpp[0] is not properly initialized, if the operation specified
2083 * is invalid for the file descriptor in the iocb. May fail with
2084 * -EFAULT if any of the data structures point to invalid data. May
2085 * fail with -EBADF if the file descriptor specified in the first
2086 * iocb is invalid. May fail with -EAGAIN if insufficient resources
2087 * are available to queue any iocbs. Will return 0 if nr is 0. Will
2088 * fail with -ENOSYS if not implemented.
2090 SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
2091 struct iocb __user * __user *, iocbpp)
2096 struct blk_plug plug;
2098 if (unlikely(nr < 0))
2101 ctx = lookup_ioctx(ctx_id);
2102 if (unlikely(!ctx)) {
2103 pr_debug("EINVAL: invalid context id\n");
2107 if (nr > ctx->nr_events)
2108 nr = ctx->nr_events;
2110 if (nr > AIO_PLUG_THRESHOLD)
2111 blk_start_plug(&plug);
2112 for (i = 0; i < nr; i++) {
2113 struct iocb __user *user_iocb;
2115 if (unlikely(get_user(user_iocb, iocbpp + i))) {
2120 ret = io_submit_one(ctx, user_iocb, false);
2124 if (nr > AIO_PLUG_THRESHOLD)
2125 blk_finish_plug(&plug);
2127 percpu_ref_put(&ctx->users);
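/*
 * Illustrative user-space submission (not part of this file; error handling
 * trimmed), exercising the IOCB_CMD_PREAD case dispatched in
 * __io_submit_one() above:
 *
 *	struct iocb cb = {
 *		.aio_lio_opcode	= IOCB_CMD_PREAD,
 *		.aio_fildes	= fd,
 *		.aio_buf	= (__u64)(unsigned long)buf,
 *		.aio_nbytes	= 4096,
 *		.aio_offset	= 0,
 *		.aio_data	= 0x1234,	// echoed back in io_event.data
 *	};
 *	struct iocb *cbs[1] = { &cb };
 *
 *	int queued = syscall(__NR_io_submit, ctx, 1, cbs);
 *	// queued is the number of iocbs accepted, 1 on success
 */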
2131 #ifdef CONFIG_COMPAT
2132 COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
2133 int, nr, compat_uptr_t __user *, iocbpp)
2138 struct blk_plug plug;
2140 if (unlikely(nr < 0))
2143 ctx = lookup_ioctx(ctx_id);
2144 if (unlikely(!ctx)) {
2145 pr_debug("EINVAL: invalid context id\n");
2149 if (nr > ctx->nr_events)
2150 nr = ctx->nr_events;
2152 if (nr > AIO_PLUG_THRESHOLD)
2153 blk_start_plug(&plug);
2154 for (i = 0; i < nr; i++) {
2155 compat_uptr_t user_iocb;
2157 if (unlikely(get_user(user_iocb, iocbpp + i))) {
2162 ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
2166 if (nr > AIO_PLUG_THRESHOLD)
2167 blk_finish_plug(&plug);
2169 percpu_ref_put(&ctx->users);
2175 * Attempts to cancel an iocb previously passed to io_submit. If
2176 * the operation is successfully cancelled, the resulting event is
2177 * copied into the memory pointed to by result without being placed
2178 * into the completion queue and 0 is returned. May fail with
2179 * -EFAULT if any of the data structures pointed to are invalid.
2180 * May fail with -EINVAL if aio_context specified by ctx_id is
2181 * invalid. May fail with -EAGAIN if the iocb specified was not
2182 * cancelled. Will fail with -ENOSYS if not implemented.
2184 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
2185 struct io_event __user *, result)
2188 struct aio_kiocb *kiocb;
2191 u64 obj = (u64)(unsigned long)iocb;
2193 if (unlikely(get_user(key, &iocb->aio_key)))
2195 if (unlikely(key != KIOCB_KEY))
2198 ctx = lookup_ioctx(ctx_id);
2202 spin_lock_irq(&ctx->ctx_lock);
2203 /* TODO: use a hash or array, this sucks. */
2204 list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
2205 if (kiocb->ki_res.obj == obj) {
2206 ret = kiocb->ki_cancel(&kiocb->rw);
2207 list_del_init(&kiocb->ki_list);
2211 spin_unlock_irq(&ctx->ctx_lock);
2215 * The result argument is no longer used - the io_event is
2216 * always delivered via the ring buffer. -EINPROGRESS indicates
2217 * cancellation is in progress:
2222 percpu_ref_put(&ctx->users);
2227 static long do_io_getevents(aio_context_t ctx_id,
2230 struct io_event __user *events,
2231 struct timespec64 *ts)
2233 ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
2234 struct kioctx *ioctx = lookup_ioctx(ctx_id);
2237 if (likely(ioctx)) {
2238 if (likely(min_nr <= nr && min_nr >= 0))
2239 ret = read_events(ioctx, min_nr, nr, events, until);
2240 percpu_ref_put(&ioctx->users);
2247 * Attempts to read at least min_nr events and up to nr events from
2248 * the completion queue for the aio_context specified by ctx_id. If
2249 * it succeeds, the number of read events is returned. May fail with
2250 * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
2251 * out of range, if timeout is out of range. May fail with -EFAULT
2252 * if any of the memory specified is invalid. May return 0 or
2253 * < min_nr if the timeout specified by timeout has elapsed
2254 * before sufficient events are available, where timeout == NULL
2255 * specifies an infinite timeout. Note that the timeout pointed to by
2256 * timeout is relative. Will fail with -ENOSYS if not implemented.
2260 SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
2263 struct io_event __user *, events,
2264 struct __kernel_timespec __user *, timeout)
2266 struct timespec64 ts;
2269 if (timeout && unlikely(get_timespec64(&ts, timeout)))
2272 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2273 if (!ret && signal_pending(current))
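/*
 * Illustrative user-space completion wait (not part of this file): reap
 * between 1 and 8 events, waiting at most one second (the timeout is
 * relative, as documented above):
 *
 *	struct io_event events[8];
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	int n = syscall(__NR_io_getevents, ctx, 1, 8, events, &ts);
 *	// on success n >= 1; events[i].data carries the iocb's aio_data and
 *	// events[i].res the byte count or a negative error code
 */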
2280 struct __aio_sigset {
2281 const sigset_t __user *sigmask;
2285 SYSCALL_DEFINE6(io_pgetevents,
2286 aio_context_t, ctx_id,
2289 struct io_event __user *, events,
2290 struct __kernel_timespec __user *, timeout,
2291 const struct __aio_sigset __user *, usig)
2293 struct __aio_sigset ksig = { NULL, };
2294 struct timespec64 ts;
2298 if (timeout && unlikely(get_timespec64(&ts, timeout)))
2301 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2304 ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2308 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2310 interrupted = signal_pending(current);
2311 restore_saved_sigmask_unless(interrupted);
2312 if (interrupted && !ret)
2313 ret = -ERESTARTNOHAND;
2318 #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
2320 SYSCALL_DEFINE6(io_pgetevents_time32,
2321 aio_context_t, ctx_id,
2324 struct io_event __user *, events,
2325 struct old_timespec32 __user *, timeout,
2326 const struct __aio_sigset __user *, usig)
2328 struct __aio_sigset ksig = { NULL, };
2329 struct timespec64 ts;
2333 if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
2336 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2340 ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2344 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2346 interrupted = signal_pending(current);
2347 restore_saved_sigmask_unless(interrupted);
2348 if (interrupted && !ret)
2349 ret = -ERESTARTNOHAND;
2356 #if defined(CONFIG_COMPAT_32BIT_TIME)
2358 SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
2361 struct io_event __user *, events,
2362 struct old_timespec32 __user *, timeout)
2364 struct timespec64 t;
2367 if (timeout && get_old_timespec32(&t, timeout))
2370 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2371 if (!ret && signal_pending(current))
2378 #ifdef CONFIG_COMPAT
2380 struct __compat_aio_sigset {
2381 compat_uptr_t sigmask;
2382 compat_size_t sigsetsize;
2385 #if defined(CONFIG_COMPAT_32BIT_TIME)
2387 COMPAT_SYSCALL_DEFINE6(io_pgetevents,
2388 compat_aio_context_t, ctx_id,
2389 compat_long_t, min_nr,
2391 struct io_event __user *, events,
2392 struct old_timespec32 __user *, timeout,
2393 const struct __compat_aio_sigset __user *, usig)
2395 struct __compat_aio_sigset ksig = { 0, };
2396 struct timespec64 t;
2400 if (timeout && get_old_timespec32(&t, timeout))
2403 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2406 ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2410 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2412 interrupted = signal_pending(current);
2413 restore_saved_sigmask_unless(interrupted);
2414 if (interrupted && !ret)
2415 ret = -ERESTARTNOHAND;
2422 COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
2423 compat_aio_context_t, ctx_id,
2424 compat_long_t, min_nr,
2426 struct io_event __user *, events,
2427 struct __kernel_timespec __user *, timeout,
2428 const struct __compat_aio_sigset __user *, usig)
2430 struct __compat_aio_sigset ksig = { 0, };
2431 struct timespec64 t;
2435 if (timeout && get_timespec64(&t, timeout))
2438 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2441 ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2445 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2447 interrupted = signal_pending(current);
2448 restore_saved_sigmask_unless(interrupted);
2449 if (interrupted && !ret)
2450 ret = -ERESTARTNOHAND;